| Column | Type | Lengths / Values |
| --- | --- | --- |
| query | string | lengths 9 to 3.4k |
| document | string | lengths 9 to 87.4k |
| metadata | dict | |
| negatives | sequence | lengths 4 to 101 |
| negative_scores | sequence | lengths 4 to 101 |
| document_score | string | lengths 3 to 10 |
| document_rank | string | 102 distinct values |
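The rows below follow this schema. A minimal sketch of reading them with the `datasets` library is shown here; the repository id is a hypothetical placeholder, not this dataset's actual name.

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder for this dataset's repository id (assumption).
ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["query"])             # natural-language description or docstring
print(row["document"])          # the positive code snippet
print(len(row["negatives"]))    # 4 to 101 hard-negative code snippets
print(row["negative_scores"])   # scores aligned one-to-one with `negatives`
print(row["document_score"], row["document_rank"])
```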
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful for fetching the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing you to specify the cluster's root resource pool directly instead of using the alias available through the `ResourcePool` data source.

> You may also wish to see the `ComputeCluster` resource for more information about clusters and how to manage them in this provider.

Example Usage

```python
import pulumi
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc01")
compute_cluster = vsphere.get_compute_cluster(name="cluster01",
                                              datacenter_id=datacenter.id)
```
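The note above about placing virtual machines on the cluster's root resource pool can be illustrated with a short follow-on sketch. Everything past the datacenter and cluster lookups (the datastore and network names, the VM sizing, and the guest id) is an illustrative assumption, not part of the original example.

```python
import pulumi
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc01")
compute_cluster = vsphere.get_compute_cluster(name="cluster01",
                                              datacenter_id=datacenter.id)

# Hypothetical supporting lookups for the placement example.
datastore = vsphere.get_datastore(name="datastore01",
                                  datacenter_id=datacenter.id)
network = vsphere.get_network(name="VM Network",
                              datacenter_id=datacenter.id)

# Place the VM directly on the cluster's root resource pool.
vm = vsphere.VirtualMachine("vm01",
    resource_pool_id=compute_cluster.resource_pool_id,
    datastore_id=datastore.id,
    num_cpus=2,
    memory=4096,
    guest_id="otherLinux64Guest",
    network_interfaces=[vsphere.VirtualMachineNetworkInterfaceArgs(
        network_id=network.id,
    )],
    disks=[vsphere.VirtualMachineDiskArgs(
        label="disk0",
        size=20,
    )])
```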
```python
def get_compute_cluster_output(datacenter_id: Optional[pulumi.Input[Optional[str]]] = None,
                               name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetComputeClusterResult]:
    ...
```
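For comparison with the plain invoke in the example above, here is a brief hedged sketch of calling this `_output` variant (the names "dc01" and "cluster01" are again assumptions). It accepts `pulumi.Input` values and returns a `pulumi.Output`, so results from other invokes or resources can be passed in without `.apply()`.

```python
import pulumi
import pulumi_vsphere as vsphere

# The *_output form of an invoke accepts Output-wrapped inputs directly.
datacenter = vsphere.get_datacenter_output(name="dc01")
compute_cluster = vsphere.get_compute_cluster_output(name="cluster01",
                                                     datacenter_id=datacenter.id)

pulumi.export("cluster_id", compute_cluster.id)
pulumi.export("root_resource_pool_id", compute_cluster.resource_pool_id)
```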
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_compute_cluster(datacenter_id: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult:\n __args__ = dict()\n __args__['datacenterId'] = datacenter_id\n __args__['name'] = name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('vsphere:index/getComputeCluster:getComputeCluster', __args__, opts=opts, typ=GetComputeClusterResult).value\n\n return AwaitableGetComputeClusterResult(\n datacenter_id=pulumi.get(__ret__, 'datacenter_id'),\n id=pulumi.get(__ret__, 'id'),\n name=pulumi.get(__ret__, 'name'),\n resource_pool_id=pulumi.get(__ret__, 'resource_pool_id'))", "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def get_cluster_id(self):\n cmd = \"svcinfo lscluster -delim :\"\n\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_CLUSTER_ID)\n cluster_id = values[index]\n return cluster_id", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def cluster_id(self):\n return self._cluster_id", "def find_cluster(self, id):\n raise NotImplementedError", "def cluster_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier\")", "def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def show_cluster(name: str) -> Cluster:\n environment = 
EnvironmentProvider().environment\n return environment.clusters[name]", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def lookup_cluster_by_name(cluster_name):\n cluster_root = get_cluster_root()\n if not cluster_root:\n print('Cannot get the root of the linked list of clusters')\n return\n cluster = None\n\n # lookup for the task associated with the id\n if cluster_root['cluster_']['name'].string() == cluster_name:\n cluster = cluster_root['cluster_'].address\n else:\n curr = cluster_root\n while True:\n curr = curr['next'].cast(uClusterDL_ptr_type)\n\n if curr['cluster_']['name'].string() == cluster_name:\n cluster = curr['cluster_'].address\n break\n\n if curr == cluster_root:\n break\n\n if not cluster:\n print(\n (\"Cannot find a cluster with the name: {}.\".format(cluster_name))\n )\n return cluster", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def show_vsan_cluster(self, cluster_id):\n url = \"clusters/%s\" % str(cluster_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def get_datacenter(conn):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_datacenters()[\"items\"]:\n if item[\"id\"] == datacenter_id:\n return item\n\n raise SaltCloudNotFound(\n \"The specified datacenter '{}' could not be found.\".format(datacenter_id)\n )", "def get_cluster(cluster_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:\n __args__ = dict()\n __args__['clusterId'] = cluster_id\n __args__['location'] = location\n __args__['project'] = project\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = _utilities.get_version()\n __ret__ = pulumi.runtime.invoke('google-native:container/v1:getCluster', __args__, opts=opts, typ=GetClusterResult).value\n\n return AwaitableGetClusterResult(\n addons_config=__ret__.addons_config,\n authenticator_groups_config=__ret__.authenticator_groups_config,\n autopilot=__ret__.autopilot,\n autoscaling=__ret__.autoscaling,\n binary_authorization=__ret__.binary_authorization,\n cluster_ipv4_cidr=__ret__.cluster_ipv4_cidr,\n conditions=__ret__.conditions,\n confidential_nodes=__ret__.confidential_nodes,\n create_time=__ret__.create_time,\n current_master_version=__ret__.current_master_version,\n current_node_version=__ret__.current_node_version,\n database_encryption=__ret__.database_encryption,\n default_max_pods_constraint=__ret__.default_max_pods_constraint,\n description=__ret__.description,\n enable_kubernetes_alpha=__ret__.enable_kubernetes_alpha,\n enable_tpu=__ret__.enable_tpu,\n endpoint=__ret__.endpoint,\n expire_time=__ret__.expire_time,\n initial_cluster_version=__ret__.initial_cluster_version,\n ip_allocation_policy=__ret__.ip_allocation_policy,\n label_fingerprint=__ret__.label_fingerprint,\n legacy_abac=__ret__.legacy_abac,\n 
location=__ret__.location,\n locations=__ret__.locations,\n logging_config=__ret__.logging_config,\n logging_service=__ret__.logging_service,\n maintenance_policy=__ret__.maintenance_policy,\n master_auth=__ret__.master_auth,\n master_authorized_networks_config=__ret__.master_authorized_networks_config,\n mesh_certificates=__ret__.mesh_certificates,\n monitoring_config=__ret__.monitoring_config,\n monitoring_service=__ret__.monitoring_service,\n name=__ret__.name,\n network=__ret__.network,\n network_config=__ret__.network_config,\n network_policy=__ret__.network_policy,\n node_ipv4_cidr_size=__ret__.node_ipv4_cidr_size,\n node_pools=__ret__.node_pools,\n notification_config=__ret__.notification_config,\n private_cluster_config=__ret__.private_cluster_config,\n release_channel=__ret__.release_channel,\n resource_labels=__ret__.resource_labels,\n resource_usage_export_config=__ret__.resource_usage_export_config,\n self_link=__ret__.self_link,\n services_ipv4_cidr=__ret__.services_ipv4_cidr,\n shielded_nodes=__ret__.shielded_nodes,\n status=__ret__.status,\n subnetwork=__ret__.subnetwork,\n tpu_ipv4_cidr_block=__ret__.tpu_ipv4_cidr_block,\n vertical_pod_autoscaling=__ret__.vertical_pod_autoscaling,\n workload_identity_config=__ret__.workload_identity_config)", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def resource_type(self):\n return 'cluster'", "def pc_cluster(data, clusters):\n dist = MorningstarPCA.pc_distance(data, clusters)\n return MorningstarPCA.get_column_with_min_value(dist)", "def cluster_name(self):\n return self._data['cluster_name']", "def get_cluster(self, label):\n try:\n return self._clusters[label]\n except KeyError:\n return None", "def find_kubernetes_cluster_template(self, id: str) -> dto.KubernetesClusterTemplate:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def find_cluster_sample(self, sample):\n for cluster in self.cluster_lst:\n if sample in cluster.get_samples():\n return cluster.get_c_id()", "def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")", "def get_coe_cluster_certificate(self, cluster_id):\n return (\n self.container_infrastructure_management.get_cluster_certificate(\n cluster_id\n )\n )", "def cluster_query(cluster_id):\n request_debug(r, logger)\n # cluster_id = request_get(r, \"cluster_id\")\n\n result = cluster_handler.get_by_id(cluster_id)\n logger.info(result)\n if result:\n response_ok['data'] = result\n return jsonify(response_ok), CODE_OK\n else:\n logger.warning(\"cluster not found with id=\" + cluster_id)\n response_fail[\"data\"] = r.form\n response_fail[\"code\"] = CODE_NOT_FOUND\n return jsonify(response_fail), CODE_NOT_FOUND", "def get_datacenter_id():\n datacenter_id = config.get_cloud_config_value(\n \"datacenter_id\", get_configured_provider(), __opts__, search_global=False\n )\n\n conn = get_conn()\n\n try:\n conn.get_datacenter(datacenter_id=datacenter_id)\n except PBNotFoundError:\n log.error(\"Failed to get datacenter: %s\", datacenter_id)\n raise\n\n return datacenter_id", "def get_cluster_output(cluster_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: 
Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:\n ...", "def get_cluster_name(cls):\n\n mid = Machineid()\n if mid.is_sps_cluster:\n return cls.SPS\n if mid.is_spts_cluster:\n return cls.SPTS\n if mid.is_mdfl_cluster:\n return cls.MDFL\n\n return cls.LOCAL", "def get_cluster_idx(_cluster):\n\n return _cluster.cluster_idx", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def get_cluster(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get cluster returned error code {response.status_code}\")\n return None\n return response.json()", "def get_cluster_by_id(self, c_id: str) -> List[str]:\n return [k for k, v in self._clusters.items() if v == c_id]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"cluster_id\"] = cluster_id\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"size_gb\"] = size_gb\n __props__.__dict__[\"tags\"] = tags\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def getClusterData(clusterName,data):\n clusters = rhevGet(\"/api/clusters\")\n doc = libxml2.parseDoc(clusters)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/clusters/cluster[name [position()=1]= '\"+ clusterName + \"']\")\n return res[0].prop(data)", "def cluster_identity_get(self, desired_attributes=None):\n return self.request( \"cluster-identity-get\", {\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterIdentityInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterIdentityInfo, False ],\n } )", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: 
Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n public_points: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"auto_renew\"] = auto_renew\n __props__.__dict__[\"auto_renew_period\"] = auto_renew_period\n __props__.__dict__[\"cluster_name\"] = cluster_name\n __props__.__dict__[\"data_center_name\"] = data_center_name\n __props__.__dict__[\"disk_size\"] = disk_size\n __props__.__dict__[\"disk_type\"] = disk_type\n __props__.__dict__[\"enable_public\"] = enable_public\n __props__.__dict__[\"instance_type\"] = instance_type\n __props__.__dict__[\"ip_white\"] = ip_white\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"major_version\"] = major_version\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"password\"] = password\n __props__.__dict__[\"pay_type\"] = pay_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"period_unit\"] = period_unit\n __props__.__dict__[\"public_points\"] = public_points\n __props__.__dict__[\"security_groups\"] = security_groups\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def cluster_name(self):\n return self.name", "async def get(id):\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n return cluster.export()", "def orig_cluster_id(self):\n if self.old_cluster_name is None:\n raise RuntimeError('old_cluster_name is not set')\n return self.fuel_web.client.get_cluster_id(self.old_cluster_name)", "def get_cluster_config(cohesity_client):\n config = cohesity_client.cluster.get_cluster()\n return config", "def _find_cluster(clusters, label):\n for clst in clusters:\n if clst.label == label: return clst\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ClusterArgs.__new__(ClusterArgs)\n\n __props__.__dict__[\"allocation_state\"] = None\n __props__.__dict__[\"allocation_state_transition_time\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"current_node_count\"] = None\n 
__props__.__dict__[\"errors\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_setup\"] = None\n __props__.__dict__[\"node_state_counts\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"provisioning_state_transition_time\"] = None\n __props__.__dict__[\"scale_settings\"] = None\n __props__.__dict__[\"subnet\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"user_account_settings\"] = None\n __props__.__dict__[\"virtual_machine_configuration\"] = None\n __props__.__dict__[\"vm_priority\"] = None\n __props__.__dict__[\"vm_size\"] = None\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def _get_center(data, node_id, feature_columns):\n if node_id in data.id.values:\n return data[data.id == node_id][feature_columns].values\n else:\n return _get_center(data, node_id[:-1], feature_columns)", "def cluster_type(self) -> str:\n return pulumi.get(self, \"cluster_type\")", "def get_cluster(self, profile):\n if self._value.has_option(profile, 'cluster'):\n if self._value.has_option(profile, 'cluster'):\n cluster = self._value.get(profile, 'cluster')\n self.logger.info(\"Connecting to: %s cluster\" % cluster)\n else:\n self.logger.error(\n \"No cluster parameter found\"\n )\n exit(1)\n else:\n self.logger.error(\n \"No profile found. Please define a default profile, \\\n or specify a named profile using `--profile`\"\n )\n exit(1)\n return cluster", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def gke_cluster(self) -> Optional['outputs.MembershipEndpointGkeCluster']:\n return pulumi.get(self, \"gke_cluster\")", "def get_cluster_template(self, name_or_id, filters=None, detail=False):\n return _utils._get_entity(\n self,\n 'cluster_template',\n name_or_id,\n filters=filters,\n detail=detail,\n )", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return 
c_info['cluster_id']", "def ecs_getClusterArn(region, cluster):\n client = boto3.client('ecs', region_name=region)\n response = client.describe_clusters(clusters=[cluster])\n\n logging.debug(\"ECS Cluster Details: %s\", response)\n if len(response['clusters']) == 1:\n return (response['clusters'][0]['clusterArn'])\n else:\n return ''", "def cluster_solution_id(dataset_name, cluster_solution_name):\n dataset_id = get_dataset(name=dataset_name).id\n cs_id = db.session.query(ClusterSolution)\\\n .filter(\n and_(\n ClusterSolution.dataset_id == dataset_id,\n ClusterSolution.name == cluster_solution_name\n )\n )[0].id\n return cs_id", "def cluster_info(self, target_nodes: Optional[\"TargetNodesT\"] = None) -> ResponseT:\n return self.execute_command(\"CLUSTER INFO\", target_nodes=target_nodes)", "def DescribeCluster(self, ResourceId):\n\n Client = boto3.client('emr')\n \n response = Client.describe_cluster (\n ClusterId = ResourceId\n\t)\n\n return response", "def search_cluster_by_node(self, target):\n for i in range(len(self.result)):\n cluster = self.result[i]\n for node in cluster.get_nodes():\n if target == node:\n return i\n return None", "def getClusterVmNextId(self):\n data = self.connect('get','cluster/nextid',None)\n return data", "def create_coe_cluster(\n self,\n name,\n cluster_template_id,\n **kwargs,\n ):\n cluster = self.container_infrastructure_management.create_cluster(\n name=name,\n cluster_template_id=cluster_template_id,\n **kwargs,\n )\n\n self.list_coe_clusters.invalidate(self)\n return cluster", "def get_datacenter_id(options):\n datacenter = get_datacenter(options)\n return datacenter._GetMoId()", "def get_ceph_clusters_by_pcc(conn: dict, id: str) -> dict:\n return get(conn, f\"{S3PCCS}/{id}/storage/clusters\")", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def get_cluster_pool(cluster_pool_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterPoolResult:\n __args__ = dict()\n __args__['clusterPoolName'] = cluster_pool_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:hdinsight/v20230601preview:getClusterPool', __args__, opts=opts, typ=GetClusterPoolResult).value\n\n return AwaitableGetClusterPoolResult(\n aks_cluster_profile=pulumi.get(__ret__, 'aks_cluster_profile'),\n aks_managed_resource_group_name=pulumi.get(__ret__, 'aks_managed_resource_group_name'),\n cluster_pool_profile=pulumi.get(__ret__, 'cluster_pool_profile'),\n compute_profile=pulumi.get(__ret__, 'compute_profile'),\n deployment_id=pulumi.get(__ret__, 'deployment_id'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n log_analytics_profile=pulumi.get(__ret__, 'log_analytics_profile'),\n managed_resource_group_name=pulumi.get(__ret__, 'managed_resource_group_name'),\n name=pulumi.get(__ret__, 'name'),\n network_profile=pulumi.get(__ret__, 'network_profile'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n status=pulumi.get(__ret__, 'status'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))", "def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no 
cluster.\")", "def get_cluster_properties(redshift_client):\n cluster_properties = redshift_client.describe_clusters(\n ClusterIdentifier=IDENTIFIER\n )['Clusters'][0]\n return cluster_properties", "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def cluster_kmeans(self, data, n_clusters):\n km = cl.KMeans(n_clusters)\n kmf = km.fit(data)\n\n labels = kmf.labels_\n\n return labels, [np.nan]", "def __str__(self):\n return \"Cluster\"", "def get_datacenter(options):\n content = get_vc_content(options)\n rootFolder = content.rootFolder\n for item in rootFolder.childEntity:\n if (options.datacenter == item.name):\n return item\n return None", "def list_clusters(self, **kwargs):\n return self._get_names('SCVMHostCluster')" ]
[ "0.72311264", "0.70518404", "0.67735595", "0.6768537", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.66624004", "0.66624004", "0.66624004", "0.66624004", "0.6613303", "0.660601", "0.660601", "0.6474703", "0.6474703", "0.6474703", "0.63924193", "0.63861185", "0.63270766", "0.6276351", "0.6269334", "0.62308496", "0.6221327", "0.62061673", "0.61876196", "0.6160735", "0.61367285", "0.61367285", "0.6121547", "0.6103939", "0.606107", "0.6054131", "0.6054131", "0.6031278", "0.6031278", "0.60063285", "0.5993482", "0.5970224", "0.5945154", "0.5939524", "0.593516", "0.5921245", "0.58835465", "0.5882657", "0.5878035", "0.58768255", "0.5869813", "0.5865723", "0.5855551", "0.5855551", "0.58481365", "0.58481365", "0.58481365", "0.58481365", "0.58481365", "0.5829678", "0.58203787", "0.5819459", "0.57718134", "0.5758876", "0.57381326", "0.5734544", "0.57087946", "0.56975925", "0.56702477", "0.5658413", "0.56578165", "0.56324726", "0.5627083", "0.56178856", "0.5616781", "0.56036544", "0.5601759", "0.5601101", "0.5597886", "0.5590274", "0.55648077", "0.5554557", "0.5547291", "0.5545548", "0.55238587", "0.5521631", "0.55111134", "0.5500737", "0.5494587", "0.54801834", "0.5477136", "0.54766876", "0.546071", "0.5446311", "0.5440939", "0.5437183", "0.54263484", "0.54172444", "0.54061675", "0.5401415", "0.5395855" ]
0.70052683
2
Displays the user and employee creation form for a branch (Sucursal).
```python
def get(self, request, *args, **kwargs):
    sucursal = Sucursal.objects.get(id=kwargs['spk'])
    user_form = UserForm()
    empleado_form = EmpleadoForm(initial={'sucursal': sucursal.id})
    forms = [user_form, empleado_form]
    context = {
        'section_title': 'Nuevo Empleado',
        'button_text': 'Crear',
        'sucursal': sucursal,
        'user_form': user_form,
        'empleado_form': empleado_form,
    }
    return render_to_response(
        'empleado/empleado_form.html',
        context,
        context_instance=RequestContext(request))
```
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user(request):\n user = User.objects.get(username=request.user.username)\n #Validacion de permisos----------------------------------------------\n roles = UsuarioRolSistema.objects.filter(usuario = user).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n #--------------------------------------------------------------------\n #Agrega los datos del nuevo usuario\n if request.method == 'POST':\n form = UsuariosForm(request.POST)\n if form.is_valid():\n nuevo=User()\n\n nuevo.username = form.cleaned_data['username']\n nuevo.first_name = form.cleaned_data['first_name']\n nuevo.last_name = form.cleaned_data['last_name']\n nuevo.email = form.cleaned_data['email']\n nuevo.set_password(form.cleaned_data['password'])\n nuevo.is_staff = True\n nuevo.is_active = True\n\n nuevo.is_superuser = True\n nuevo.last_login = datetime.now()\n nuevo.date_joined = datetime.now()\n nuevo.save()\n return HttpResponseRedirect(\"/usuarios\")\n else:\n form = UsuariosForm()\n return render_to_response('admin/usuarios/crear_usuario.html',{'form':form,\n 'user':user,\n 'crear_usuario': 'Crear usuario' in permisos}, context_instance=RequestContext(request))", "def users_create():", "def AggComite(request,pk):#esta enlazado con la clase FaseForm del archivo getion/forms\n ##ESte es lo que yo(presi) le agregue\n try:\n proyectos = Proyecto.objects.get(id_proyecto=pk)\n except Proyecto.DoesNotExist:\n return HttpResponse(\"Proyecto no existe\",status=400)\n\n try:\n proyecto = User_Proyecto.objects.filter(proyecto_id=pk)\n except:\n return HttpResponse(\"Proyecto no existe\",status=400)\n\n gerente = User.objects.get(id=proyecto[0].user_id)\n print(gerente.username)\n\n proyecto_validar=Proyecto.objects.get(id_proyecto=pk)\n\n if validar_permiso(request.user, \"is_gerente\",proyecto_validar)==False: # primero se valida si es gerente en el proyecto actual)\n messages.error(request, 'No eres gerente de proyecto, por lo tanto no puedes crear el comite de cambio')\n return redirect('gestion:comite', pk)\n\n proyectos=Proyecto.objects.get(id_proyecto=pk)\n comite= Comite.objects.all()\n form = Usuario.objects.all()\n registrados = User_Proyecto.objects.all()\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n some_var=request.POST.getlist('checkbox')\n\n if ((len(some_var)+1)%2==0 or (len(some_var)+1)==1):# SE VALIDA QUE DEBE DE SER IMPAR Y MAYOR A 1\n messages.error(request,'EL NUMERO DE USUARIOS EN EL COMITE DEBE DE SER IMPAR Y MAYOR A UNO')\n return redirect('gestion:AggComite',pk)\n p=Comite(id_proyecto=pk,id_user=gerente.id)\n p.save()\n for id in some_var:###### SE GUARDAN EN USER_PROYECTOS LAS RELACIONES\n id_user =id\n usuario=User.objects.get(id=id_user)\n registrarAuditoriaProyecto(request.user,\"Añadio al comite de cambio al usuario: \"+str(usuario.username),proyectos.id_proyecto,proyectos.nombre,\"\")\n p=Comite(id_proyecto=pk,id_user=id_user)\n p.save()\n\n return redirect('gestion:comite',pk)\n else:\n list=[]\n for i in range(form.count()):\n ok = False\n if form[i].esta_aprobado == True:\n for x in range(registrados.count()):\n if registrados[x].proyecto_id == pk and registrados[x].user_id == form[i].user.id and registrados[x].activo == True:# esta en el proyecto?\n ok=True\n for z in range(comite.count()):#si ya esta en el comite no\n if form[i].user.id == comite[z].id_user and pk==comite[z].id_proyecto:\n ok=False\n if ok:\n 
list.append(form[i].user.id)\n\n return render(request, 'proyectos/agg_comite.html', {'form': form,'list':list,'proyectos':proyectos,'idGerente':gerente.id})", "def cria_cadastro(request):\n if request.method == 'POST':\n nome = request.POST['nome'].strip()\n email = request.POST['email'].strip()\n senha = request.POST['password'].strip()\n senha2 = request.POST['password2'].strip()\n if not nome:\n messages.error(request,'O campo nome não pode ficar em branco')\n return redirect('cadastro')\n if not email:\n messages.error(request,'O campo email não pode ficar em branco')\n return redirect('cadastro')\n if senha != senha2:\n messages.error(request, 'As senhas não são iguais')\n return redirect('cadastro')\n if User.objects.filter(email=email).exists():\n messages.error(request,'Usuário já cadastrado')\n return redirect('cadastro')\n if User.objects.filter(username=nome).exists():\n messages.error(request,'Usuário já cadastrado')\n return redirect('cadastro')\n if email == 'daniel_herbert_barbosa@hotmail.com' or email == 'diogom382@gmail.com' or email == 'admindispenser@gmail.com':\n user = User.objects.create_user(username=nome, email=email, password=senha, is_superuser=True)\n user.save()\n messages.error(request, 'Usuário cadastrado com sucesso!')\n return redirect('index')\n else:\n user = User.objects.create_user(username=nome, email=email, password=senha)\n user.save()\n messages.error(request, 'Usuário cadastrado com sucesso!')\n return redirect('index')", "def cadastro(request):\n\n if request.user.is_authenticated:\n return redirect('dashboard')\n \n else:\n if request.method == 'POST':\n nome = request.POST['nome']\n email = request.POST['email']\n senha = request.POST['password']\n senha2 = request.POST['password2']\n\n if not nome.strip():\n messages.error(request, 'O campo nome não pode ficar em branco')\n return redirect('cadastro')\n\n if not email.strip():\n messages.error(request, 'O campo email não pode ficar em branco')\n return redirect('cadastro')\n\n if senhas_nao_iguais(senha, senha2):\n messages.error(request, 'As senhas não são iguais')\n return redirect('cadastro')\n\n if User.objects.filter(email = email).exists():\n messages.error(request, 'Email já cadastrado')\n return redirect('cadastro')\n\n if User.objects.filter(username = nome).exists():\n messages.error(request, 'Usuário já cadastrado')\n return redirect('cadastro')\n \n user = User.objects.create_user(username = nome, email = email, password = senha)\n user.save()\n \n messages.success(request, 'Cadastro realizado com sucesso')\n return redirect('login')\n\n else:\n return render(request, 'usuarios/cadastro.html')", "def CreadorComentario(hora, fecha, contenido, act, usuario): \n nuevoComentario = Comentario(horacomentario=hora, fechacomentario=fecha, contenido=contenido, idactcomentario=act,loginusuario=usuario)\n nuevoComentario.save()\n Accion.objects.crearAccion(\n usuario,\n \"El usuario %s hizo un comentario en la actividad %s\" % (usuario.username, act.nombreact),\n 'i')\n\n Accion.objects.crearAccion(\n usuario,\n \"Se creo una instancia de Comentario con los valores Fecha: %s, Contenido: %s\" % (fecha, contenido),\n 'd'\n )", "def input_usuario(self):\n print(\"Elegir Nivel de operacion\\n\")\n while True:\n input_complejo = self.elegir_nivel_operacion()\n \n if input_complejo == '1':\n self.elegir_operacion_basica()\n elif input_complejo == '2':\n self.elegir_operacion_avanzada()\n elif input_complejo == '3':\n self.entrada_operacion_fasores()\n elif input_complejo == 'x' or input_complejo == 
'X':\n print('Programa finalizado')\n sys.exit()\n else:\n print('Elegir el Nivel de operacion correcta\\n')", "def nuevopermiso(): \n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n form = PermisoFormulario(request.form)\n form.id_recurso.choices= [(r.id, r.nombre) for r in db_session.query(Recurso).order_by(Recurso.nombre)]\n if request.method == 'POST' and form.validate():\n try:\n permiso = Permiso(form.codigo.data, form.descripcion.data, form.id_recurso.data)\n db_session.add(permiso)\n db_session.commit()\n flash('El permiso ha sido registrado con exito','info')\n return redirect('/permiso/administrarpermiso')\n except DatabaseError, e:\n if e.args[0].find('duplicate key value violates unique') != -1:\n flash('Clave unica violada por favor ingrese otra combinacion de permiso con recurso unica' , 'error')\n else:\n flash('Error en la Base de Datos' + e.args[0], 'error')\n return render_template('permiso/nuevopermiso.html', form=form)\n return render_template('permiso/nuevopermiso.html', form=form)\n else:\n flash('Sin permisos para agregar permisos', 'permiso')\n return render_template('index.html')", "def crear ():\n #secuencia\n secuencia = input('\\nNúmero de secuencia: ')\n valido_entero = validacion_entero(secuencia)\n while valido_entero == False:\n print('Num de secuencia no válido. Tiene que ser un número entero: ')\n secuencia = input('\\nNúmero de secuencia: ')\n valido_entero = validacion_entero(secuencia)\n #sexo\n sexo = str(input('Sexo (Hombre/Mujer): '))\n while sexo != 'Hombre' and sexo != \"Mujer\":\n print('Sexo no válido. Introduzca de nuevo el sexo:')\n sexo = str(input('\\nSexo (Hombre/Mujer): '))\n #edad\n edad = input('Edad: ')\n valido_entero = validacion_entero(edad)\n while valido_entero == False:\n print('Edad no válida. 
Tiene que ser un número entero: ')\n edad = input('\\nEdad: ')\n valido_entero = validacion_entero(edad)\n #persona \n persona = Informacion(secuencia, sexo, edad)\n return persona", "def crear_rol(request):\n user = User.objects.get(username=request.user.username)\n #Validacion de permisos---------------------------------------------\n roles = UsuarioRolSistema.objects.filter(usuario = user).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n print permisos\n #-------------------------------------------------------------------\n if request.method == 'POST':\n form = RolesForm(request.POST)\n if form.is_valid():\n r = Rol()\n r.nombre = form.cleaned_data['nombre']\n r.descripcion = form.cleaned_data['descripcion']\n r.fecHor_creacion = datetime.now()\n r.usuario_creador = user\n r.categoria = form.cleaned_data['categoria']\n r.save()\n if r.categoria == \"1\":\n return HttpResponseRedirect(\"/roles/sist\")\n return HttpResponseRedirect(\"/roles/proy\")\n else:\n form = RolesForm()\n return render_to_response('admin/roles/crear_rol.html',{'form':form,\n 'user':user,\n 'crear_rol': 'Crear rol' in permisos},context_instance=RequestContext(request))", "def atualizar_usuario():\n try:\n if current_user.is_administrator():\n cod_id = request.args.get('id')\n usuario = Usuario.query.filter_by(cod_usuario = cod_id).one()\n grupos = Grupo.query.all()\n if request.method == 'POST':\n usuario.usuario = request.form['usuario']\n usuario.email = request.form['email']\n usuario.senha = request.form['senha']\n usuario.cod_grupo = request.form['tipo_usuario']\n db.session.commit()\n return redirect(url_for('admin.listar_usuarios'))\n return render_template('admin/atualizar_usuario.html', usuario=usuario, grupos=grupos)\n return redirect(url_for('main.index'))\n except Exception as e:\n abort(500, e)", "def crear_userstory(request, id_proyecto):\n band=False\n context = RequestContext(request)\n\n\n rol_en_proyecto=Equipo.objects.get(usuario_id=request.user.pk, proyecto_id=id_proyecto)\n rol = Group.objects.get(id=rol_en_proyecto.rol.pk)\n user_permissions_groups = list(rol.permissions.all())\n\n for p in user_permissions_groups:\n if (p.codename == 'add_userstory'):\n band = True\n\n if (band == True):\n\n #valor booleano para llamar al template cuando el registro fue correcto\n registered = False\n\n if request.method == 'POST':\n userstory_form = UserstoryForm(data=request.POST, id_proyecto=id_proyecto)\n\n # If the two forms are valid...\n if userstory_form.is_valid():\n\n # Guarda el Usuarios en la bd\n #us = userstory_form\n userstory_form.clean()\n nombre = userstory_form.cleaned_data['nombre']\n descripcion =userstory_form.cleaned_data['descripcion']\n tiempoestimado =userstory_form.cleaned_data['tiempoestimado']\n usuarioasignado= userstory_form.cleaned_data['usuarioasignado']\n prioridad =userstory_form.cleaned_data['prioridad']\n porcentajerealizado= userstory_form.cleaned_data['porcentajerealizado']\n sprint=userstory_form.cleaned_data['sprint']\n\n us = Userstory()\n\n if usuarioasignado and sprint:\n us.estado = 'InPlanning'\n\n if userstory_form.cleaned_data['prioridad'] == 'Alta':\n cambioDePrioridades(usuarioasignado, sprint)\n\n us.nombre = nombre\n us.descripcion =descripcion\n us.tiempoestimado =tiempoestimado\n us.usuarioasignado =usuarioasignado\n us.prioridad=prioridad\n us.porcentajerealizado=porcentajerealizado\n us.sprint = sprint\n\n us.save()\n #Actualiza la variable 
para llamar al template cuando el registro fue correcto\n registered = True\n\n # Invalid form or forms - mistakes or something else?\n # Print problems to the terminal.\n # They'll also be shown to the user.\n else:\n print userstory_form.errors\n\n pass\n\n # Not a HTTP POST, so we render our form using two ModelForm instances.\n # These forms will be blank, ready for user input.\n else:\n userstory_form = UserstoryForm(id_proyecto=id_proyecto)\n\n\n # Render the template depending on the context.\n return render_to_response('./Userstories/crearUserstory.html', {'user_form': userstory_form, 'registered': registered, 'id_proyecto': id_proyecto}, context)\n else:\n raise Http404(\"No cuenta con los permisos necesarios\")", "def nuevo_usuario(request):\n usuario = request.user\n if not usuario.is_superuser:\n return HttpResponseRedirect('/gestion')\n if request.method == 'POST':\n formulario = UserCreateForm(request.POST)\n if formulario.is_valid:\n try:\n formulario.save()\n return HttpResponseRedirect('/')\n except:\n error = 'Error al procesar la entidad'\n return render_to_response('crear.html', {'formulario': formulario, 'errors': error, 'usuario': usuario},\n context_instance=RequestContext(request))\n else:\n formulario = UserCreateForm()\n return render_to_response('crear.html', {'formulario': formulario, 'usuario': usuario},\n context_instance=RequestContext(request))", "def form_valid(self, form):\n usuario = form.save(commit=False)\n usuario.usuario = User.objects.get(username=self.request.user)\n usuario.save()\n return HttpResponseRedirect(self.get_success_url())", "def UsersProyecto(request,pk):#esta enlazado con la clase FaseForm del archivo getion/forms\n proyecto=Proyecto.objects.get(id_proyecto=pk)\n\n user= request.user## USER ACTUAL\n form = User.objects.all()\n registrados = User_Proyecto.objects.all()\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n #if form.is_valid():\n some_var=request.POST.getlist('checkbox')\n print(some_var)\n #form.save()\n return redirect('gestion:menu')\n else:\n list=[]\n for i in range(form.count()):\n ok = False\n if form[i].id != user.id: #and form[i].esta_aprobado == True :\n for x in range(registrados.count()):\n if registrados[x].proyecto_id == pk:\n if form[i].id == registrados[x].user_id:\n ok=True\n if ok:\n list.append(form[i].id)\n\n return render(request, 'proyectos/usuarios_proyectos.html', {'form': form,'list':list,'pk':pk,'proyectos':proyecto})", "def asignar_roles_sistema(request, usuario_id):\n user = User.objects.get(username=request.user.username)\n permisos = get_permisos_sistema(user)\n usuario = get_object_or_404(User, id=usuario_id)\n lista_roles = UsuarioRolSistema.objects.filter(usuario = usuario)\n lista_permisos = RolPermiso.objects.filter()\n lista_rolusuario = RolUsuario.objects.filter(usuario=usuario)\n print lista_permisos\n tam=len(lista_permisos)\n relac=ProductOwner()\n print tam\n if request.method == 'POST':\n form = AsignarRolesForm(1, request.POST)\n if form.is_valid():\n lista_nueva = form.cleaned_data['roles']\n for i in lista_roles:\n i.delete()\n lista_rolusuario.delete()\n\n for i in lista_nueva:\n nuevo = UsuarioRolSistema()\n rel = RolUsuario()\n nuevo.usuario = usuario\n nuevo.rol = i\n nuevo.save()\n if i.id == 2:\n rel.usuario = usuario\n rel.save()\n if i.id == 5:\n relac.usuario = usuario\n relac.save()\n return HttpResponseRedirect(\"/usuarios\")\n else:\n if usuario.id == 1:\n error = \"No se puede editar roles sobre el 
superusuario.\"\n return render_to_response(\"admin/usuarios/asignar_roles.html\", {'mensaje': error,\n 'usuario':usuario,\n 'user': user,\n 'asignar_roles': 'Asignar rol' in permisos},context_instance=RequestContext(request))\n dict = {}\n for i in lista_roles:\n print i.rol\n dict[i.rol.id] = False\n form = AsignarRolesForm(1,initial = {'roles': dict})\n return render_to_response(\"admin/usuarios/asignar_roles.html\", {'form':form, 'usuario':usuario, 'user':user, 'asignar_roles': 'Asignar rol' in permisos},context_instance=RequestContext(request))", "def create_user_questionnaire_in_progress(self):\n username = 'pseudo'\n email = 'martine@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=2, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n list_advice_id = [1, 5, 10]\n self.add_advice_to_user_created(user_created, list_advice_id)\n\n return user_created", "def crearFase(request,nroFase):\n\n fase = FaseForm(request.POST)\n global CANTIDAD\n cantidad = CANTIDAD\n cantidad_fases = 4\n\n if fase.is_valid():\n x = Proyecto.objects.last()\n nombreFase = fase.cleaned_data.get(\"nombre\")\n descFase = fase.cleaned_data.get(\"descripcion\")\n z = Fase(nombre=nombreFase,descripcion=descFase,id_Proyecto=x)\n z.save()\n registrarAuditoria(request.user, 'Creo la Fase: '+str(z.nombre)+' en el proyecto: '+ str(x.nombre))\n if nroFase != 0:\n cantidad = cantidad - 1\n CANTIDAD = cantidad\n nroFase=nroFase-1\n return redirect('gestion:crearFase', nroFase)\n else:\n assign_perm('is_gerente', request.user, x)\n add_permission_gerente(request.user,False)\n return redirect('gestion:menu')\n\n #Se crea la variable cantidad fases para pintarla en el template en tiempo real\n cantidad_fases = cantidad_fases - nroFase\n cantidad_fases = cantidad_fases + 1\n\n context = {\n 'form': fase,\n 'cantidad_fases': cantidad_fases,\n }\n return render(request, 'proyectos/crear_fase.html', context)", "def create_user(self):\r\n room_num = self.create_room() # assign room_num to self.creat_room() that return room number\r\n self.room_num.config(text=room_num) # change room_num Label to ⬆\r\n # change room_text Label to `RoomNumber :`\r\n self.room_text.config(text='RoomNumber :')\r\n with open('users.txt', 'at') as f:\r\n # write the content in a file\r\n f.write(f'{self.firstname_value.get()} {self.lastname_value.get()} {self.id_number_value.get()} {self.country_value.get()} {room_num}\\n')\r\n self.clear() # clear all Entry\r", "def post(self):\r\n piso=self.request.get('piso')\r\n numext=self.request.get('numext')\r\n numint=self.request.get('numint')\r\n piso=self.validonumero(piso)\r\n numext=self.validonumero(numext)\r\n numint=self.validonumero(numint)\r\n \r\n empresa=empresas()\r\n empresa.nombre=self.request.get('desc')\r\n empresa.calle=self.request.get('calle')\r\n empresa.numeroExterior=int(numext)\r\n empresa.numeroInterior=int(numint)\r\n empresa.colonia=self.request.get('colonia')\r\n empresa.piso=int(piso)\r\n empresa.andador=self.request.get('andador')\r\n empresa.codigo_postal=int(self.request.get('cp'))\r\n empresa.sitioweb=self.request.get('web')\r\n empresa.correo=self.request.get('mail')\r\n empresa.nombreContacto=\"\"\r\n empresa.paternoContacto=\"\"\r\n empresa.maternoContacto=\"\"\r\n #### \r\n ciudad=self.request.get('ciudad')\r\n query=\"where ciudad='%s'\"%ciudad\r\n cd=ciudades.gql(query)\r\n city=cd.fetch(1)\r\n for lstcd in city:\r\n 
empresa.id_Ciudad=lstcd.key().id()\r\n empresa.put()\r\n jsondic={}\r\n jsondata=[]\r\n jsondata+=[self.addKey(jsondic,\"Dato\", empresa.key().id())]\r\n self.response.out.write(simplejson.dumps(jsondata))\r\n return False", "def createuser (d={'user','AAuser'}):\n print ('*creating:',d)\n x=\"//*[@value='Add User...']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))).send_keys(Keys.RETURN)\n x=\"//*[@name='name']\"; e = g.wait.until(EC.element_to_be_clickable((By.XPATH, x))); e.clear(); e.send_keys(d.get('user'))\n x=\"//*[@name='password']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('optimize')\n x=\"//*[@name='password-confirm']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('optimize')\n x=\"//*[@name='firstName']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys('firstName')\n x=\"//*[@name='lastName']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys(d.get('ln','lastName'))\n x=\"//*[@name='email']\"; e = g.driver.find_element(By.XPATH, x); e.clear(); e.send_keys(d.get('email',d.get('user')+'@softwareag.com'))\n x=\"//*[@name='submitbutton']\"; g.driver.find_element(By.XPATH, x).send_keys(Keys.RETURN)\n x=\"//*[@name='name' and @type='hidden' and @value='\"+d.get('user')+\"']\"; g.wait.until(EC.presence_of_element_located((By.XPATH, x)))\n x=\"//*[@name='cancelbutton']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))).send_keys(Keys.RETURN)\n x=\"//*[@value='Add User...']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))) #fuzzy check, when (many) users paged off\n #x=\"//*/a[text() = '\"+d.get('user')+\"']\"; g.wait.until(EC.element_to_be_clickable((By.XPATH, x))) #strict check, all users on (same) page", "def nuevo_grupo(request):\n usuario = request.user\n if request.method == 'POST':\n formulario = RolForm(request.POST)\n if formulario.is_valid:\n try:\n proyecto = Proyecto.objects.get(scrum_master=usuario)\n\n formulario.proyecto = proyecto\n rol = formulario.save()\n rol = get_object_or_404(Rol, pk=rol.id)\n rol.proyecto = proyecto\n rol.save()\n return HttpResponseRedirect('/../grupos')\n except:\n error = 'Error al procesar la entidad'\n return render_to_response('crear_grupo.html',{'formulario':formulario,'errors':error,'usuario':usuario}, context_instance=RequestContext(request))\n else:\n formulario = RolForm()\n return render_to_response('crear_grupo.html', {'formulario': formulario,'usuario':usuario}, context_instance=RequestContext(request))", "def cadastrar_usuario(self, dados={}):\n\n if self.busca_usuario(email=dados['email']):\n # retorna false pois ja existe usuario cadastrado com\n # tal email e assim o novo cadastrado nao foi efetuado\n # com sucesso\n return {'status': False,\n 'msg': 'usuario ja existe',\n 'dados': dados}\n else:\n self.user = self.uPersistencia.criaUsuario(\n id_tipo_usuario=dados['id_tipo_usuario'],\n nome=dados['nome'],\n email=dados['email'],\n latitude_atual=dados['latitude'],\n longitude_atual=dados['longitude'],\n cep_atual=('cep' in dados.keys() and\n dados['cep'] or\n None),\n ddd_telefone=('ddd_telefone' in dados.keys() and\n dados['ddd_telefone'] or None),\n telefone=('telefone' in dados.keys() and\n dados['telefone'] or None),\n ddd_celular=('ddd_celular' in dados.keys() and\n dados['ddd_celular'] or None),\n celular=('celular' in dados.keys() and\n dados['celular'] or None))\n\n # persiste as informacoes no bd\n res = self.user.inserir()\n\n return {'status': res,\n 'msg': (res and 'usuario cadastrado com sucesso' or\n 'usuario nao 
cadastrado'),\n 'dados': self.user.getToString()}", "def principal(request):\n user = User.objects.get(username=request.user.username)\n #Validacion de permisos---------------------------------------------\n roles = UsuarioRolSistema.objects.filter(usuario = user).only('rol')\n print roles\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos_sistema = []\n for i in permisos_obj:\n permisos_sistema.append(i.nombre)\n variables ={}\n for i in permisos_sistema:\n if i == 'Ver roles' or i == 'Crear rol' or i == 'Modificar rol' or i == 'Eliminar rol' or i == 'Asignar rol':\n variables['roles'] = True\n\n if i == 'Ver usuarios' or i == 'Crear usuario' or i == 'Modificar usuario' or i == 'Eliminar usuario':\n variables['usuarios'] = True\n\tif i == 'Ver proyectos' or i == 'Crear proyecto' or i == 'Modificar proyecto' or i == 'Eliminar proyecto':\n \t variables['proyectos'] = True\n\n variables['user'] = user\n print variables\n rolesp = UsuarioRolProyecto.objects.filter(usuario = user).only('rol')\n lista_proyectos = []\n for i in rolesp:\n print \"kkk\"\n if not i.proyecto.id in lista_proyectos:\n print \"tt\"\n lista_proyectos.append(i.proyecto.id)\n #variables['acciones']=True\n print lista_proyectos\n variables['permisos_proyecto'] = lista_proyectos\n #-------------------------------------------------------------------\n lista = Proyecto.objects.all()\n variables['lista'] = lista\n return render_to_response('main_page.html', variables, context_instance=RequestContext(request))", "def save_inscription():\n user = None\n f = InscriptionForm()\n if f.validate_on_submit() and f.uniq_Username() and f.passwd_confirmed():\n from hashlib import sha256\n m = sha256()\n m.update(f.get_mdp().encode())\n user = Utilisateur(\n idU = f.get_id(),\n mdpU = m.hexdigest(),\n nomU = f.get_name(),\n prenomU = f.get_surname())\n db.session.add(user)\n i = Actions(\n contenu = \"Bienvenue à \"+f.get_id(),\n liste = 1\n )\n db.session.add(i)\n db.session.commit()\n return redirect(url_for('login'))\n return render_template(\n\t\t\"inscription.html\",\n\t\tform = f,\n title = \"Inscription\")", "def form_valid(self, form):\n form.instance.founder = self.request.user\n print('Project Create user:', self.request.user)\n form.save()\n\n tc_lib.generate_user_matches(form)\n\n return super(ProjectCreate, self).form_valid(form)", "def create_account_form(request, post):\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n\n phone_number = post.get(\"phone\")\n\n password = post.get(\"password\")\n\n height = float(post.get(\"height\"))\n weight = float(post.get(\"weight\"))\n sex = post.get(\"sex\")\n\n current_medications = post.get(\"medications\")\n allergies = post.get(\"allergies\")\n medical_conditions = post.get(\"medical_conditions\")\n family_history = post.get(\"family_history\")\n additional_info = post.get(\"additional_info\")\n primary_hospital = Hospital.objects.get(pk=post.get(\"primary_hospital\"))\n\n policy_number = int(post.get(\"policy_number\"))\n company = post.get(\"company\")\n\n if User.objects.filter(username=username).exists():\n messages.add_message(request, messages.ERROR, 'User already exists!')\n return False\n\n else:\n new_user = User.objects.create_user(\n username=username, password=password,\n first_name=first_name, last_name=last_name, email=email\n )\n\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n phone_number=phone_number, 
status=UserStatus.objects.get(pk=3),\n primary_hospital=primary_hospital\n )\n\n medical_info = MedicalInformation.objects.create(\n height=height, weight=weight, sex=sex,\n medical_conditions=medical_conditions,\n allergies=allergies, medications=current_medications,\n family_history=family_history, additional_info=additional_info,\n user=new_user_profile\n )\n\n insurance = Insurance.objects.create(\n policy_number=policy_number, company=company, medical_information=medical_info,\n )\n\n return True", "def crud_user(request):\n if request.POST['action'] == 'EDIT':\n username = request.POST['username']\n tel = request.POST['tel']\n mobile = request.POST['mobile']\n office = request.POST['office']\n num = request.POST['num']\n user = IMPUser.objects.get(username = username)\n user.tel = tel\n user.mobile = mobile\n user.office = office\n user.num = num\n user.save()\n return redirect(urlresolvers.reverse(\"address_page\"))\n #print(request.POST['tel'])\n #username = request.POST['username']\n #password = request.POST['password']\n #password1 = request.POST['password1']\n #tel = request.POST['tel']\n #try:\n # #if password.equals(password1)\n # User.objects.all().get(username = username) \n # return render(request, \"account/sign_up.html\", context={\"errmsg\":\"Duplicated User!\"})\n #except:\n # print(\"NO USER!\")\n # IMPUser.objects.create_user(username = username, password = password, tel=tel)\n return redirect(urlresolvers.reverse(\"sign_in\"))", "def post(self):\r\n return create_user(request)", "def create_account(self):\r\n logger.info('*' * 20 + ' Starting creating user account ' + '*' * 20)\r\n logger.info(f'\\nfor user {self}')\r\n self.automation.wait.until(EC.presence_of_element_located((By.ID, 'email_create')))\r\n self.automation.driver.find_element_by_css_selector(\"#email_create\").send_keys(self.email) # send email\r\n self.automation.driver.find_element_by_css_selector(\"#SubmitCreate\").click() # 'create an account' btn\r\n\r\n # ##############################################\r\n # 1- mr. or mrs. 
?\r\n logger.info(f'Choose title {self.title}')\r\n self.automation.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#account-creation_form div.account_creation div.clearfix')))\r\n if self.title == 'mr.':\r\n gender_selector = \"input#id_gender1\"\r\n\r\n else:\r\n gender_selector = \"input#id_gender2\"\r\n\r\n self.automation.driver.find_element_by_css_selector(gender_selector).click()\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 2000)\") # scroll down\r\n\r\n # ##############################################\r\n logger.info(f'adding fname {self.fname}')\r\n # 2- first name\r\n self.automation.driver.find_element_by_css_selector(\"#customer_firstname\").send_keys(self.fname)\r\n\r\n # ##############################################\r\n logger.info(f'adding lname {self.lname}')\r\n # 3- last name\r\n self.automation.driver.find_element_by_css_selector(\"#customer_lastname\").send_keys(self.lname)\r\n\r\n # ##############################################\r\n logger.info(f'adding email {self.email}')\r\n # 4- email\r\n email_elem = self.automation.driver.find_element_by_css_selector(\"#email\")\r\n email = email_elem.get_attribute('value')\r\n if not email: # check email is passed or not ?\r\n logger.info('email was not added , add it again ')\r\n email.send_keys(self.email)\r\n\r\n # ##############################################\r\n logger.info(f'adding password')\r\n # 5- password\r\n password = f'document.getElementById(\"passwd\").value=\"{self.password}\";' # js code to change password elm value\r\n self.automation.driver.execute_script(password)\r\n\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 1000)\") # scroll down\r\n\r\n # ##############################################\r\n # 6- date of birth year-month-day\r\n logger.info(f'adding dob {self.dob}')\r\n self.select_dob()\r\n\r\n # ##############################################\r\n logger.info(f'adding fname#2 {self.fname}')\r\n # 7- fname\r\n get_fname = 'return document.querySelectorAll(\"div.account_creation #firstname\")[0].value;'\r\n fname = self.automation.driver.execute_script(get_fname)\r\n if not fname: # check fname is passed or not ?\r\n fname = f'document.querySelectorAll(\"div.account_creation #firstname\")[0].value=\"{self.fname}\";'\r\n self.automation.driver.execute_script(fname)\r\n\r\n # ##############################################\r\n logger.info(f'adding lname#2 {self.lname}')\r\n # 8- last name\r\n get_lname = 'return document.querySelectorAll(\"div.account_creation #lastname\")[0].value;'\r\n lname = self.automation.driver.execute_script(get_lname)\r\n if not lname: # check lname is passed or not ?\r\n lname = f'document.querySelectorAll(\"div.account_creation #lastname\")[0].value=\"{self.lname}\";'\r\n self.automation.driver.execute_script(lname)\r\n\r\n # ##############################################\r\n # 9- complete profile ( company, city, address, mobile, postalcode, alias address)\r\n logger.info('complete profile with ( company, city, address, mobile, postalcode, alias address)')\r\n logger.info(f'company({self.company}) , city({self.city}) , address({self.address}), mobile({self.phone}) , postalcode({self.postalcode}) , alias address({self.address[0] + self.address[-1]})')\r\n self.complete_profile()\r\n\r\n # ##############################################\r\n # 10- state (randomly choice)\r\n logger.info('choose state randomly')\r\n states = [state.text for state in 
self.automation.driver.find_elements_by_css_selector('#id_state option')]\r\n Select(self.automation.driver.find_element_by_css_selector('#id_state')).select_by_visible_text(choice(states))\r\n # ###############################################\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 700)\") # scroll down\r\n self.automation.driver.find_element_by_css_selector('#submitAccount').click() # register btn\r\n # ################ wait to login ###############################\r\n account_lst = self.automation.driver.find_elements_by_css_selector('.myaccount-link-list')\r\n timer = 1\r\n is_login = True\r\n while not account_lst:\r\n if timer == 60:\r\n is_login = False\r\n break\r\n time.sleep(.3)\r\n account_lst = self.automation.driver.find_elements_by_css_selector('.myaccount-link-list')\r\n timer += 1\r\n return is_login", "def new_user():\n pass", "def form_valid(self, form):\n # Associate Job to user if they are logged in\n if self.request.user.is_authenticated():\n form.instance.creator = self.request.user\n return super().form_valid(form)", "def filluserform(form):\n\n## //ACCOUNT TYPE\n## //IF Client account = Client\n## //Show AUTH TYPE\n## //IF Passphrase auth = Passphrase\n## //SHOW NAME, EMAIL, PASS, GROUP, EXPIRY\n## //IF Email auth = Email\n## //SHOW NAME, EMAIL, GROUP, EXPIRY\n## //IF None auth = None\n## //SHOW NAME, GROUP, EXPIRY\n## //IF ADMIN account = Admin\n## //SHOW NAME, EMAIL, PASS\n\n groups = hl.getAllGroups()\n nodes = hl.getAllNodes()\n\n if request.method == 'POST':\n if form == \"AC\":\n #Store Account Type in session variable \n if request.form['accountType1'] == \"Client\":\n session['accountType'] = \"Client\"\n return render_template(\"userform_create_user.html\", postback = 1, account = \"Client\", auth = \"NULL\", groups = groups, nodes = nodes)\n elif request.form['accountType1'] == \"Admin\":\n session['accountType'] = \"Admin\"\n session['authType'] = \"Passphrase\"\n return render_template(\"userform_create_user.html\", postback = 1, account = \"Admin\", auth = \"Passphrase\", groups = groups, nodes = nodes)\n else:\n abort(404)\n \n elif form == \"AU\":\n #Store Auth Type in session variable\n if request.form['authType1'] == \"Passphrase\":\n session['authType'] = \"Passphrase\"\n return render_template(\"userform_create_user.html\", postback = 1, account = \"Client\", auth = \"Passphrase\", groups = groups, nodes = nodes)\n elif request.form['authType1'] == \"Email\":\n session['authType'] = \"Email\"\n return render_template(\"userform_create_user.html\", postback = 1, account = \"Client\", auth = \"Email\", groups = groups, nodes = nodes)\n elif request.form['authType1'] == \"None\":\n session['authType'] = \"None\"\n return render_template(\"userform_create_user.html\", postback = 1, account = \"Client\", auth = \"None\", groups = groups, nodes = nodes)\n else:\n abort(404)\n\n elif form == \"ET\":\n user = session['user']\n return render_template(\"userform_edit_user.html\", postback = 1, username=user[\"Name\"], email=user[\"Email\"], authtype=user[\"Auth_Type\"], accounttype=user[\"Account_Type\"], expire=user[\"Expiry\"], grp = user[\"Grp\"], groups = groups, node = user[\"Node\"])\n \n elif form == \"DE\":\n #MAKE SURE ALL VALUE THAT ARE NOT PART OF REQUEST.FORM DO NOT THROW 400 BAD REQUEST ERROR\n name = request.form['name1']\n\n auth = session['authType']\n session.pop('authType', None)\n \n account = session['accountType']\n session.pop('accountType', None)\n \n \n if auth == \"Passphrase\":\n 
pwd = randompassword() #Default Generation or Not\n email = request.form['email1']\n elif auth == \"Email\":\n pwd = \"\" \n email = request.form['email1']\n else:\n pwd = \"\"\n email = \"\"\n\n if account == \"Client\":\n group = int(request.form['groupId1'])\n expiry = request.form['expiry1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n else:\n group = -1\n expiry = \"\"\n node = -1 \n \n if createNewUser(name, account, auth, email, pwd, group, expiry, node):\n return redirect(url_for('confirm', confirmed = 'New User Created!'))\n else:\n flash(\"User already exists\")\n \n elif form == \"EU\":\n user = {\"Name\" : request.form['name2'], \"Email\" : \"\", \"Auth_Type\" : session[\"user\"][\"Auth_Type\"], \"Account_Type\" : session[\"user\"][\"Account_Type\"], \"Expiry\" : request.form['expiry1'], \"Grp\" : int(request.form['groupId2'])}\n \n if 'email2' in request.form:\n user[\"Email\"] = request.form['email2'] \n \n if 'node2' in request.form:\n user[\"Node\"] = int(request.form['node2']) \n\n if hl.updateUser(session[\"user\"][\"ID\"], user):\n session.pop(\"user\", None)\n return redirect(url_for('confirm', confirmed = 'User Information Successfully Updated'))\n else:\n flash(\"Cannot Update User Information\")\n else: #Must be fake input\n abort(404)\n\n if form == \"CU\":\n return render_template(\"userform_create_user.html\", postback = -1, account = \"NULL\", auth = \"NULL\", groups = groups, nodes = nodes)\n elif hl.getUser(\"ID\", form) != None:\n user = hl.getUser(\"ID\", form)\n session['user'] = user\n return render_template(\"userform_edit_user.html\", postback = -1, authtype=user[\"Auth_Type\"], accounttype=user[\"Account_Type\"])\n #return render_template(\"userform_edit_user.html\", postback = -1, username=user[\"Name\"], email=user[\"Email\"], authtype=user[\"Auth_Type\"], accounttype=user[\"Account_Type\"], grp = user[\"Grp\"], nde = user[\"Node\"], groups = groups, nodes = nodes)\n else: #Must be fake input\n abort(404)", "def user():", "def add_user():\n\n roles = Role.query.all()\n\n user_form = UserForm(request.form)\n user_form.roles.choices = [(i.name,i.name) for i in roles]\n\n if user_form.validate_on_submit():\n\n if not request.form['username'] or request.form['username'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if not request.form['email'] or request.form['email'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if not request.form['password'] or request.form['password'] == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n if request.form['password'] != request.form['retype_password']:\n flash(\"Passwords are not the same!\",\"warn\")\n return render_template('user/add_edit_user.html', title='Add User',add=True,\n user_form=user_form)\n\n hashed_password = user_manager.hash_password(user_form.password.data)\n new_user = User(\n username=user_form.username.data,\n email=user_form.email.data,\n password=hashed_password,\n confirmed_at=datetime.datetime.utcnow(),\n is_enabled=user_form.is_enabled.data,\n first_name=user_form.first_name.data,\n last_name=user_form.last_name.data,\n locale=user_form.locale.data,\n timezone=user_form.timezone.data\n )\n\n # 
Si existe la lista de roles que hemos elegido se anadira al usuario\n if user_form.roles.data:\n for rol in roles:\n if rol.name in user_form.roles.data:\n new_user.roles.add(rol)\n try:\n correct = True\n db.session.add(new_user)\n db.session.commit()\n\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error creating user, make sure username and email are unique','error')\n\n else:\n flash('Congratulations, you have created a new user!','success')\n return redirect(url_for('user_ksat.manage_user'))\n\n\n return render_template('user/add_edit_user.html', title='Add User',add=True,user_form=user_form)", "def estadoProyecto(request,pk):\n\n proyecto_validar=Proyecto.objects.get(id_proyecto=pk)\n\n if validar_permiso(request.user, \"is_gerente\",proyecto_validar)==False: # primero se valida si es gerente en el proyecto actual)\n context = {\n \"mensaje\": \"No eres gerente de proyecto, por lo tanto no puedes cambiar el estado\" ,\n \"titulo\": \"Conflicto de Permiso \",\n \"titulo_b2\": \"Salir\",\n \"boton2\": \"/proyectos/\",\n }\n return render(request, \"Error.html\", context)\n\n\n form=FormProyectoEstados(request.POST)\n p = Proyecto.objects.get(id_proyecto=pk) ##### BUSCA EL PROYECTO CON ID\n if form.is_valid():\n x=form.cleaned_data\n z=x.get(\"estado\")#### ESTADO SELECCIONADO\n #print(z)\n #print(pk)\n\n if(z==\"FINALIZADO\"):\n\n #registrarAuditoriaProyecto(request.user,\" cambio el estado a finalizado \",p.id_proyecto,p.nombre,\"\")\n return redirect('gestion:listar_proyectos')### VUELVE A LISTAR LOS PROYECTOS DEL USUARIO\n elif(z==\"INICIADO\"):\n ok=False\n fase= Fase.objects.all()\n IdFase=0\n\n cantidad=0\n try:\n comite = Comite.objects.filter(id_proyecto=pk)\n cantidad = comite.count()\n except:\n comite = None\n cantidad =0\n\n if(cantidad < 3):\n comite=None\n\n if (comite == None):\n context = {\n \"mensaje\": \"CREE EL COMITE DE CAMBIO PARA CONTINUAR\",\n \"titulo\": \"ERROR NO POSEE COMITE\",\n \"titulo_b1\": \"CREAR COMITE\",\n \"boton1\": \"/AggComite/\" + str(pk),\n \"titulo_b2\": \"CANCELAR\",\n \"boton2\": \"/detallesProyecto/\" + str(pk),\n }\n return render(request, 'Error.html', context)\n\n for i in range(fase.count()):\n if(fase[i].id_Proyecto.id_proyecto==p.id_proyecto):\n ti = TipoItem.objects.all()\n IdFase = fase[i].id_Fase\n for x in range(ti.count()):\n if(ti[x].fase.id_Fase==fase[i].id_Fase):\n ok=True\n print(IdFase)\n if(ok==True):\n registrarAuditoria(request.user,\"cambio el estado del proyecto : \"+str(p.nombre)+ \" a Iniciado\")\n p.estado=z####### SE ASIGNA ESTADO\n p.save()##### SE GUARDA\n registrarAuditoriaProyecto(request.user, \" cambio el estado a iniciado \", p.id_proyecto, p.nombre, \"\")\n return redirect('gestion:detalles_Proyecto',pk)### VUELVE A LISTAR LOS PROYECTOS DEL USUARIO\n\n context = {\n \"mensaje\":\"NO POSEE TIPOS DE ITEM CREE AL MENOS UNO PAARA INICIAR EL PROYECTO\",\n \"titulo\":\"FALTA TI\",\n \"titulo_b1\": \"Crear TI\",\n \"boton1\":\"/crear/TipoItem/\"+str(IdFase),\n \"titulo_b2\":\"Volver a proyectos\",\n \"boton2\":\"/proyectos/\"\n }\n return render(request, 'Error.html', context)\n\n\n elif(z==\"CANCELADO\"):\n if(p.estado != 'FINALIZADO'):\n registrarAuditoriaProyecto(request.user, \" cambio el estado a cancelado \", p.id_proyecto, p.nombre, \"\")\n p.estado=z####### SE ASIGNA ESTADO\n p.save()##### SE GUARDA\n return redirect('gestion:listar_proyectos')### VUELVE A LISTAR LOS PROYECTOS DEL 
USUARIO\n else:\n context = {\n \"mensaje\": \"EL PROYECTO SE ENCUENTRA FINALIZADO POR ENDE NO SE PUEDE CANCELAR\",\n \"titulo\": \"PROYECTO YA SE FINALIZO\",\n \"titulo_b1\": \"SALIR\",\n \"boton1\": \"/proyectos/\" ,\n \"titulo_b2\": \"\",\n \"boton2\": \"\"\n }\n return render(request, 'Error.html', context)\n\n context={\n \"form\":form,\n \"estado\": p.estado,\n 'proyecto':p,\n 'proyectos': p\n }\n return render(request, 'Menu/estado_proyecto.html',context)", "def adiciona_servico(self, dados={}):\n\n self.user = self.uPersistencia.buscarUsuario(\n id_usuario=dados['id_usuario'])\n\n if not self.user.getId():\n return {'status': False,\n 'msg': 'usuario nao existe',\n 'dados': dados}\n elif self.user.getIdTipo() != 2:\n # apenas usuarios do tipo prestador (2) podem adicionar\n # servicos\n return {'status': False,\n 'msg': 'adicionar servico apenas valido para prestadores',\n 'dados': dados}\n else:\n # instancia da model de servicos\n self.srv = BuscaServicos()\n\n if self.srv.busca_servico_usuario(id_usuario=self.user.getId(),\n id_servico=dados['id_servico']):\n # usuario ja presta o servico solicitado para cadastrar.\n # entao retornamos msg de operacao nao concluida\n return {'status': 0,\n 'msg': (('%s (%d) ja presta esse servico,' +\n ' operacao cancelada.') % (self.getNome(),\n self.getId())),\n 'dados': dados}\n\n res = self.user.adicionar_servico(id_servico=dados['id_servico'])\n\n return {'status': (res and 1 or 0),\n 'msg': (res and 'servico cadastrado com sucesso' or\n 'servico nao cadastrado'),\n 'dados': dados}", "def create_user() -> tuple:\n # created new user\n user_data: dict = request.get_json()\n names: str = user_data.get(\"names\")\n surname: str = user_data.get(\"surname\")\n cell: str = user_data.get(\"cell\")\n email: str = user_data.get(\"email\")\n password: str = user_data.get(\"password\")\n uid: str = user_data.get(\"uid\")\n organization_id: str = user_data.get(\"organization_id\")\n\n # Add User View will perform error checking\n return user_view.add_user(organization_id=organization_id, uid=uid, names=names, surname=surname,\n cell=cell, email=email, password=password)", "def post(self, request, *args, **kwargs):\n request.POST = request.POST.copy()\n request.POST['name']= self.kwargs['proyecto']+'_'+request.POST['name']\n return super(CrearRol,self).post(request,**kwargs)", "def post(self, request, *args, **kwargs):\n usuario=Usuario.objects.get(id=self.kwargs['pk'])\n if request.POST[\"esta_aprobado\"] == 'True':\n CorreoMail(\"Aprobado\",\"Usted fue apobado en el sistema, bienvenido!!\",usuario.user.email )\n return super(ActualizarUser, self).post(request, **kwargs)", "def create_user(prenom: str, nom: str, mail: str,\n date_naissance: Optional[datetime.datetime] = None, adresse: Optional[str] = None):\n columns = 'nom, prenom, date_naissance, mail, adresse'\n list_to_add = (nom, prenom, date_naissance, mail, adresse)\n add = (f\"INSERT INTO User \"\n f\"({columns}) \"\n f\"VALUES (%s, %s, %s, %s, %s)\")\n mycursor.execute(add, list_to_add)\n mydb.commit()\n return f\"L'utilisateur {nom} {prenom} a été créé\"", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n 
self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def post(self, *args, **kw):\n pp = None\n ctx = \"\"\n if kw[\"id_proyecto\"]:\n ctx = \"id_proyecto\"\n pp = PoseePermiso('crear rol', id_proyecto=int(kw[\"id_proyecto\"]))\n elif kw[\"id_fase\"]: \n ctx = \"id_fase\"\n pp = PoseePermiso('crear rol', id_fase=int(kw[\"id_fase\"]))\n elif kw[\"id_tipo_item\"]:\n ctx = \"id_tipo_item\"\n pp = PoseePermiso('crear rol', id_tipo_item=int(kw[\"id_tipo_item\"]))\n else:\n pp = PoseePermiso('crear rol')\n \n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n\n #en caso de exito\n ok_url = u\"\"\n #url que redirige al new y rellena los parametros que ya ingreso\n error_url = u\"/rolesplantilla/new/\" \n\n if ctx == \"id_proyecto\":\n ok_url = \"/proyectos/%s/edit\" % kw[ctx]\n error_url += \"proyecto?{ctx}={val}\".format(ctx=ctx, val=kw[ctx])\n elif ctx == \"id_fase\":\n ok_url = \"/fases/%s/edit\" % kw[ctx]\n error_url += \"fase?{ctx}={val}\".format(ctx=ctx, val=kw[ctx]) \n elif ctx == \"id_tipo_item\":\n ok_url = \"/tipositems/%s/edit\" % kw[ctx]\n error_url += \"ti?{ctx}={val}\".format(ctx=ctx, val=kw[ctx]) \n else:\n tipo = unicode(kw[\"tipo\"].lower())\n ok_url = \"/rolesplantilla/\"\n if tipo.find(u\"proyecto\") >= 0:\n error_url += \"proyecto?\"\n elif tipo.find(u\"fase\") >= 0:\n error_url += \"fase?\"\n else:\n error_url += \"ti?\"\n \n #agregamos los parametros que ya ingreso el usuario.\n nombre = kw.get(\"nombre_rol\", None).encode(\"utf-8\")\n nombre_q = urllib.quote(nombre)\n desc = kw.get(\"descripcion\", None).encode(\"utf-8\")\n desc_q = urllib.quote(desc)\n params = \"&nombre_rol=\" + nombre_q + \"&descripcion=\" + desc_q\n error_url += params\n \n if not (kw.has_key(\"permisos\") and kw[\"permisos\"]):\n flash(\"Debe seleccionar al menos un permiso\", 'warning')\n redirect(error_url)\n else: \n Rol.crear_rol(**kw)\n flash(u\"El Rol se ha creado correctamente\")\n redirect(ok_url)", "def comite(request,pk):\n\n proyecto = User_Proyecto.objects.filter(proyecto_id=pk)\n gerente = User.objects.get(id=proyecto[0].user_id)\n\n comite = Comite.objects.all()\n form = Usuario.objects.all()\n proyectos=Proyecto.objects.get(id_proyecto=pk)\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n #form.save()\n return redirect('gestion:comite',pk)\n else:\n list=[]\n if(comite != None):\n for i in range(form.count()):\n ok = False\n if form[i].esta_aprobado == True:\n for x in comite:\n if x.id_user == form[i].user.id and x.id_proyecto == pk:\n ok=True\n if ok:\n list.append(form[i].user.id)\n print(list)\n return render(request, 'proyectos/ver_comite.html', {'form': form,'list':list,'pk':pk,'proyectos':proyectos,'idGerente':gerente.id})", "def signup():", "def user(self):", "def new(self, *args, **kw):\n pp = PoseePermiso('crear rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n tmpl_context.widget = self.new_form\n if request.environ.get('HTTP_REFERER') == \"http://\" + request.environ.get('HTTP_HOST',) + \"/\":\n atras = \"../\"\n else:\n atras = \"/roles\"\n return dict(value=kw, page=\"Nuevo Rol\", action=self.action, atras=atras)", "def create_user():\n if request.method == 'POST':\n PLAN.create_user(request.form['fname'],\n request.form['lname'],\n request.form['username'],\n request.form['password'],\n request.form['email'])\n return redirect(url_for('index'))\n return 
render_template('newuser.html')", "def create(self, request, *args, **kwargs):\n response = super(CreateUserView, self).create(request, *args, **kwargs)\n response.data['message'] = \"Registrado Exitosamente\"\n return response", "def affichage_creation_tournoi():\n nom = \"\"\n lieu = \"\"\n date = \"\"\n nb_tours = 4\n joueurs = []\n temps = \"\"\n note = \"\"\n\n print(\"\\n---------------------------\")\n while len(nom) == 0:\n try:\n nom = str(input(\"\\nNom : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nom valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(lieu) == 0:\n try:\n lieu = str(input(\"\\nLieu : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un lieu valide.\")\n sl(2)\n continue\n\n print(\"\\n---------------------------\")\n while len(date) == 0:\n try:\n date = str(input(\"\\nDate\\nFormat : jj/mm/aaaa : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une date valide.\")\n sl(2)\n continue\n test_date = OutilsControleurs.test_date(date)\n if test_date == 0:\n print(\"\\nVous avez saisi une valeur trop grande.\")\n date = \"\"\n if test_date == 1:\n print(\"\\nVous avez saisi une valeur trop petite.\")\n date = \"\"\n if test_date == 2:\n break\n if test_date == 3:\n print(\"\\nVous avez saisi un format de date incorrect.\")\n date = \"\"\n\n print(\"\\n---------------------------\")\n nb_tours_modif = \"\"\n while nb_tours_modif != 2 or nb_tours_modif != 1:\n try:\n print(\"\\nNombre de tours\\nPar default le nombre est de 4\\nVoulez-vous modifier cette valeur ?\")\n nb_tours_modif = int(input(\"\\n1 - Oui\\n2 - Non\\n\\nVotre choix: \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours_modif == 1:\n while nb_tours == 4:\n try:\n nb_tours = int(input(\"\\nNombre de tours : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sl(2)\n continue\n if nb_tours == 4:\n break\n break\n if nb_tours_modif == 2:\n break\n\n print(\"\\n---------------------------\\n\\nListe des joueurs :\\n\")\n liste_joueurs_tournois = Joueur.joueurs_tournoi()\n if liste_joueurs_tournois == 0:\n print(\"Il n'y a pas ou pas suffisament de joueurs pour organiser un tounois.\")\n print(\"Veuillez ajouter des joueurs via le menu.\")\n input(\"\\nAppuyer sur entrer pour continuer\")\n return\n\n for arg in liste_joueurs_tournois:\n print(arg)\n x = 8\n while x != 0:\n try:\n joueur = int(input(\"Saisir encore {} indice de joueurs : \".format(x)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un indice valide.\")\n sl(2)\n continue\n if joueur > 0 and joueur <= len(liste_joueurs_tournois):\n if joueur not in joueurs:\n joueurs.append(joueur)\n else:\n print(\"Vous avez deja saisi ce joueur.\")\n x += 1\n else:\n x += 1\n x -= 1\n\n y = 1\n nom_joueurs = []\n for arg in liste_joueurs_tournois:\n arg = arg[:-15]\n nom_joueurs.append(str(arg).replace(\"Indice joueur : {}\\n \".format(y), \"\").replace(\"\\n \", \"\"))\n y += 1\n joueurs = Joueur.get_joueurs_tournoi(joueurs, nom_joueurs)\n\n print(\"\\n---------------------------\")\n temps_choix = 0\n while temps_choix != 1 or temps_choix != 2 or temps_choix != 3:\n try:\n temps_choix = int(input(\"\\nContrôle de temps\\n1 - Bullet\\\n \\n2 - Blitz\\n3 - Coup rapide\\n\\nVotre choix : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if temps_choix == 1:\n temps = \"Bullet\"\n break\n if temps_choix == 2:\n temps = 
\"Blitz\"\n break\n if temps_choix == 3:\n temps = \"Coup rapide\"\n break\n\n print(\"\\n---------------------------\")\n while len(note) == 0:\n try:\n note = str(input(\"\\nDescription : \"))\n except ValueError:\n print(\"\\nVous n'avez pas saisi une valeur valide.\")\n sl(2)\n continue\n if len(note) == 0:\n break\n return nom, lieu, date, nb_tours, joueurs, temps, note", "def add_nurse(request):\n if request.POST:\n post = request.POST\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n password = post.get(\"password\")\n chosen_hospitals = post.getlist(\"chosen_hospitals\")\n\n new_user = User.objects.create_user(\n username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n status=UserStatus.objects.get(pk=2)\n )\n\n if new_user:\n for chosen_hospital in chosen_hospitals:\n HospitalStaff.objects.create(user_profile=new_user_profile, hospital=Hospital.objects.get(pk=chosen_hospital))\n\n return redirect('add_nurse')\n\n hospitals = Hospital.objects.all()\n return render(request, 'add_nurse.html', {'hospitals': hospitals})", "def registrati(request):\n form = UserCreationForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n login(request, user)\n my_group = Group.objects.get(name='Common')\n my_group.user_set.add(user)\n messages.add_message(request, messages.SUCCESS, 'Utente creato con successo!')\n return HttpResponseRedirect('/')\n else:\n for msg in form.error_messages:\n messages.add_message(request, messages.ERROR, form.error_messages[msg])\n HttpResponseRedirect('main_page/registration')\n form = UserCreationForm()\n return render(request, 'registration/registrati.html', {'form': form})", "def add_doctor(request):\n if request.POST:\n post = request.POST\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n password = post.get(\"password\")\n chosen_hospitals = post.getlist(\"chosen_hospitals\")\n\n new_user = User.objects.create_user(\n username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n status=UserStatus.objects.get(pk=1)\n )\n\n if new_user:\n for chosen_hospital in chosen_hospitals:\n HospitalStaff.objects.create(user_profile=new_user_profile, hospital=Hospital.objects.get(pk=chosen_hospital))\n\n return redirect('add_doctor')\n\n hospitals = Hospital.objects.all()\n return render(request, 'add_doctor.html', {'hospitals': hospitals})", "def confirmarOperacion(self):\n\n if self.tableNC.rowCount() == 0 :\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"No se han agregado productos a la Nota de Crédito\"))\n\n else:\n ok = QtGui.QMessageBox.information(self,QtCore.QString.fromUtf8(\"Confirmación\"),\\\n QtCore.QString.fromUtf8(\"¿Desea generar la Nota Crédito?\"),\\\n QtGui.QMessageBox.Cancel, QtGui.QMessageBox.Accepted)\n\n if (ok==1):\n notaCredito = NotaCreditoModel(NotaCredito.generarNumero(self.sesion))\n notaCredito.guardar(self.sesion)\n for lineaNC, data in enumerate(self.detallesReintegrables):\n detalleNC = DetalleNCModel(notaCredito.numero, lineaNC+1, data[0], data[1])\n detalleNC.setImporte(data[3])\n detalleNC.setDescuento(data[2])\n 
detalleNC.guardar(self.sesion)\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La Nota de Crédito ha sido generada con éxito\"))\n self.facturaSeleccionada.setNC(notaCredito.numero)\n self.facturaSeleccionada.modificar(self.sesion)\n\n #Se genera un diccionario con los datos necesarios para imprimir la nota de credito\n data = {}\n data[\"numero\"] = notaCredito.numero\n data[\"fecha\"] = notaCredito.fecha_emision\n data[\"detalles\"] = self.detallesImprimibles\n generarNotaCredito(data)\n self.limpiarVentana()\n\n else:\n QtGui.QMessageBox.information(self,\"Aviso\",QtCore.QString.fromUtf8(\"La Nota de Crédito no ha sido generada\"))", "def step2(request):\n\tform = UserCreationAdminForm(request.POST or None)\n\tif form.is_valid():\n\t\tcomp = form.save()\n\t\tcomp.groups.add(Group.objects.get(name='usuario-admin-compras'))\n\n\t\treturn HttpResponseRedirect(\"/comprador/paso3/\")\n\n\t# crear el user profile\n\t# redireccionar al home\n\ttemplate = 'customerbuy/step2.html'\n\treturn render(request, template,{'form':form})\n\t#return render_to_response(\"customer/signup.html\", {'form': form,}, context_instance=RequestContext(request))", "def create_user(context, params):\n form_user = dict()\n # form_user['edited_by'] = context.user\n if params.get('username'):\n form_user['username'] = params.get('username')\n else:\n form_user['username'] = create_username(params) # 'email_user{}'.format(MISUser.objects.latest('id').id + 1\n form_user['first_name'] = params.get('first_name')\n form_user['last_name'] = params.get('last_name')\n form_person = create_person(params)\n form_user.update(form_person)\n user = User.objects.create(**form_user)\n user.set_password(params.get('password'))\n\n email = {'label': 'Work', 'val': params.get('email'), 'person': user, 'is_main': True}\n create_email(context, email)\n\n user.save()\n return user", "def create(request, template='contacts/regtaller/create.html'):\n\n user = request.user\n\n if not user.is_authenticated():\n try:\n user = User.objects.get(first_name='Anonymous')\n except:\n username = str(random.randint(0,1000000))\n u = User(username=username, first_name='Anonymous', last_name='User')\n u.set_unusable_password()\n u.save()\n user = User.objects.get(first_name='Anonymous')\n\n if request.method == 'POST':\n form = TallerRegistrationCreateForm(request.POST)\n\n\n if form.is_valid():\n\n\n\n p = form.save(commit=False)\n person_list = Person.objects.filter(email_address__iexact=p.email_address)\n person = None\n if person_list.count() > 0:\n for iter_person in person_list:\n if person is None: person = iter_person\n if iter_person.first_name.lower().strip() == p.first_name.lower().strip():\n person = iter_person\n break\n else:\n raise Exception( _(\"This email address doesn't exist in the inscription database\"))\n\n p.person = person\n # delete previous registration\n try:\n regtaller = TallerRegistration.objects.get(person_id__exact=person.id)\n regtaller.delete()\n except TallerRegistration.DoesNotExist:\n pass\n\n p.user_add = user\n p.user_modify = user\n p.date_registration = datetime.now()\n p.save()\n\n # tallers\n order = 0\n for taller_id in form.cleaned_data['tallers'].split(','):\n order = order + 1\n trelation = TallerRelation(taller_id=taller_id, taller_registration_id=p.id,preference_order=order)\n trelation.save()\n\n if user.is_authenticated() and user.first_name != 'Anonymous':\n return HttpResponseRedirect(p.get_absolute_url())\n else:\n # Enviam correu OK + Mostram success\n kwvars = {\n 'object': 
p\n }\n context = Context(kwvars)\n mailtemplate = 'tll_registration_es' if p.person.lang != '2' else 'tll_registration_ca'\n status = sendTemplateMail(context,mailtemplate,[p.email_address])\n if status == _('Mail sent'):\n mail_ok = True\n else:\n mail_ok = False\n\n return render_to_response('contacts/regtaller/new_success.html', {'object': p, 'mail_ok' : mail_ok, 'mail_status': status}, RequestContext(request))\n\n else:\n form = TallerRegistrationCreateForm()\n\n kwvars = {\n 'form': form\n }\n\n return render_to_response(template, kwvars, RequestContext(request))", "def __str__(self):\n return self.usuario", "def signup(self, request, user):\n pass", "def create_user_start_program_advices_list_empty(self):\n username = 'pseudo'\n email = 'christiane@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=4, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n weight = 60\n ProfileUser.objects.create(user=user_created, starting_weight=weight,\n actual_goal_weight=10, final_weight=50)\n self.add_user_results(50, user_created, weight)\n user = HistoryUser.objects.get(user=user_created)\n user.start_questionnaire_completed = True\n user.save()\n\n return user_created", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n #By adding wins, it added it to the create_user input #api page.\n wins = defaults['wins']\n user = User(name=request.user_name, email=request.email, wins = wins)\n #user.put() sends the user info that is ndb\n user.put()\n\n for key,val in sorted(craft.items()):\n outmessage =(\"{} : Can be make with {}\".format(key, val))\n return StringMessage(message='User {} created!'.format(\n outmessage))\n #This just returns a message for response at bottom of API\n #screen.", "def crear_registro(request):\n if request.method == 'POST':\n att = AttentionType.objects.get(name=request.POST['tipo_atencion'])\n if request.POST['pin']:\n persona = Persona.objects.get(pin=request.POST['pin'])\n try:\n numero = visualizador(request).content\n\n atencion = InitialAttention.objects.get(\n attention_number=numero,\n attention_type=att,\n created__contains=timezone.now().date()\n )\n\n registro_guardado = Registers.objects.create(\n pin=persona,\n attention_number=atencion,\n priority_attention=False,\n attention_type=att,\n start_attention=timezone.now(),\n observations=request.POST['observaciones'] if request.POST['observaciones'] else '',\n finish_attention=timezone.now(),\n tiempo_espera=Decimal(format((timezone.now()-atencion.created).seconds / 60, '.1f')),\n sellplace=SellPlace.objects.get(id_sellplace=1),\n sucursal=Sucursal.objects.get(id_sucursal=1),\n )\n serializer = RegistersSerializer(registro_guardado)\n except ValueError:\n return JSONResponse('No hay turnos para ser Atendidos!', status=400)\n except ObjectDoesNotExist:\n return JSONResponse('No hay turnos para ser Atendidos!', status=400)\n\n return JSONResponse(serializer.data, status=201)", "def create_user_object(self, request):\r\n user = {\r\n \"first_name\": request.form.get(\"first_name\"),\r\n \"last_name\": request.form.get(\"last_name\"),\r\n \"age\": request.form.get(\"age\"),\r\n \"cpr_number\": request.form.get(\"CPR\"),\r\n \"email\": request.form.get(\"email\"),\r\n \"phone_number\": request.form.get(\"phone_number\"),\r\n \"password\": 
PasswordHasher().hash(request.form.get(\"password\")),\r\n \"bank_account\": str(BankAccount(\"Savings\", 1000.00).store_account().inserted_id),\r\n \"crypto_wallet\": str(CryptoWallet(\"Bitcoin\", 0.0045).store_account().inserted_id)\r\n }\r\n return user", "def save_model(self, request, obj, form, change):\n obj.propietario = request.user\n obj.save()", "def post(self, request):\n self.context[\"form\"] = AddUserForm(request.POST)\n form = self.context[\"form\"]\n if form.is_valid():\n # Reject input if user already exists\n username = form.cleaned_data[\"username\"]\n if User.objects.filter(username=username).count() > 0:\n # reject\n ev = PiEvent.createEvent(type=PiEvent.ADDUSER_TYPE, status=PiEvent.FAIL_STATUS,\n message=\"User '{}' already exists\".format(username))\n ev.save()\n\n return tryAgain(msg=\"The username '<b>{}</b>' already exists\".format(username),\n url=\"javascript:history.back()\")\n password = form.cleaned_data[\"password\"]\n firstName = form.cleaned_data[\"firstName\"]\n lastName = form.cleaned_data[\"lastName\"]\n email = form.cleaned_data[\"email\"]\n organization = form.cleaned_data[\"organization\"]\n mobilePhone = form.cleaned_data[\"mobilePhone\"]\n workPhone = form.cleaned_data[\"workPhone\"]\n otherPhone = form.cleaned_data[\"otherPhone\"]\n note = form.cleaned_data[\"note\"]\n\n # Create a Django User object\n user = User.objects.create_user(username, email=email, password=password)\n user.first_name = firstName\n user.last_name = lastName\n user.save()\n \n msUser = MSUser(organization=organization,\n work_phone=workPhone,\n mobile_phone=mobilePhone,\n other_phone=otherPhone,\n note=note,\n user=user)\n msUser.save()\n\n ev = PiEvent.createEvent(type=PiEvent.ADDUSER_TYPE, status=PiEvent.SUCCESS_STATUS,\n message=\"User '{}' added\".format(unicode(msUser)))\n ev.save()\n return HttpResponseRedirect(\"/dbkeeper/\")\n\n return render(request, \"dbkeeper/add.html\", self.context)", "def create():\n error = None\n success = False\n if request.method == 'POST':\n nome = request.form['nome']\n if not nome:\n error = 'Nome é obrigatório.'\n else:\n try:\n if verifica_autor_bd(nome):\n error = 'Autor já cadastrado!'\n else:\n db.insert_bd('INSERT INTO autor values (default, \"%s\")' % nome)\n success = True\n except Exception as e:\n print(e)\n return redirect(url_for('error'))\n\n return render_template('autor/create.html', error=error, success=success)", "def create_user_form():\n template_name = \"create_user.html\"\n users = []\n print request.form\n\n flash(request.form['username'])\n flash(request.form['email'])\n\n return render_template(template_name, users=users)", "def mod_user(request, usuario_id):\n user = User.objects.get(username=request.user.username)\n #Validacion de permisos----------------------------------------------\n roles = UsuarioRolSistema.objects.filter(usuario = user).only('rol')\n permisos_obj = []\n for i in roles:\n permisos_obj.extend(i.rol.permisos.all())\n permisos = []\n for i in permisos_obj:\n permisos.append(i.nombre)\n #--------------------------------------------------------------------\n usuario = get_object_or_404(User, id=usuario_id)\n #Datos nuevos del usuario\n if request.method == 'POST':\n form = ModUsuariosForm(request.POST)\n if form.is_valid():\n usuario.first_name = form.cleaned_data['first_name']\n usuario.last_name = form.cleaned_data['last_name']\n usuario.email = form.cleaned_data['email']\n usuario.save()\n return HttpResponseRedirect(\"/usuarios\")\n else:\n form = 
ModUsuariosForm(initial={'first_name':usuario.first_name, 'last_name': usuario.last_name,'email':usuario.email})\n return render_to_response('admin/usuarios/mod_usuario.html',{'form':form,\n 'user':user,\n 'usuario':usuario,\n 'mod_usuario': 'Modificar usuario' in permisos},context_instance=RequestContext(request))", "def user_register():\n try:\n #username=request.form['username']\n email = request.form['email']\n first_name = request.form['first_name']\n middle_name = request.form['middle_name']\n last_name = request.form['last_name']\n password = request.form['password']\n society_id = request.form['society_id']\n flat_id = request.form['flat_id']\n isadmin = request.form['isadmin']\n user_entity = request.form['user_entity']\n username = request.form['email']\n \n# postgres_insert_query=create_user.format(str(username),str(email),str(first_name),str(middle_name),str(last_name),str(password),str(society_id),str(isadmin))\n \n df = pd.DataFrame({'username': str(username),\n 'email': str(email),\n 'first_name': str(first_name),\n 'middle_name': str(middle_name),\n 'last_name': str(last_name),\n 'password': str(password),\n 'society_id': str(society_id),\n 'isadmin': str(isadmin),\n 'flat_id': str(flat_id),\n 'user_entity': str(user_entity)\n },\n index=[0])\n\n with dbm.dbManager() as manager:\n manager.commit(df, 'visitor_management_schema.user_table')\n return \"User registered Succesfully\"\n except psycopg2.DatabaseError as error:\n errors = {'registeration': False, 'error': error}\n return str(errors)", "def assistenza(request):\n operator_list = User.objects.filter(groups__name='Operators')\n num = random.randint(0, len(operator_list)-1)\n random_operator = operator_list[num]\n form = ContactForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n mittente = request.user\n destinatario = random_operator\n data = form.cleaned_data['date']\n testo = form.cleaned_data['messaggio']\n messaggio = Messaggio(userMittente=mittente, userDestinatario=destinatario, data_ora=data, text=testo)\n messaggio.save()\n messages.add_message(request, messages.SUCCESS, 'Messaggio inviato con successo!')\n return HttpResponseRedirect('/')\n else:\n form = ContactForm()\n return render(request, 'main_page/contact.html', {'form': form, 'operator': random_operator})", "def post(self, *args, **kw):\n pp = PoseePermiso('crear rol')\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(self.action)\n\n if (not kw.has_key('tipo')):\n kw[\"tipo\"] = self.rol_tipo\n Rol.crear_rol(**kw)\n\n redirect(self.action)", "def registro_y_login(self, cuenta, compania, dominio, usuario, password):\n self.login_page.click_desplegar_cuenta()\n self.login_page.wait_fields_load()\n self.login_page.set_cuenta(cuenta)\n self.login_page.set_jdd(compania)\n self.login_page.set_dominio(dominio)\n self.login_page.set_usuario(usuario)\n self.login_page.set_password(password)\n report.write_line(\"Llenando los datos de login\", report.Status.SUCCESS, True)\n self.login_page.click_login()", "def run(self):\r\n # First Name\r\n Label(master=self.root, width=15, text=\"FirstName\",\r\n font=FONT).grid(row=0, column=0)\r\n Entry(master=self.root, textvariable=self.firstname_value,\r\n font=FONT).grid(row=0, column=1)\r\n\r\n # Last Name\r\n Label(master=self.root, width=15, text=\"LastName\",\r\n font=FONT).grid(row=1, column=0)\r\n Entry(master=self.root, text='', textvariable=self.lastname_value,\r\n font=FONT).grid(row=1, column=1)\r\n\r\n # ID Number\r\n 
Label(master=self.root, width=15, text=\"ID NUMBER\",\r\n font=FONT).grid(row=2, column=0)\r\n Entry(master=self.root, textvariable=self.id_number_value,\r\n font=FONT).grid(row=2, column=1)\r\n\r\n # Country\r\n Label(master=self.root, width=15, text=\"Country\",\r\n font=FONT).grid(row=3, column=0)\r\n Entry(master=self.root, textvariable=self.country_value,\r\n font=FONT).grid(row=3, column=1)\r\n\r\n # Creating Submit button\r\n Button(master=self.root, text=\"Submit\", font=FONT,\r\n command=self.create_user).grid(row=4, column=1)\r\n\r\n # Room Number\r\n self.room_text = Label(master=self.root, width=15, font=FONT)\r\n self.room_text.grid(row=5, column=0)\r\n\r\n self.room_num = Label(master=self.root, width=15, font=FONT)\r\n self.room_num.grid(row=5, column=1)\r\n\r\n self.root.mainloop()", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def create_user(change):\n return change()", "def form_valid(self, form):\n form.instance.user = self.request.user\n return super(TaskCreate, self).form_valid(form)", "def inserir_grupo():\n try:\n if current_user.is_administrator():\n if request.method == 'POST':\n grupo = Grupo(\n grupo_nome = request.form['grupo_nome']\n )\n db.session.add(grupo)\n db.session.commit()\n return listar_usuarios()\n return render_template('admin/inserir_grupo.html')\n return redirect(url_for('main.index'))\n except Exception as e:\n abort(500, e)", "def registerPage(request):\n if request.user.is_authenticated:\n return redirect('indexPage')\n form = PersonalUserCreationForm()\n if request.method == 'POST':\n form = PersonalUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(\n request, 'Un nouveau compte vient d\\'être créé pour ' + user\n )\n return redirect('loginPage')\n context.update({'form': form})\n return render(request, 'register.html', context)", "def make_new_user():\n return render_template('users/new_user_form.html')", "def post(self, usuario_actual):\n lista_de_reproduccion = ListaDeReproduccion(nombre=self.argumentos['nombre'],\n descripcion=self.argumentos['descripcion'])\n errores_validacion = ValidacionListaDeReproduccion.validar_registro_lista_de_reproduccion(lista_de_reproduccion)\n if len(errores_validacion) > 0:\n return errores_validacion, 400\n lista_de_reproduccion.usuario_id = usuario_actual.id_usuario\n lista_de_reproduccion.guardar()\n return lista_de_reproduccion.obtener_json(), 201", "def cargar_cuota(request):\n usuario = request.user\n if usuario.groups.filter(name='secretaria').exists():\n #if usuario.has_perm('sociedad.add_cuota'):\n if request.method ==\"POST\":\n cuotaForm = CuotaForm(request.POST)\n if cuotaForm.is_valid():\n cuota = cuotaForm.save()\n return redirect(detalle_cuota, pk=cuota.pk)\n else:\n cuotaForm = CuotaForm()\n else:\n return redirect('sinPermisos')\n return render(request, 'sociedad/cargar_cuota.html', {'cuotaForm':cuotaForm,})", "def cargar_cuota(request):\n usuario = request.user\n if usuario.groups.filter(name='secretaria').exists():\n #if usuario.has_perm('sociedad.add_cuota'):\n if request.method ==\"POST\":\n cuotaForm = CuotaForm(request.POST)\n if cuotaForm.is_valid():\n 
cuota = cuotaForm.save()\n return redirect(detalle_cuota, pk=cuota.pk)\n else:\n cuotaForm = CuotaForm()\n else:\n return redirect('sinPermisos')\n return render(request, 'sociedad/cargar_cuota.html', {'cuotaForm':cuotaForm,})", "def post(self):\n post_data = request.get_json()\n\n # decode token and check if expired\n token = post_data.get('odoo_contact_token')\n odoo_contact_id, expiration_date = decode_token(token)\n\n if datetime.now() > expiration_date:\n return {\n \"error_id\": \"alumni_register_link_expired_error\",\n \"message\": \"Unauthorized: Registration link is expired.\"\n }, 401\n\n # check if such odoo user exists\n filter_list = []\n filter_list.append(['id', '=', odoo_contact_id])\n from app.controllers.odoo_controller import OdooController\n try:\n contacts_number = OdooController.count_number_of_odoo_contacts_by_filter_list(filter_list)\n except OdooIsDeadError as err:\n abort(503, err, error_id='odoo_connection_error')\n\n if contacts_number == 0:\n return {\n \"error_id\": \"odoo_contact_not_found_error\",\n \"message\": \"Odoo contact not found.\"\n }, 404\n\n # create alumni user\n from app.controllers.alumni_controller import AlumniController\n post_data.update({'odoo_contact_id': odoo_contact_id})\n response = AlumniController.create_alumni_user(post_data)\n\n \n if response[1] == 201:\n # delete record in alumni invite status\n from app.controllers.alumni_invite_status_controller import AlumniInviteStatusController\n AlumniInviteStatusController.delete_invite_status_record(odoo_contact_id)\n\n # send email for confirmation\n receiver_email = response[0]['email']\n alumni_uuid = response[0]['alumni_uuid']\n send_confirmation_email(receiver_email, alumni_uuid)\n\n return response", "def add_admin(request):\n if request.POST:\n post = request.POST\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n password = post.get(\"password\")\n chosen_hospitals = post.getlist(\"chosen_hospitals\")\n\n new_user = User.objects.create_user(\n username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n status=UserStatus.objects.get(pk=4)\n )\n\n if new_user and new_user_profile:\n for chosen_hospital in chosen_hospitals:\n HospitalStaff.objects.create(user_profile=new_user_profile, hospital=Hospital.objects.get(pk=chosen_hospital))\n\n return redirect('add_admin')\n\n hospitals = Hospital.objects.all()\n\n return render(request, 'add_admin.html', {'hospitals': hospitals})", "def register():\n\n if current_user.is_authenticated:\n return redirect(url_for('general.show_dash'))\n\n form = RegistrationForm()\n\n if form.validate_on_submit():\n\n #Continua con la creacion de un usuario\n hashed_password = user_manager.hash_password(form.password.data)\n new_user = User(\n username=form.username.data,\n email=form.email.data,\n password=hashed_password,\n confirmed_at=datetime.datetime.utcnow(),\n is_enabled=True,\n )\n\n\n role='User'\n role_default = Role.query.filter_by(name=role).first()\n\n if not role_default:\n new_role_default = Role(name = 'User')\n new_user.roles.add(new_role_default)\n else:\n new_user.roles.add(role_default)\n\n try:\n correct = True\n db.session.add(new_user)\n db.session.commit()\n\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n 
flash('Error creating user, make sure username and email are unique','error')\n\n else:\n flash('Congratulations, you are now a registered user!','success')\n return redirect(url_for('user.login'))\n return render_template('extensions/flask_user/register.html', title='Register', form=form)", "def create_user(user,con,cur):\n\n values=(user.id,user.name,user.last_name,user.email,user.tel,user.user_name,user.password,user.user_type)\n\n cur.execute(\n \"\"\" insert into users values (%s,%s,%s,%s,%s,%s,%s,%s)\"\"\",values)\n print(\"\"\" insert into users values (%s,%s,%s,%s,%s,%s,%s,%s)\"\"\",values)\n con.commit()", "def _create_nsem_user():\n users = User.objects.filter(username=settings.CWWED_NSEM_USER)\n if users.exists():\n user = users[0]\n else:\n user = User.objects.create_user(settings.CWWED_NSEM_USER, password=settings.CWWED_NSEM_PASSWORD)\n group, _ = Group.objects.get_or_create(name=settings.CWWED_NSEM_GROUP)\n perm_names = [\n 'add_{}'.format(NsemPsa._meta.model_name),\n 'add_{}'.format(NamedStormCoveredDataSnapshot._meta.model_name),\n ]\n perms = Permission.objects.filter(codename__in=perm_names)\n # set permission\n user.user_permissions.set(list(perms))\n group.permissions.set(list(perms))\n # add user to group\n group.user_set.add(user)", "def on_user_create(self, user):", "def buscar_usuario(self, dados={}, tipo_busca=None):\n\n if tipo_busca == 1:\n self.user = self.uPersistencia.buscarUsuario(\n id_usuario=dados['id_usuario'],\n email=dados['email'])\n elif tipo_busca == 2:\n self.user = self.uPersistencia.buscarUsuarioCrendenciais(\n email=dados['email'],\n senha=dados['senha'])\n else:\n self.user = self.uPersistencia.criaUsuario()\n\n if not self.user.getId():\n return {'status': 0,\n 'msg': 'usuario nao encontrado',\n 'dados': self.user.getToString()}\n else:\n return {'status': 1,\n 'msg': 'usuario encontrado',\n 'dados': self.user.getToString()}", "def register_user(self):\n if self.password!=self.confirm_pwd:\n return \"The passwords do not match\"\n for user in users_list:\n if user['email']==self.email:\n return \"The email already exists. 
Choose another email\"\n hashed_password=generate_password_hash(self.password)\n users_dict={\n \"id\":self.user_id,'firstname':self.first_name,\n 'lastname':self.last_name,'isAdmin':self.isAdmin,\n 'email':self.email,\"phonenumber\":self.phonenumber,\n \"username\":self.username,'password':hashed_password\n }\n users_list.append(users_dict)\n return {\n \"id\":self.user_id,'firstname':self.first_name,\n 'lastname':self.last_name,'isAdmin':self.isAdmin,\n 'email':self.email,\"username\":self.username,\n \"phonenumber\":self.phonenumber\n }", "def save(self, user, **kwargs):\n corporate_membership = super(CorpMembForm, self).save(commit=False)\n corporate_membership.corp_app = self.corp_app\n creator_owner = user\n\n if not self.instance.pk:\n mode = 'add'\n else:\n mode = 'edit'\n\n if mode == 'add':\n anonymous_creator = kwargs.get('creator', None)\n if anonymous_creator:\n corporate_membership.anonymous_creator = anonymous_creator\n if not isinstance(creator_owner, User):\n # if anonymous is creating the corporate membership\n # temporarily use the first admin, the creator will be assigned \n # back to the real user on approval\n tmp_user = User.objects.filter(is_staff=1, is_active=1)[0]\n creator_owner = tmp_user\n\n corporate_membership.creator = creator_owner\n corporate_membership.creator_username = creator_owner.username\n\n if not user.profile.is_superuser:\n corporate_membership.status = 1\n corporate_membership.status_detail = 'pending'\n corporate_membership.join_dt = datetime.now()\n\n corporate_membership.owner = creator_owner\n corporate_membership.owner_username = creator_owner.username\n\n # calculate the expiration dt\n corporate_membership.save()\n for field_obj in self.field_objs:\n if (not field_obj.field_name) and field_obj.field_type not in [\n 'section_break', 'page_break']:\n field_key = \"field_%s\" % field_obj.id\n value = self.cleaned_data[field_key]\n if value and self.fields[field_key].widget.needs_multipart_form:\n if not type(value) is unicode:\n value = fs.save(join(\"forms\",\n str(uuid4()),\n value.name),\n value)\n # if the value is a list convert is to a comma delimited string\n if isinstance(value, list):\n value = ','.join(value)\n if not value:\n value = ''\n\n if hasattr(field_obj, 'entry') and field_obj.entry:\n field_obj.entry.value = value\n field_obj.entry.save()\n else:\n corporate_membership.fields.create(field_id=field_obj.id,\n value=value)\n\n # update authorized domain if needed\n if self.corp_app.authentication_method == 'email':\n update_auth_domains(corporate_membership,\n self.cleaned_data['authorized_domains'])\n\n return corporate_membership", "def user_register():\n \n data = user_obj.user_register(request.forms) \n return data", "def DeleteComite(request,pk):#esta enlazado con la clase FaseForm del archivo getion/forms\n\n proyecto = User_Proyecto.objects.filter(proyecto_id=pk)\n gerente = User.objects.get(id=proyecto[0].user_id)\n print(gerente.username)\n\n proyecto_validar=Proyecto.objects.get(id_proyecto=pk)\n\n if validar_permiso(request.user, \"is_gerente\",proyecto_validar)==False: # primero se valida si es gerente en el proyecto actual)\n messages.error(request, 'No eres gerente de proyecto, por lo tanto no puedes eliminar el comite de cambio')\n return redirect('gestion:comite', pk)\n\n comite = Comite.objects.all()\n form = Usuario.objects.all()\n proyectos=Proyecto.objects.get(id_proyecto=pk)\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n 
some_var=request.POST.getlist('checkbox')\n\n if ((len(some_var)+1)%2==0 or (len(some_var)+1)==1):# SE VALIDA QUE DEBE DE SER IMPAR Y MAYOR A 1\n messages.error(request,'EL NUMERO DE USUARIOS EN EL COMITE DEBE DE SER IMPAR Y MAYOR A UNO')\n return redirect('gestion:DeleteComite',pk)\n\n for id in some_var:\n id_user =id\n usuario = User.objects.get(id=id_user)\n registrarAuditoriaProyecto(request.user, \"Desvinculo del comite de cambio al usuario: \" + str(usuario.username),\n proyectos.id_proyecto, proyectos.nombre, \"\")\n\n desvinculacionComite(request,pk,id_user)\n\n\n return redirect('gestion:comite',pk)\n else:\n list=[]\n if(comite != None):\n for i in range(form.count()):\n ok = False\n if form[i].esta_aprobado == True:\n for x in comite:\n if x.id_user == form[i].user.id and x.id_proyecto == pk:\n ok=True\n if ok:\n list.append(form[i].user.id)\n print(list)\n return render(request, 'proyectos/delete_comite.html', {'form': form,'list':list,'pk':pk,'proyectos':proyectos,'idGerente':gerente.id})", "def management():\n if request.method == 'POST' and request.form['submit'] == 'Add User':\n Storage.save_user(request.form['username'], request.form['password'])\n if request.method == 'POST' and request.form['submit'] == 'Add Client':\n Storage.generate_client()\n return render_template('management.html', users=Storage.all_users(),\n clients=Storage.all_clients())", "def save_click(self):\n acc_name = self.name_entry.get()\n email = self.email_entry.get()\n username = self.user_entry.get()\n password = self.pass_entry.get()\n\n if not acc_name:\n self.error_label.config(text='Introdu numele contului.')\n return\n\n if self.is_new_account and accountdb.account_exists(self.us, acc_name):\n self.error_label.config(text='Un cont cu acest nume există deja.')\n return\n\n # Tell the user what's happening.\n self.error_label.config(text='Se salvează...')\n self.error_label.update()\n\n acc = account.create_account(acc_name, email, username, password, self.us)\n accountdb.change_account(self.us, acc)\n\n self.error_label.config(text='Detaliile contului au fost salvate.')\n\n self.acc = acc\n self.is_new_account = False\n self.load_account_data()", "def new_user(cls, user):\r\n pass", "def cria_receita(request):\n if request.user.is_authenticated:\n if request.method == 'POST':\n nome_receita = request.POST['nome_receita']\n ingredientes = request.POST['ingredientes']\n modo_preparo = request.POST['modo_preparo']\n tempo_preparo = request.POST['tempo_preparo']\n rendimento = request.POST['rendimento']\n categoria = request.POST['categoria']\n foto_receita = request.FILES['foto_receita']#por se tratar de um dado do tipo file\n user = get_object_or_404(User, pk=request.user.id)\n\n if campo_vazio(nome_receita):\n messages.error(request, 'Nome da receita é obrigatório')\n return redirect('cria_receita')\n\n if campo_vazio(ingredientes):\n messages.error(request, 'ingredientes são obrigatórios')\n return redirect('cria_receita')\n\n if campo_vazio(modo_preparo):\n messages.error(request, 'Modo de preparo é obrigatório')\n return redirect('cria_receita')\n\n if campo_vazio(tempo_preparo):\n messages.error(request, 'tempo de preparo é obrigatório')\n return redirect('cria_receita')\n\n if campo_vazio(rendimento):\n messages.error(request, 'rendimento é obrigatórios')\n return redirect('cria_receita')\n\n if campo_vazio(categoria):\n messages.error(request, 'categoria é obrigatório')\n return redirect('cria_receita')\n\n receita = Receita.objects.create(\n pessoa=user,\n nome_receita=nome_receita,\n 
ingredientes=ingredientes,\n modo_preparo=modo_preparo,\n tempo_preparo=tempo_preparo,\n rendimento=rendimento,\n categoria=categoria,\n publicado=False,\n foto_receita=foto_receita\n )\n receita.save()\n return redirect('dashboard')\n else:\n return render(request, 'cria_receita.html')\n else:\n return redirect('index')", "def validate_usuario(self, data):\n\t\tuser = Usuario.objects.filter(usuario=data)\n\t\t# Si estoy creando (no hay instancia) comprobar si hay usuarios con ese\n\t\t# username\n\t\tif not self.instance and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\t# Si estoy actualizando (hay instancia) y estamos cambiando el username\n\t\t# y existen usuarios con el nuevo username\n\t\telif self.instance.usuario != data and len(user) != 0:\n\t\t\traise ValidationError(u\"Ya existe un usuario con ese usuario\")\n\t\telse:\n\t\t\treturn data", "def post(self):\n user_id = request.args.get('user_id')\n lastname = request.args.get('lastname')\n return post_new_sukunimi(user_id, lastname)", "def signup():\n signup_form = SignupForm(request.form) # Rempli les champs créer dans le SignupForm avec les valeurs du forumlaire corerspondantes au nom donné au champs\n # Les champs créer dans le SignupForm peuvent être parcouru grâce à la methode __setitem__ et __getitem__.\n if request.method == 'POST':\n if signup_form.validate(): # Utilise les validators renseignés dans SignupForm pour vérifier les valeurs des champs\n email = signup_form.email.data\n last_name = signup_form.last_name.data\n first_name = signup_form.first_name.data\n phone = signup_form.phone.data\n password = signup_form.password.data\n\n if not UserController().exists(email):\n hashed_password = hashlib.sha256(password.encode('utf8')).hexdigest()\n user = UserController().insert(email, hashed_password, last_name, first_name, phone)\n login_user(user)\n return redirect(url_for('main_bp.home'))\n flash('Un utlisateur utilise déjà cette adresse mail')\n return redirect(url_for('auth_bp.signup'))\n\n return render_template('signup.html',\n current_user=current_user,\n form=signup_form)" ]
[ "0.67103773", "0.65624523", "0.64488596", "0.62953645", "0.62946165", "0.62440854", "0.62051827", "0.6188632", "0.60768074", "0.60262233", "0.6007331", "0.5995433", "0.5959869", "0.592796", "0.5894467", "0.58874565", "0.58790886", "0.58777773", "0.5834341", "0.5799069", "0.57986885", "0.57972366", "0.57843083", "0.578276", "0.57685775", "0.5755628", "0.5750619", "0.57418203", "0.5738208", "0.5712419", "0.56946105", "0.5669071", "0.564868", "0.56423", "0.56364346", "0.5621848", "0.561678", "0.5608252", "0.5607104", "0.5604082", "0.5601989", "0.5587424", "0.55785185", "0.55738", "0.55662894", "0.5562642", "0.556211", "0.55618143", "0.5555388", "0.5547069", "0.5544731", "0.55328923", "0.5527329", "0.55188805", "0.55184317", "0.55165607", "0.55142015", "0.55127895", "0.55103517", "0.55062616", "0.5503205", "0.5502078", "0.5498572", "0.5494467", "0.5490331", "0.5487525", "0.548037", "0.54776317", "0.54736614", "0.5473199", "0.5472083", "0.54717815", "0.5461534", "0.5459195", "0.5458574", "0.54567", "0.5447524", "0.5423082", "0.54119366", "0.54094875", "0.54007477", "0.54007477", "0.53977674", "0.5394571", "0.53919125", "0.5390841", "0.53903866", "0.53902763", "0.5384027", "0.53781575", "0.5376038", "0.5376005", "0.53748834", "0.5370953", "0.5365063", "0.53624994", "0.534112", "0.5328256", "0.53264034", "0.5326161" ]
0.5909672
14
Test addition for Complex with Complex, complex, int and float
def test_add(): z = Complex(1, -2) w = Complex(1, 1) assert (z + w) == Complex(2, -1) assert (z + (1+1j)) == Complex(2, -1) assert (z + 2) == Complex(3, -2) assert (z + 2.0) == Complex(3, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def _cmplx_add_ ( s , o ) :\n return o + complex ( s )", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary", "def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num", "def test_op_add_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_r = offl_a + s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_iadd_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_a += s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _complex(real, imag):\n real = np.asarray(real)\n imag = np.asarray(imag)\n cplx = 1j * imag \n return cplx + real", "def complex(real, imag):", "def test_op_add_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.2, -1.5)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_iadd_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n 
self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_complex():\n assert complex(Quantity(1, unit('m'))) == complex(1)", "def __radd__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r + self.r, other.i + self.i)", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)", "def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True", "def test_real(self):\n\n real = common_math.real\n\n self.assertTrue(real(3.75) + real(4.75) == real(8.5))\n self.assertTrue(real(2.5) * real(-1.5) == -real(3.75))\n\n pi_1 = to_real(real, Fraction(311, 99))\n pi_2 = to_real(real, Fraction(333, 106))\n pi_3 = to_real(real, Fraction(355, 113))\n\n self.assertTrue(pi_1 < pi_2)\n self.assertTrue(pi_2 < pi_3)", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def add(self, number: float) -> float:\n if self.check_type_not_complex(number=number):\n self.__memory += number\n return self.__memory\n return self.__memory", "def test_add_float(self):\n self.assertAlmostEqual(cr.add(2.21, 4.7), 2.21 + 4.7, places=2)", "def test_iadd_with_float_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1.0\n\n a += b\n\n expected_result = Vec3(3, 4, 5)\n\n self.assertEqual(a, expected_result)", "def check_type_not_complex(cls, number: Number) -> None:\n if isinstance(number, complex):\n print(\"Calculator supports arithmetic only with integers\",\n \"and floats but not with complex numbers\")\n return False\n return True", "def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)", "def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return 
complexNumber\n\t\treturn real,imaginary", "def add(self, x):\n if type(x) is int:\n self.real += x\n else:\n self.real = self.real + x.real\n self.imag = self.imag + x.imag", "def test_op_add_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.0, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def complex(real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def test_add_with_float_arg(self):\n\n a = Vec3(2, 3, 4)\n b = 5.0\n\n result = a + b\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def test_add():\n circle_a = Circle(4) \n circle_b = Circle(4) \n expected = circle_a + circle_b\n assert expected.radius == Circle(8).radius", "def complex_value(self) -> global___Expression.ComplexValue:", "def test_op_iadd_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_mul():\n z = Complex(1, -2)\n v = Complex(2, 2)\n assert z*v == Complex(6, -2)\n assert v*z == z*v\n assert z*2 == Complex(2, -4)\n assert z*2.0 == Complex(2, -4)\n assert z*(2+2j) == v*z", "def test_add_floats(self):\n print(\"---running test_add_floats\")\n result = some_math.add(10.5, 2)\n assert result == 12.5", "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def __radd__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(value + self._real, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(value._real + self._real, value._imag + self._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n value.__class__.__name__, self.__class__.__name__\r\n )\r\n )", "def test_add_with_int_arg(self):\n\n a = Vec3(2, 3, 4)\n b = 5\n\n result = a + b\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def test_add_numbers(self):\n 
a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def __eq__(self, other):\n return (self.real+(self.imag*1j)) == (other.real+(other.imag*1j))\n #return (Complex(self.real, self.imag) == Complex(other.real, other.imag))", "def test_op_one_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == complex(1.0, 0.0)).all(),\n \"Array should be all one.\" + str(a))", "def complex_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Complex):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected complex number, {} got instead.'.format(func, name))", "def test_op_pow_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(0.7, 0.6)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = pow(a, s)\n\n offl_a = stream.bind(a)\n offl_r = pow(offl_a, s)\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertEqualEpsilon(r, expect,\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __mul__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum * other, self._imNum * other)\n\n if isinstance(other, complex):\n a = self._reNum * other.real\n b = self._reNum * other.imag\n c = self._imNum * other.real\n d = self._imNum * other.imag\n return Complex(a - d, c + b)\n\n a = self._reNum * other._reNum\n b = self._reNum * other._imNum\n c = self._imNum * other._reNum\n d = self._imNum * other._imNum\n return Complex(a - d, c + b)", "def test_add_int(self):\n self.assertEqual(operations.add(3,4), 7)", "def __add__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm + other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)", "def test_add3(self):\n self.assertEqual(-5, add(-10 , 5), \"should be -5\")", "def test_add_consistency1(self) -> None:\n a = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3])\n )\n b = Constant(\n 'const2',\n Float32(),\n np.zeros([3])\n )\n input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}\n Add(\n 'add1',\n [1, 3, 3],\n Float32(),\n input_ops\n )\n\n print(\"Consistency test for 'Add' #1 passed!\")", "def _cmplx_mul_ ( s , o ) :\n return o * complex ( s )", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : 
c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def test_op_mul_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.3, 1.4)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a * s\n\n offl_a = stream.bind(a)\n offl_r = offl_a * s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_sphere_iadd(): \n sphere_1 = Sphere(2)\n sphere_1 += sphere_1 \n assert sphere_1 == Sphere(4)", "def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def complex(self, real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def is_complex() -> bool:\n raise NotImplementedError()", "def __init__(self, children):\n super().__init__('complex', children, None)\n self.real = children[0].real\n self.imag = children[1].imag\n self.complex = self.real + self.imag*1j", "def test_sphere_add():\n sphere_1 = Sphere(2) \n sphere_2 = Sphere(2) \n assert (sphere_1 + sphere_2) == Sphere(4)", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def __mul__(self, other):\n self.mul_complex_num = Complex((self.real * other.real - self.imaginary * other.imaginary),\n (self.real * other.imaginary 
+ self.imaginary * other.real))\n return self.mul_complex_num", "def test_iadd_with_int_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1\n\n a += b\n\n expected_result = Vec3(3, 4, 5)\n\n self.assertEqual(a, expected_result)", "def test_addition(self):\n\n a1 = points.Point(3, -2, 5)\n a2 = vectors.Vector(-2, 3, 1)\n\n a3 = a1 + a2\n\n self.assertEqual(a3,\n tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 1, 6, 1))\n self.assertEqual(a3, points.Point(1, 1, 6))", "def test_op_isub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_a -= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_add():\n # Test for addition with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x**2 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value **2 + x.value\n assert x.grad() == sum(weight * var.grad()\n for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert c.x == 4\n assert c.y == 6", "def test_linear_comb() -> None:\n SumStr = FormalLinearCombination(\n str, complex, complex(0, 0), lambda z1, z2: z1+z2)\n flc_1a2b3c = SumStr(\n [(complex(1, 0), \"a\"), (complex(2, 0), \"b\"), (complex(3, 0), \"c\")])\n assert str(flc_1a2b3c) == \"(1+0j)*a+(2+0j)*b+(3+0j)*c\"\n two_flc_1a2b3c = flc_1a2b3c+flc_1a2b3c\n exp_two_flc_1a2b3c = SumStr(\n [(complex(2, 0), \"a\"), (complex(4, 0), \"b\"), (complex(6, 0), \"c\")])\n assert str(two_flc_1a2b3c) == \"(2+0j)*a+(4+0j)*b+(6+0j)*c\"\n assert exp_two_flc_1a2b3c == two_flc_1a2b3c\n assert str(flc_1a2b3c*flc_1a2b3c) == \"(1+0j)*aa+(2+0j)*ab+(3+0j)*ac\"+\\\n \"+(2+0j)*ba+(4+0j)*bb+(6+0j)*bc\"+\\\n \"+(3+0j)*ca+(6+0j)*cb+(9+0j)*cc\"", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")", "def test_add4(self):\n self.assertEqual(-15, add(-10 , -5), \"should be -15\")", "def test_reflected_numerics():\n circle = Circle(2)\n assert circle * 3 == 3 * circle", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def addition(a, b):\n pass", "def get_complex_type(self):\n import numpy\n return numpy.complex128", "def test_add_with_int_arg(self):\n\n from pedemath.vec3 import add_v3\n\n a = Vec3(2, 3, 4)\n b = 5\n\n result = add_v3(a, b)\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def isScalar(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)", "def __add__(self, other):\n if type(other) == int or type(other) == float:\n x = (other * Ccy.currencies[self.unit])\n else:\n x = (other.value / Ccy.currencies[other.unit] * Ccy.currencies[self.unit])\n return Ccy(x + self.value, 
self.unit)", "def test_add2(self):\n self.assertEqual(5, add(10 , -5), \"should be 5\")", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_maths(self):\n\n # Test that basic integers work\n self.assertEqual(int(1) + int(1), int(2), \"Basic addition failed\")\n self.assertNotEqual(int(1) + int(1), int(3), \"Basic addition failed\")\n\n # Test doubles\n # FIXME: Deployment fails for some reason. Maybe bug in CPU? Commenting it out.\n # self.assertEqual(float(0.1) + float(0.2), float(0.3), \"Floating addition failed\")\n self.assertNotEqual(float(1) + float(1), float(3), \"Floating Addition failed\")", "def test_add_with_float_arg(self):\n\n from pedemath.vec3 import add_v3\n\n a = Vec3(2, 3, 4)\n b = 5.0\n\n result = add_v3(a, b)\n\n expected_result = Vec3(7, 8, 9)\n\n self.assertEqual(result, expected_result)", "def test_op_imul_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a * s\n\n offl_a = stream.bind(a)\n offl_a *= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i + j, adder.calc(j, i))", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def test_op_sub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.3, 1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_r = offl_a - s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_add_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 1.3\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_r = offl_a + s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def testItConstantAdd(self):\n\n\t\toutput = calc.it(self.Z,self.c,self.max_iteration)\n\t\tnotError = (self.I == output).all()\n\t\tself.assertEqual(True, notError)", "def complex_magnitude(c):\n return (c * c.conjugate()) ** 0.5", "def test_div_complex(doctest):", "def 
test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def testadd_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracX_fracY = fracX + fracY\r\n\t\t\tself.assertEqual ( add_fracX_fracY.toString ().split ()[0], dictAdd ['X+Y'] )", "def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")", "def test_op_zero_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0.0 + 0.0j,\n \"Array should be all zeros.\")", "def test_op_iadd_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n s = 1.3\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_a += s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def complexinfo(a, str=None):\n\n if str:\n print \n print \"\\t\", str\n re = a.real.copy()\n im = a.imag.copy()\n _log.debug(\"\\t%.2e %.2g = re.sum im.sum\" % (re.sum(), im.sum()))\n _log.debug(\"\\t%.2e %.2g = abs(re).sum abs(im).sum\" % (abs(re).sum(), abs(im).sum()))" ]
[ "0.76475245", "0.7613455", "0.73216003", "0.7232131", "0.7204029", "0.7114866", "0.6915121", "0.69098693", "0.6900085", "0.6607692", "0.6509762", "0.6498399", "0.6357027", "0.6335295", "0.63045627", "0.6205945", "0.61870325", "0.6182034", "0.6174292", "0.6170315", "0.61585486", "0.6132917", "0.6125642", "0.6124113", "0.61066985", "0.60958374", "0.6086763", "0.6080302", "0.60616887", "0.6054502", "0.6014153", "0.60039145", "0.60027033", "0.5997926", "0.5978909", "0.5968331", "0.59575224", "0.5952436", "0.59406996", "0.5925226", "0.5908511", "0.5904699", "0.5893072", "0.5890339", "0.5877143", "0.587009", "0.5865818", "0.58585995", "0.5854229", "0.58441126", "0.58438754", "0.5843117", "0.584058", "0.5839917", "0.5838911", "0.5832374", "0.5817731", "0.58026785", "0.57970953", "0.57756346", "0.5768599", "0.5766697", "0.5750423", "0.5746739", "0.5745134", "0.57345384", "0.5713658", "0.57051027", "0.570434", "0.5704332", "0.5704172", "0.5702286", "0.5701226", "0.5700611", "0.56999373", "0.56937337", "0.5690667", "0.5690667", "0.5690667", "0.5689968", "0.5689734", "0.5689216", "0.56803143", "0.5672259", "0.5671596", "0.56475013", "0.5646688", "0.56319326", "0.56161493", "0.56137496", "0.5609906", "0.56025416", "0.56021523", "0.55998474", "0.55979425", "0.5554798", "0.55500996", "0.55466443", "0.5544378", "0.55386686" ]
0.81614006
0
Test subtraction for Complex with Complex, complex, int and float
def test_sub(): z = Complex(1, -2) w = Complex(1, 1) assert (z - w) == Complex(0, -3) assert (z - (1+1j)) == Complex(0, -3) assert (z - 2) == Complex(-1, -2) assert (z - 2.0) == Complex(-1, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def _cmplx_sub_ ( s , o ) :\n return (-o ) + complex ( s )", "def complex(real, imag):", "def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary", "def test_op_sub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.3, 1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_r = offl_a - s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _cmplx_rsub_ ( s , o ) :\n return o - complex ( s )", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def test_complex():\n assert complex(Quantity(1, unit('m'))) == complex(1)", "def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)", "def test_op_isub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_a -= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_subtraction():\n assert calculator.subtract(7, 3) == 4\n assert calculator.subtract(7.0, 3.0) == 4.0\n assert calculator.subtract(7, -3) == 10\n assert calculator.subtract(7.0, -3.0) == 10.0", "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)", "def complex_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Complex):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected complex number, {} got instead.'.format(func, name))", "def test_op_sub_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.3, -1.4)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_r = offl_a - o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected 
values: \"\n \"{0} should be {1}\".format(r, expect))", "def check_type_not_complex(cls, number: Number) -> None:\n if isinstance(number, complex):\n print(\"Calculator supports arithmetic only with integers\",\n \"and floats but not with complex numbers\")\n return False\n return True", "def test_sub_with_float_arg(self):\n\n a = Vec3(7, 8, 9)\n b = 5.0\n\n result = a - b\n\n expected_result = Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)", "def complex(real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True", "def test_mul():\n z = Complex(1, -2)\n v = Complex(2, 2)\n assert z*v == Complex(6, -2)\n assert v*z == z*v\n assert z*2 == Complex(2, -4)\n assert z*2.0 == Complex(2, -4)\n assert z*(2+2j) == v*z", "def complex_magnitude(c):\n return (c * c.conjugate()) ** 0.5", "def complex(self, real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def test_Z_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1]]), 0 + 0j, 10)", "def complex_derivative ( fun , z , h = 0 , I = 3 , err = False , real = True , imag = True ) :\n \n Z = complex ( z )\n \n X = Z.real\n Y = Z.imag\n\n ## few altenatives to calculate the real and imaginary part\n \n if real :\n UX = lambda x : complex ( fun ( complex ( x , Y ) ) ).real\n ## Real part \n re = derivative ( UX , X , h = h , I = I , err = err )\n else :\n VY = lambda y : complex ( fun ( complex ( X , y ) ) ).imag \n ## Real part \n re = derivative ( VY , Y , h = h , I = I , err = err )\n\n if imag : \n VX = lambda x : complex ( fun ( complex ( x , Y ) ) ).imag \n ## Imaginary part \n im = derivative ( VX , X , h = h , I = I , err = err )\n else :\n UY = lambda y : complex ( fun ( complex ( X , y ) ) ).real\n ## Imaginary part \n im = -derivative ( UY , Y , h = h , I = I , err = err )\n \n if not err : return complex ( re , im )\n \n result = complex ( re.value() , im.value() )\n error = ( re.cov2() + im.cov2() ) ** 0.5 \n \n return result , error", "def test_op_isub_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real , s.imag )", "def test_isub_with_float_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1.0\n\n a -= b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(a, expected_result)", "def _normalizeComplex(data):\n if hasattr(data, \"dtype\"):\n isComplex = numpy.issubdtype(data.dtype, numpy.complexfloating)\n else:\n isComplex = isinstance(data, numbers.Complex)\n if isComplex:\n data = numpy.absolute(data)\n return data", "def _cmplx_add_ ( s , o ) :\n return o + complex ( s )", "def __neg__(self):\n return Complex(-self._reNum, -self._imNum)", "def test_neg_sub():\n c=[1,2]\n def 
myfunc(x,y):\n f1=1-x-y-2\n return -f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': [1,1], 'value': 4}\n\n assert res==expectAns", "def _complex(real, imag):\n real = np.asarray(real)\n imag = np.asarray(imag)\n cplx = 1j * imag \n return cplx + real", "def test_subtract(self):\n self.assertEqual(work_file.subtract(10, 5), 5)\n self.assertEqual(work_file.subtract(-1, 1), -2)\n self.assertEqual(work_file.subtract(-1, -1), 0)", "def subtract(self, number: float) -> float:\n if self.check_type_not_complex(number=number):\n self.__memory -= number\n return self.__memory\n return self.__memory", "def __rsub__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r - self.r, other.i - self.i)", "def complex_value(self) -> global___Expression.ComplexValue:", "def test_real(self):\n\n real = common_math.real\n\n self.assertTrue(real(3.75) + real(4.75) == real(8.5))\n self.assertTrue(real(2.5) * real(-1.5) == -real(3.75))\n\n pi_1 = to_real(real, Fraction(311, 99))\n pi_2 = to_real(real, Fraction(333, 106))\n pi_3 = to_real(real, Fraction(355, 113))\n\n self.assertTrue(pi_1 < pi_2)\n self.assertTrue(pi_2 < pi_3)", "def test_sub_with_int_arg(self):\n\n a = Vec3(7, 8, 9)\n b = 5\n\n result = a - b\n\n expected_result = Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)", "def test_csc():\n c=14\n assert {'diff':EF.csc(c).der, 'value': EF.csc(c).val}=={'diff':0, 'value': 1/math.sin(c)}", "def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return complexNumber\n\t\treturn real,imaginary", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real () , s.imag () )", "def test_magnitude(self):\n\n a1 = vectors.Vector(1, 2, 3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(-1, -2, -3)\n self.assertEqual(a1.magnitude(), math.sqrt(14))\n\n a1 = vectors.Vector(1, 0, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 1, 0)\n self.assertEqual(a1.magnitude(), 1)\n\n a1 = vectors.Vector(0, 0, 1)\n self.assertEqual(a1.magnitude(), 1)", "def test_cot():\n c=0.5\n\n def myfunc(x):\n f1=EF.cot(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':2/(math.cos(c*2)-1), 'value':math.cos(c)/math.sin(c)}\n assert res==expectAns", "def test_calculator_subtract():\n calc = Calculator()\n calc.subtract(1)\n assert calc.get_result() == -1", "def is_complex() -> bool:\n raise NotImplementedError()", "def test_div_complex(doctest):", "def test_minus(self):\n self.assertEqual(1, foo.minus(3, 2))", "def test_reflected_numerics():\n circle = Circle(2)\n assert circle * 3 == 3 * circle", "def test_calculate_subtraction(self):\n result = self.calcuate.calcuate('10-8')\n expected_result = \"2\"\n self.assertEqual(expected_result, result)", "def test_sub_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value -= 1.5", "def test_minus(self):\n self.assertEqual(1, minus(3, 2))", "def test_calculate_one_number(self):\n result = self.calcuate.calcuate('-3')\n expected_result = \"-3\"\n self.assertEqual(expected_result, result)", "def __complex__(self):\n return complex(self._reNum, self._imNum)", "def abs(number):\n if isinstance(number,(int,float,complex)): return builtins.abs(number)\n elif isinstance(number,(numpy.float64,numpy.complex128)): return 
numpy.abs(number)\n else: raise error(\"field_traits.abs executed on unavailable type\")", "def test_subtracting(self):\n subtracter = Subtracter()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i-j, subtracter.calc(j, i))", "def _cmplx_mul_ ( s , o ) :\n return o * complex ( s )", "def test_op_sub_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.3, -1.7)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a - offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def complex_inverse(c1,cr):", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def real_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Real):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected real number, {} got instead.'.format(func, name))", "def test_cosh():\n c=2\n\n def myfunc(x):\n f1=EF.cosh(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff':3.626860407847019,'value':math.cosh(c)}#sinh(x) differ in last digits\n assert res==expectAns", "def test_isub_with_int_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1\n\n a -= b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(a, expected_result)", "def test_spam_subtract():\n calc = Calculator()\n calc.subtract_number(5)\n assert calc.get_result() == -5", "def test_subtract_all_args_less_zero(self):\n try:\n self.assertEqual(subtract(-18, -5), -13)\n except Exception as error:\n print(error)", "def test_subtract_all_args_greater_zero(self):\n try:\n self.assertEqual(subtract(30, 16), 15)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def __mul__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum * other, self._imNum * other)\n\n if isinstance(other, complex):\n a = self._reNum * other.real\n b = self._reNum * other.imag\n c = self._imNum * other.real\n d = self._imNum * other.imag\n return Complex(a - d, c + b)\n\n a = self._reNum * other._reNum\n b = self._reNum * other._imNum\n c = self._imNum * other._reNum\n d = self._imNum * other._imNum\n return Complex(a - d, c + b)", "def test_subtract_numbers(self):\n self.assertEqual(sub(9, 3),6)", "def test_subtract_numbers(self):\n self.assertEqual(subtract(8, 4), 4)", "def test_sub_with_float_arg(self):\n\n from pedemath.vec3 import sub_v3\n\n a = Vec3(7, 8, 9)\n b = 5.0\n\n result = sub_v3(a, b)\n\n expected_result = Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, 
\"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def isScalar(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number) and not isinstance(obj, complex)", "def test_sub_numbers(self):\n a, b = 5, 10\n expected = b - a\n self.assertEqual(subtract(b, a), expected)", "def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary", "def to_float_complex(self):\r\n return eval(str(self))", "def test_cos():\n c=14\n def myfunc(x):\n f1=EF.cos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n\n expectAns={'diff': -math.sin(c), 'value': math.cos(c)}\n\n assert res==expectAns", "def test_calculator_subtract():\n assert Calculator.subtract_number(1, 2) == -1", "def test_rsub():\n # Test for reverse subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = 0.1 - x\n try:\n assert z.value == x.value - 0.1\n except AssertionError as e:\n print(e)\n raise AssertionError", "def get_complex_type(self):\n import numpy\n return numpy.complex128", "def test_sub(self):\n newvalues = Fraction(1,2)-Fraction(1,2)\n fraction1 = Fraction(newvalues[0],newvalues[1])\n self.assertEqual(str(fraction1),\"0/4\")", "def test_cos_con():\n c=14\n assert {'diff':EF.cos(c).der, 'value': EF.cos(c).val}=={'diff':0, 'value': math.cos(c)}", "def test_subtraction__vector_vector(self):\n\n a1 = vectors.Vector(3, 2, 1)\n a2 = vectors.Vector(5, 6, 7)\n\n a3 = a1 - a2\n\n self.assertEqual(a3, vectors.Vector(-2, -4, -6))", "def test_minus(self):\n print('test_minus');\n self.assertEqual(90, minus(100, 10))", "def __complex__(self):\n return complex(self.q[0], self.q[1])", "def test_calculate_subtraction_adding_subtraction(self):\n result = self.calcuate.calcuate('20-5+8-3')\n expected_result = \"20\"\n self.assertEqual(expected_result, result)", "def test_answer_subtract():\n calc = Calculator()\n calc.subtract_number(5)\n assert calc.get_result() == -5", "def test_op_isub_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_subtract_numbers(self):\n self.assertEqual(subtract(5, 11), 6)", "def test_subtract_numbers(self):\n self.assertEqual(subtract(5, 11), 6)", "def test_sub():\n # Test for subtraction with Rnode object\n x = Rnode(0.11)\n y = Rnode(0.5)\n z = x - y\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value - 
y.value\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)\n # Test for subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = x - 0.1\n try:\n assert z.value == x.value - 0.1\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def test_subtract_zero_arg(self):\n try:\n self.assertEqual(subtract(0, -6), 7)\n except Exception as error:\n print(f'Got error in {inspect.stack()[0][3]}, {error}')", "def subtract(self,ctSub):\n\n # First confirm eligible for subtraction\n if (not np.array_equal(self.x1_flat,ctSub.x1_flat)) or (not np.array_equal(self.x2_flat,ctSub.x2_flat)):\n raise Exception(\"Can't subtract because not meshed the same\")\n\n ctResult = copy.deepcopy(ctSub)# copy the class\n\n \n # Original method\n # ctResult.u = self.u - ctSub.u\n # ctResult.uMesh = griddata(np.column_stack([ctResult.y, ctResult.z]),ctResult.u,(ctResult.yMesh.flatten(), ctResult.zMesh.flatten()), method='cubic')\n\n # New method\n ctResult.u_mesh = self.u_mesh - ctSub.u_mesh\n ctResult.v_mesh = self.v_mesh - ctSub.v_mesh\n ctResult.w_mesh = self.w_mesh - ctSub.w_mesh\n ctResult.u_cubed = self.u_cubed - ctSub.u_cubed\n\n\n return ctResult", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def test_op_zero_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0.0 + 0.0j,\n \"Array should be all zeros.\")", "def test_calculate_correct_negative_num(self):\n result = self.calcuate.calcuate('2-5')\n expected_result = \"-3\"\n self.assertEqual(expected_result, result)", "def __init__(self, children):\n super().__init__('complex', children, None)\n self.real = children[0].real\n self.imag = children[1].imag\n self.complex = self.real + self.imag*1j", "def test_arccos():\n c=0.5\n def myfunc(x):\n f1=EF.arccos(x)\n return f1\n\n f_obj=ADiff(myfunc)\n res=f_obj.Jac(c)\n expectAns={'diff':-1/math.sqrt(1-c**2), 'value':math.acos(c)}\n assert res==expectAns", "def test_op_add_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_r = offl_a + s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def testFtoC(self):\r\n for integer, numeral in self.ftocvalues:\r\n result = conversions_refactored.convert('Fahrenheit', 'Celsius', integer) \r\n self.assertEqual(numeral, result, msg='Incorrect result, calculation error')", "def test_op_one_complex(self):\n\n 
device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == complex(1.0, 0.0)).all(),\n \"Array should be all one.\" + str(a))", "def complex_abs(data):\n assert data.size(-1) == 2\n return (data ** 2).sum(dim=-1).sqrt()" ]
[ "0.73002094", "0.69852185", "0.6779295", "0.6768346", "0.6679916", "0.6596779", "0.6549099", "0.64673406", "0.6422183", "0.6415886", "0.63986313", "0.628705", "0.6265425", "0.62318534", "0.61975026", "0.6182748", "0.6176905", "0.6081503", "0.60485506", "0.6047534", "0.6038145", "0.59815145", "0.5964033", "0.5960958", "0.5949088", "0.5947505", "0.5940247", "0.5926241", "0.5911554", "0.5905094", "0.5904569", "0.5883107", "0.58667374", "0.58609", "0.5856014", "0.58543617", "0.5845321", "0.58372116", "0.5837072", "0.58355933", "0.5825977", "0.58245003", "0.58119047", "0.58118284", "0.5790935", "0.5784226", "0.5759913", "0.57598513", "0.57579654", "0.5750174", "0.5747264", "0.57411546", "0.573343", "0.57170236", "0.5713367", "0.5703784", "0.5682293", "0.56803745", "0.5680313", "0.56738245", "0.5651787", "0.56483173", "0.5647917", "0.561894", "0.56160176", "0.5606312", "0.5605994", "0.56059223", "0.55992097", "0.55909985", "0.55774575", "0.55665976", "0.5563985", "0.55552936", "0.5550274", "0.55499434", "0.55415887", "0.55394864", "0.55391204", "0.553395", "0.5533605", "0.5525298", "0.5521396", "0.55187124", "0.5517476", "0.5514789", "0.55144733", "0.55144733", "0.5513026", "0.5510446", "0.5505084", "0.55017394", "0.5493237", "0.54867613", "0.5471054", "0.5464537", "0.54618555", "0.54593915", "0.5456879", "0.5450619" ]
0.75719965
0
(1-2i)(2+2i) = 2 + 2i - 4i + 4 = 6 - 2i
def test_mul(): z = Complex(1, -2) v = Complex(2, 2) assert z*v == Complex(6, -2) assert v*z == z*v assert z*2 == Complex(2, -4) assert z*2.0 == Complex(2, -4) assert z*(2+2j) == v*z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sw(n):\n return 4*n*n + 2*n + 1", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def f(x):\n return ((x[0] - 1) ** 2) + ((x[1] + 3) ** 2)", "def triangular_number_solution():\n return 5 * partial_sum(199) + 3 * partial_sum(333) - 15 * partial_sum(66)", "def triple_step_simplified(n):\n\ta = 0\n\tb = 0\n\tc = 1\n\tfor i in range(n):\n\t\ttemp = a + b + c\n\t\ta, b, c = b, c, temp\n\treturn temp", "def go(n):\n if 0 == n:\n return (1, 0)\n else:\n x, y = go(n // 2)\n if n % 2 == 0:\n return (x+y, y)\n else:\n return (x, x+y)", "def six():\r\n \r\n sum_squared = 0\r\n squared_sum = 0\r\n \r\n for i in range(1, 101):\r\n sum_squared += i**2\r\n squared_sum += i\r\n squared_sum = squared_sum**2\r\n \r\n return squared_sum - sum_squared", "def nw(n):\n return 4*n*n + 1", "def exercise2():\n # You _DO_NOT_ need to modify this code for Lab 10.\n n = easygui.integerbox( \"Enter n:\", \"Input\", lowerbound=0, upperbound=2 ** 31 )\n\n s = summation( n, 1 )\n f = n * ( n + 1 ) // 2\n easygui.msgbox( \"n = {}, summation( n, 1 ) = {}, formula result = {}\".format( n, s, f ) )\n\n s = summation( n, 2 )\n f = n * ( n + 1 ) * ( 2 * n + 1 ) // 6\n easygui.msgbox( \"n = {}, summation( n, 2 ) = {}, formula result = {}\".format( n, s, f ) )\n\n s = summation( n, 3 )\n f = ( n * ( n + 1 ) // 2 ) ** 2\n easygui.msgbox( \"n = {}, summation( n, 3 ) = {}, formula result = {}\".format( n, s, f ) )", "def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6", "def problem(args:int) -> int:\r\n\ta, b, c = 1, 2, 0\r\n\tresult = [a]\r\n\twhile c <= args:\r\n\t\tc = a + b\r\n\t\ta = b\r\n\t\tb = c\r\n\t\tresult.append(a)\r\n\tresult = np.array(result)\r\n\treturn sum(result[result % 2 == 0])", "def f(x):\n \"\"\" Xrhsimopoihste MONO ekfraseis klhshs,\n p.x., stis add, mul, pow, sqrt, truediv, ...\n OXI infix telestes (+, /, ...) 
\"\"\"\n\n return round(truediv(1,add(add(2,truediv(3,add(x,4))),truediv(1,x))),4)", "def itofm(i):\n return 2 ** (i / 12.0)", "def prob2():\n x, i, j = sy.symbols('x, i, j')\n expr = sy.product(sy.summation(j*(sy.sin(x) + sy.cos(x)), (j, i, 5)), (i, 1, 5))\n return sy.simplify(expr)", "def fn(n):\n if n == 0: return [\"\"]\n if n == 1: return [\"0\", \"1\", \"8\"]\n return [x+y+xx for x, xx in mp for y in fn(n-2)]", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def sumn_pow2(n):\n return (n * (n + 1) * (2 * n + 1)) / 6", "def compute(n):\n if n == 1:\n return 1\n else:\n i = find_i(n)\n return 2 * compute(n - i) + 2 ** i - 1", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def processPart2(inputs):\n total = 0\n inputs = inputs if type(inputs) is list else [inputs]\n for input in inputs:\n [a, b, c] = sorted(map(int, input.split('x')))\n total = total + (a * 2 + b * 2 + (a * b * c))\n return total", "def square_difference(n):\n\n return n*(n+1)*(3*n+2)*(n-1)/12", "def multi_2(cur,p,n):\n\tr=p\n\twhile n>1:\n\t\tr=sum(cur,r,p)\n\t\tn-=1\n\treturn r", "def sumDigit():", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def g(n):\n \"*** YOUR CODE HERE ***\"\n if n < 4:\n return n\n else:\n return g(n-1) + 2*g(n-2) + 3*g(n-3)", "def I (self, n):", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def sum_square_difference(n):\n\tdifference = (n-1)*(n)*(n+1)*(3*n+2)/12\n\treturn difference", "def expFromAdd(a,b):\n\tif (b == 0):\n\t\treturn 1\n\tresult = 1\n\tfor c1 in range(0, b):\n\t\ttemp = 0\n\t\tfor c2 in range(0, a):\n\t\t\ttemp += result\n\t\tresult = temp\n\treturn result", "def RecIntMultiplication(x,y,n):\n\n if n == 1 :\n x = int(x)\n y = int(y)\n return x * y \n \n else : \n\n k = ceil(n/2)\n a = x[0:-k]\n b = x[-k:]\n\n if len(a) == 0:\n a = '0'\n \n c = y[0 : -k]\n d = y[-k:]\n\n if len(c)==0:\n c = '0'\n \n ac = RecIntMultiplication(a,c,k)\n ad = RecIntMultiplication(a,d,k)\n bc = RecIntMultiplication(b,c,k)\n bd = RecIntMultiplication(b,d,k)\n\n return 10**n * ac + 10**(k)*(ad+bc) + bd", "def g(n):\n\t\"*** YOUR CODE HERE ***\"\n\tif n <= 3:\n\t\treturn n\n\telse:\n\t\treturn g(n-1) + 2*g(n-2) + 3*g(n-3)", "def solution(n):\n i = 1\n j = 2\n sum = 0\n while j <= n:\n if j % 2 == 0:\n sum += j\n i, j = j, i + j\n\n return sum", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def sumOfSeq(a, d, n):\n return a * n + n * (n - 1) * d / 2", "def compute_pattern(n):\n for x in range(1,n):\n for y in range(x, x*2):\n print(y, end= \" \")\n print()", "def minOperations(n):\n if type(n) is not int or n < 2:\n return 0\n\n summation = []\n\n while n % 2 == 0:\n summation.append(2)\n n = n // 2\n\n for i in range(3, n + 1, 2):\n while n % i == 0:\n summation.append(i)\n n = n // i\n\n return (sum(summation))", "def foo_4(x):\n\tresult=1\n\tfor i in range(1, x+1):\n\t\tresult=result * i\n\treturn result", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def gen_op2(N11, N22, N12, rN11, rN22, rN12):\n raw = 2 * N12 / rN12 / (N11 / rN11 + N22 / rN22)\n print(\"=====\\n2nd method: raw = 2 * N12/rN12 / (N11/rN11 + N22/rN22)\")\n print(\"OP: 1 - raw = 
%.3f\\n=====\" % (1 - raw))", "def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d", "def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))", "def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)", "def interleaved_sum(n, odd_term, even_term):\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n return 1\n f = odd_term\n if n % 2 == 0:\n f = even_term\n return f(n) + interleaved_sum(n - 1, odd_term, even_term)", "def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result", "def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2", "def g(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n return g(n - 1) + 2 * g(n - 2) + 3 * g(n - 3)", "def g(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n return g(n - 1) + 2 * g(n - 2) + 3 * g(n - 3)", "def s2(n, k):\n if n == 0 or n != 0 and n == k:\n return 1\n if k == 0 or n < k:\n return 0\n return k * s2(n-1, k) + s2(n-1, k-1)", "def cbd(n):\n return reduce(lambda a,b: a+b, \\\n [int(n) * (2 ** int(i)) for n,i in enumerate(str(n)[::-1])])", "def elementary_summand(fixed, i):\n if i < fixed:\n return 0\n elif i == fixed:\n return 2\n else:\n return 1", "def e(i):\n if i==0:\n return 0\n else:\n return gc(2*int(math.floor((i-1)//2)))", "def s2(nj, sj2, M):\n\n return sum((nj - 1)*sj2)/(sum(nj) - M)", "def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))", "def solve(n, seq):\n\n return sum(seq) - (n-1) * (n-2) / 2", "def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result", "def n2i(n, M=16):\n return n+M", "def sixn(m):\n if m <= 2:\n return ()\n if m > 2:\n yield 2\n if m > 3:\n yield 3\n for n in count(1):\n x = 6 * n - 1\n y = x + 2\n if x < m:\n yield x\n else:\n break\n if y < m:\n yield y\n else:\n break", "def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r", "def f(i):\n return e(2**N-1-i) ^ 2**(N-1)", "def f2(x):\n return x**2 + 2 * x + 1", "def fn(nums):\n if len(nums) == 1: return nums\n return fn(nums[::2]) + fn(nums[1::2])", "def inner_product(pattern_one, pattern_two):\n\n return np.sum(np.multiply(pattern_one, pattern_two))", "def f2_osyczka2(x1, x2, x3, x4, x5, x6):\n return x1 ** 2 + x2 ** 2 + x3 ** 2 + x4 ** 2 + x5 ** 2 + x6 ** 2", "def sum_of_three_squares(n):\n special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),\n 85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),\n 526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),\n 2986: (21, 32, 39), 9634: (56, 57, 57)}\n\n v = 0\n\n if n == 0:\n return (0, 0, 0)\n\n v = multiplicity(4, n)\n n //= 4**v\n\n if n % 8 == 7:\n return\n\n if n in special.keys():\n x, y, z = special[n]\n return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)\n\n s, _exact = integer_nthroot(n, 2)\n\n if _exact:\n return (2**v*s, 0, 0)\n\n x = None\n\n if n % 8 == 3:\n s = s if _odd(s) else s - 1\n\n for x in range(s, -1, -2):\n N = (n - x**2) // 2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*(y + z), 2**v*abs(y - z))\n return\n\n if n % 8 in (2, 6):\n s = s if _odd(s) else s - 1\n else:\n s = s - 1 if _odd(s) else s\n\n for x in range(s, -1, -2):\n N = n - x**2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*y, 
2**v*z)", "def add4(a,b):\n return [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]", "def taskOfPairing(freq):\n n_pairs = list(map(lambda x: x // 2, freq))\n remainders = list(map(lambda x: x % 2, freq))\n print(freq)\n print(n_pairs)\n print(remainders)\n\n total = sum(n_pairs)\n print(total)\n i = 1\n while i < len(freq):\n print('i', i, remainders[i], remainders[i - 1])\n if remainders[i] + remainders[i - 1] == 2:\n print('total', total, 'i', i)\n total = total + 1\n i = i + 2\n print('newtotal', total, 'i', i)\n else:\n i += 1\n print(total)\n return total", "def summation_i_squared(n):\n if (type(n) is not int) or (n is None) or (n < 1):\n return None\n else:\n numbers = range(1, n + 1)\n result = 0\n result = map(lambda i: i ** 2, numbers)\n return sum(result)", "def get_partial_sum(n: int, j: int) -> float:\n first_term = sum(\n modulo_exp(16, n - k, 8 * k + j) / (8 * k + j) for k in range(0, n + 1)\n )\n first_term = first_term - math.floor(first_term)\n\n k = n + 1\n second_term = 0\n while True:\n max_rest_term = 16 ** (n - k) / max(8 * k, 1) / 15\n if math.floor(second_term * 16) == math.floor(\n (second_term + max_rest_term) * 16\n ):\n break\n\n second_term += 16 ** (n - k) / (8 * k + j)\n k += 1\n\n res = first_term + second_term\n res = res - math.floor(res)\n\n return res", "def solve():\n sum_of_squares = 0\n for i in range(1, 101):\n sum_of_squares += i * i\n square_of_sum = sum(range(1, 101)) ** 2\n return square_of_sum - sum_of_squares", "def right(i):\r\n return 2 * i + 2", "def square_of_sum(n):\n return ((n * (n+1)) / 2)**2", "def fn(k, i, j):\n if not (0 <= i < N and 0 <= j < N): return 0\n if k == 0: return 1 \n return 1/8*sum(fn(k-1, i+ii, j+jj) for ii, jj in ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)))", "def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2", "def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)", "def a2p(a, N):\n return N + 2 + (a % N) + (a // N) * (N + 1)", "def J (self, n):", "def sumn(n):\n return n * (n + 1) // 2", "def combin(n, k):\n\tif k > n//2:\n\t\tk = n-k\n\tx = 1\n\ty = 1\n\ti = n-k+1\n\twhile i <= n:\n\t\tx = (x*i)//y\n\t\ty += 1\n\t\ti += 1\n\treturn x", "def summation_of_primes():\n \n k=1\n acc=2\n for x in range(2000000):\n if x!=0 and x%2!=0 and x%4!=0 and x%6!=0 and x%8!=0 and x%10!=0:\n k=1\n for m in range(x):\n if x!=1 and m!=0 and m!=1 and x%m==0 and x!=m:\n k=2\n if k==1 and x!=1 and x%2!=0 and x%4!=0: #and y!=2:\n acc=acc+x\n #print str(acc)+' THIS IS ACC\"\"\"\n print x\n return acc", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n i = 3\n x, y, z = 1, 2, 3\n new = 1\n while i < n:\n new = z + (2*y) + (3*x)\n x, y, z = y, z, new \n i += 1\n return new", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def get_squares(n):\n\n return sum([i * i for i in range(n)])", "def _algorithm(self, rut):\n suma = 0\n multi = 2\n for r in rut[::-1]:\n suma += int(r) * multi\n multi += 1\n if multi == 8:\n multi = 2\n return u'0123456789K0'[11 - suma % 11]", "def solve(n=1000):\r\n return str(sum(x**x for x in range(1, n + 1)))[-10:]", "def triangle(n):\n\n accumulator = 0\n\n for i in range(1,n+1):\n accumulator += i\n\n return accumulator", "def fn(i, j, mv):\n if not (0 <= i < m and 0 <= j < n): return 1 \n if mv == 0: return 0\n return (fn(i-1, j, mv-1) + fn(i, j-1, mv-1) + fn(i, j+1, mv-1) + fn(i+1, j, mv-1)) % 1_000_000_007", "def polysum(n, s):\n area = 0\n \n #avoiding 
division by zero\n if n != 0: \n area = (0.25 * n * (s**2)) / math.tan(math.pi / n)\n perimeter = n * s\n \n return (round(area + perimeter**2, 4))", "def solution(s):", "def processed(N:int)->tuple:\n l1= str(N)\n a,b = '',''\n for i in range(len(l1)):\n if l1[i] == '4':\n a+='2'\n b+='2'\n else:\n a+=str(l1[i])\n b+='0'\n return int(a), int(b)", "def eight():\r\n \r\n number = \"73167176531330624919225119674426574742355349194934\\\r\n96983520312774506326239578318016984801869478851843\\\r\n85861560789112949495459501737958331952853208805511\\\r\n12540698747158523863050715693290963295227443043557\\\r\n66896648950445244523161731856403098711121722383113\\\r\n62229893423380308135336276614282806444486645238749\\\r\n30358907296290491560440772390713810515859307960866\\\r\n70172427121883998797908792274921901699720888093776\\\r\n65727333001053367881220235421809751254540594752243\\\r\n52584907711670556013604839586446706324415722155397\\\r\n53697817977846174064955149290862569321978468622482\\\r\n83972241375657056057490261407972968652414535100474\\\r\n82166370484403199890008895243450658541227588666881\\\r\n16427171479924442928230863465674813919123162824586\\\r\n17866458359124566529476545682848912883142607690042\\\r\n24219022671055626321111109370544217506941658960408\\\r\n07198403850962455444362981230987879927244284909188\\\r\n84580156166097919133875499200524063689912560717606\\\r\n05886116467109405077541002256983155200055935729725\\\r\n71636269561882670428252483600823257530420752963450\"\r\n\r\n greatest = 0\r\n \r\n i = 0\r\n while i < len(number) - 12:\r\n product = 1\r\n for j in range(13):\r\n product *= int(number[i + j])\r\n #j += 1\r\n if product > greatest:\r\n greatest = product\r\n i += 1\r\n \r\n return greatest", "def a(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n*((-1)**n) +a(n-1)", "def ne(n):\n return 4*n*n - 2*n + 1", "def nine():\r\n \r\n a = 3\r\n b = 4\r\n c = pythagorean(a, b)\r\n \r\n while a + b + c < 1001:\r\n while a + b + c < 1001:\r\n if a + b + c == 1000:\r\n return a * b * c\r\n b += 1\r\n c = pythagorean(a, b)\r\n a += 1\r\n b = a + 1\r\n c = pythagorean(a, b)", "def interleaved_sum(n, odd_term, even_term):\n k = 1\n def odd(k):\n if k > n:\n return 0\n else:\n return odd_term(k) + even(k + 1)\n def even(k):\n if k > n:\n return 0\n else:\n return even_term(k) + odd(k + 1)\n return odd(k)", "def solver_1star(d):\n return sum([math.floor(x / 3) - 2 for x in d])", "def multi_1(cur,p,n):\n\tr=p\n\tfor k in range(0,n-1):\n\t\tr=sum(cur,r,p)\n\treturn r", "def part_2(data: Iterator[str]) -> int:\n return solve(data, 5)", "def sum_series(n,v1=0,v2=1):\n\tL1=v2\n\tL2=v1\n\tif n<0:\n\t\tprint(\"please enter positive int value\")\n\n\telif n==0:\n\t\treturn v1\n\n\telif n==1:\n\t\treturn v2\n\n\telse:\n\t\tfor i in range(n-1):\n\t\t\tC=L1+L2\n\t\t\tL2=L1\n\t\t\tL1=C\n\t\treturn C", "def sqrSum(a, b, i, j):\n return (a - i)**2 + (b - j)**2" ]
[ "0.6762975", "0.61464405", "0.6060709", "0.599007", "0.59566283", "0.59457093", "0.5927478", "0.59023225", "0.5873529", "0.5866177", "0.58424276", "0.58367133", "0.58342266", "0.58271873", "0.58252615", "0.5824011", "0.5807665", "0.58015585", "0.5773675", "0.5770493", "0.57536995", "0.57412434", "0.5740458", "0.5728381", "0.572683", "0.5704375", "0.5678784", "0.5670154", "0.5669005", "0.56673855", "0.56636", "0.56600237", "0.56357324", "0.56349105", "0.563406", "0.56236815", "0.56195617", "0.5600756", "0.5587827", "0.5584904", "0.55816007", "0.5578997", "0.55771106", "0.55720794", "0.55689883", "0.5568514", "0.5547944", "0.55392176", "0.55392176", "0.5535083", "0.55223024", "0.55221385", "0.55175346", "0.5516434", "0.5513", "0.551161", "0.5508188", "0.5505734", "0.5504649", "0.5499189", "0.5490597", "0.5488718", "0.5471832", "0.54683775", "0.54660934", "0.5460034", "0.5453731", "0.545342", "0.54474074", "0.5437813", "0.54377013", "0.5437642", "0.54375166", "0.5436828", "0.5436473", "0.54356706", "0.5432736", "0.5432107", "0.54302096", "0.54267985", "0.54230094", "0.54167825", "0.5415288", "0.54090184", "0.5401576", "0.53920615", "0.5388766", "0.5382486", "0.5381144", "0.53758615", "0.537569", "0.5375267", "0.5373238", "0.53710496", "0.53565174", "0.53549194", "0.53533995", "0.53514844", "0.5351121", "0.53495973", "0.5343754" ]
0.0
-1
Compute LDA model & find perplexity, save topics list for coherence calc
def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start):
    perplexity_values = []
    lda_time = []
    topics_list = []

    i = rand_start
    for num_topics in n_topics:
        # create model
        t1 = time.time()
        lda_model = LatentDirichletAllocation(n_components=num_topics,
                                              doc_topic_prior=1/num_topics,
                                              topic_word_prior=0.1,
                                              n_jobs=39,
                                              random_state=i)
        lda_model.fit_transform(doc_term_matrix)
        t2 = time.time()
        lda_time.append(t2-t1)
        print(f" Model time: {t2-t1}", flush=True)

        # compute perplexity
        perplexity_values.append(lda_model.bound_)

        # create list of topics
        topics = list_topics(lda_model.components_, vectorizer, top_n=10)
        topics_list.append(topics)

        # output completion message
        i = i + 1
        print('Number of topics =', num_topics, "complete.", flush=True)

    return perplexity_values, lda_time, topics_list
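As a usage sketch: the function above relies on `time`, scikit-learn's `LatentDirichletAllocation`, and a `list_topics` helper that it does not define, so the imports, the `list_topics` re-implementation, and the tiny `docs` corpus below are all assumptions added for illustration, not part of the original record.

```python
import time  # needed by lda_models above

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation  # needed by lda_models above
from sklearn.feature_extraction.text import CountVectorizer


def list_topics(topic_word_matrix, vectorizer, top_n=10):
    # Assumed helper: return the top_n highest-weighted terms for each topic row.
    terms = vectorizer.get_feature_names_out()
    return [[terms[j] for j in np.argsort(row)[::-1][:top_n]]
            for row in topic_word_matrix]


docs = [
    "topic models find latent themes in a collection of documents",
    "perplexity and coherence are common ways to evaluate topic models",
    "latent dirichlet allocation is a generative probabilistic model of text",
]

vectorizer = CountVectorizer()
doc_term_matrix = vectorizer.fit_transform(docs)

perplexity_values, lda_time, topics_list = lda_models(
    doc_term_matrix, n_topics=[2, 3], vectorizer=vectorizer, rand_start=0)
```

The three returned lists are parallel to `n_topics`: one perplexity proxy (`bound_`), one fit time, and one top-words list per topic count tried.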
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n coherence_c_v = []\n coherence_u_mass = []\n print(\"Fitting models\")\n for num_topics in range(self.min_topics, self.max_topics, self.step):\n lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=self.id2word, num_topics=num_topics,\n random_state=100, chunksize=100, passes=20,\n per_word_topics=True, minimum_probability=0)\n if not os.path.exists(f\"data/intermediate/optimal_testing\"):\n os.mkdir(f\"data/intermediate/optimal_testing\")\n with open(f\"data/intermediate/optimal_testing/lda_model_{num_topics}_topics.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics} coherence: {coherence}\")\n coherence_c_v.append(coherence)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='u_mass')\n coherence_u_mass.append(coherence_model_lda.get_coherence())\n return coherence_c_v, coherence_u_mass", "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n alpha = list(np.arange(0.1, 1, 0.3))\n alpha.append(\"symmetric\")\n beta = copy.deepcopy(alpha)\n alpha.append(\"asymmetric\")\n corpus_sets = [gensim.utils.ClippedCorpus(corpus, int(len(corpus) * 0.75)), corpus]\n corpus_titles = [\"75% corpus\", \"100% corpus\"]\n model_results = {\"Validation_set\": [], \"Topics\": [], \"Alpha\": [], \"Beta\": [], \"Coherence\": []}\n print(\"Fitting models\")\n for i, corpus_set in enumerate(corpus_sets):\n for num_topics in self.topics_to_test:\n for a in alpha:\n for b in beta:\n lda_model = gensim.models.LdaMulticore(corpus=corpus_set, id2word=self.id2word, alpha=a,\n random_state=100, chunksize=100, passes=20,\n num_topics=num_topics,\n per_word_topics=True, minimum_probability=0, eta=b)\n if i == 1: # we only want to save the model if it's a model on the whole corpus\n if not os.path.exists(f\"data/intermediate/hyperparameter_testing\"):\n os.mkdir(f\"data/intermediate/hyperparameter_testing\")\n with open(f\"data/intermediate/hyperparameter_testing/lda_{num_topics}_\"\n f\"topics{a}_alpha_{b}_eta.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents,\n dictionary=self.id2word, coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics}, alpha {a} eta {b} corpus {corpus_titles[i]} coherence: {coherence}\")\n model_results['Validation_set'].append(corpus_titles[i])\n model_results['Topics'].append(num_topics)\n model_results['Alpha'].append(a)\n model_results['Beta'].append(b)\n model_results['Coherence'].append(coherence)\n pd.DataFrame(model_results).to_csv(\"hyperparamter_tuning_results.csv\", index=False)", "def _lda(self):\n self.ldamodel = gensim.models.ldamodel.LdaModel(self.gensim_corpus, \n num_topics=self.n_topics, \n id2word=self.id_map, \n passes=self.n_passes,\n random_state=42)\n \n self.topic_matrix = self.ldamodel.print_topics(num_topics=self.n_topics, \n num_words=self.n_words)", "def train_lda_topic_model_with_mallet(texts, path_mallet,\n 
terms_to_remove=[], num_topics=50,\n no_below=10, no_above=0.9,\n scoring=False, start=2, step=3):\n preprocessed_corpus = []\n print ('training of gensim corpus began')\n for i, text in enumerate(texts):\n if i == 0:\n # todo filter here\n text = text.split()\n\n # Additional filtering steps #\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n\n dct = initialize_gensim_dictionary([text])\n else:\n text = text.split()\n # Additional filtering steps\n\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n add_documents_to_gensim_dictionary(dct, [text])\n # todo:this is to be integrated to the building process\n\n if len(terms_to_remove) > 0:\n for term in terms_to_remove:\n dct.filter_tokens(bad_ids=[dct.token2id[term]])\n\n dct.filter_extremes(no_below=no_below, no_above=no_above)\n\n gensim_corpus = [dct.doc2bow(bag_of_word.split()) for bag_of_word in texts]\n print ('gensim corpus done')\n if scoring:\n\n coherence_values = []\n\n for n in range(start, num_topics, step):\n\n lda = LdaMallet(constants.PATH_TO_MALLET,\n gensim_corpus, id2word=dct,\n num_topics=n)\n coherencemodel = CoherenceModel(model=lda,\n texts=preprocessed_corpus,\n dictionary=dct, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return coherence_values\n\n else:\n lda = LdaMallet(constants.PATH_TO_MALLET, gensim_corpus,\n id2word=dct, num_topics=num_topics)\n # Visualize LDA results, poor results obtained.\n # from gensim.models.wrappers import ldamallet\n # lda_model = ldamallet.malletmodel2ldamodel(lda)\n # vis = pyLDAvis.gensim.prepare(lda_model, gensim_corpus, dct)\n # pyLDAvis.save_html(vis , 'test.html')\n return {'model': lda, 'corpus': gensim_corpus}", "def build_model(num_topics=30):\n data = utils.read_wiki(\"wiki.train.tokens\")\n\n # preprocessing: remove too frequent words, stopwords ...\n logger.info(\"Start preprocessing, this will take quite some time ...\")\n list_of_tokens, bigrams = preprocess(data)\n\n id2word = corpora.Dictionary(list_of_tokens)\n id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=VOCAB_SIZE)\n logger.info(f\"Done processing dataset len, vocab len {len(id2word.keys())}, {len(list_of_tokens)}\")\n \n # convert data into df vectors\n corpus = [id2word.doc2bow(tokens) for tokens in list_of_tokens]\n\n for num_topics in range(10, 100, 6):\n lda_model = LdaModel(corpus, num_topics=num_topics,\n id2word=id2word,\n passes=20,\n iterations=400,\n # alpha=[0.01]*num_topics,\n alpha=\"auto\",\n # eta=[0.01] * VOCAB_SIZE,\n eta=\"auto\")\n \n # save the model\n path = pathlib.Path(f\"{SAVING_DIR}/lda_topic_{num_topics}\")\n path.mkdir(parents=True, exist_ok=True)\n path = path / \"lda.model\"\n lda_model.save(str(path.absolute()))\n id2word.save(UNIGRAM_FILE)\n bigrams.save(BIGRAM_FILE)\n\n # visualize topics by LDAviz\n vis = gensimvis.prepare(topic_model=lda_model, corpus=corpus, dictionary=id2word)\n pathlib.Path(\"lda_vizs\").mkdir(parents=True, exist_ok=True)\n pyLDAvis.save_html(vis, f'lda_vizs/lda_visualization_{num_topics}.html')\n return id2word, bigrams, lda_model", "def modelOpti(corpus, dictionary, limit, 
start=2, step=2):\n cohVals = []\n modelList = []\n for num_topics in range(start, limit, step):\n model = gensim.models.LdaMulticore(corpus, num_topics = num_topics, id2word = dictionary, chunksize = 700, passes = 15, workers = 8, eval_every = None)\n modelList.append(model)\n cohLDA = CoherenceModel(model = model, corpus = corpus, dictionary = dictionary, coherence = 'u_mass', processes = 8)\n cohVals.append(cohLDA.get_coherence())\n \n return modelList, cohVals", "def f(DATA_LINK, DATA_COLUMN_NAME, STOPWORD_CHOICE, STOPWORD_LINK, NGRAM_CHOICE,NGRAM_NUM, TestData,topic_number_user,fetchArray):\r\n data = pd.read_csv(DATA_LINK)\r\n df=data[DATA_COLUMN_NAME]\r\n ######################################################################\r\n if (STOPWORD_CHOICE):\r\n stopwords=prepare_stopwords(STOPWORD_LINK)\r\n else:\r\n stopwords=prepare_stopwords(link='stopwords.csv')\r\n ######################################################################\r\n\r\n df=clean(df)\r\n\r\n processed_docs = []\r\n\r\n for doc in df:\r\n processed_docs.append(preprocess(doc,stopwords))\r\n ############################################################################\r\n if NGRAM_CHOICE:\r\n ngram=[]\r\n ngram_mod=[]\r\n for i in range(NGRAM_NUM):\r\n if(i==0):\r\n ngram.append(gensim.models.Phrases(processed_docs[0:10000], min_count=5, threshold=100)) # higher threshold fewer phrases\r\n else:\r\n ngram.append(gensim.models.Phrases(ngram[i-1][processed_docs[0:10000]], min_count=5, threshold=100)) # higher threshold fewer phrases\r\n ngram_mod.append(gensim.models.phrases.Phraser(ngram[i]))\r\n \r\n ###########################################################################\r\n\r\n ################################################################################\r\n if NGRAM_CHOICE:\r\n # Form Ngrams\r\n data_words_ngrams = make_ngrams(processed_docs,NGRAM_NUM,ngram_mod)\r\n\r\n # Do lemmatization keeping only noun, adj, vb, adv\r\n data_lemmatized=[]\r\n for i in range(len(data_words_ngrams)):\r\n data_lemmatized.append(lemmatization(data_words_ngrams[i]))\r\n else:\r\n data_lemmatized=processed_docs\r\n ################################################################################\r\n \r\n\r\n dictionary = gensim.corpora.Dictionary(data_lemmatized)\r\n\r\n dictionary.filter_extremes(no_below=15, no_above=0.1, keep_n= 100000)\r\n\r\n bow_corpus = [dictionary.doc2bow(doc) for doc in data_lemmatized]\r\n\r\n lda_model = gensim.models.LdaMulticore(bow_corpus, \r\n num_topics = topic_number_user, \r\n id2word = dictionary, \r\n passes = 10, workers = 2)\r\n\r\n for idx, topic in lda_model.print_topics(-1):\r\n print(\"Topic: {} \\nWords: {}\".format(idx, topic ))\r\n print(\"\\n\")\r\n lda_model.save('turk_lda.gensim')\r\n\r\n unseen_document = TestData\r\n\r\n rx = re.compile('\\W+')\r\n unseen_document = rx.sub(' ', unseen_document).strip()\r\n\r\n\r\n # Data preprocessing step for the unseen document\r\n bow_vector = dictionary.doc2bow(preprocess(unseen_document,stopwords))\r\n\r\n topics = []\r\n for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]):\r\n print(\"Score: {}\\t Topic: {}\".format(score, lda_model.print_topic(index, 5)))\r\n # rslt = result(str(score), str(lda.print_topic(index,5)))\r\n rslt = result(str(score), str(re.findall('\"([^\"]*)\"', str(lda_model.print_topic(index,5)))))\r\n topics.append(rslt)\r\n\r\n fetchArray.put(topics)", "def main(self, words_docs, cleaned_sentences, lang, model_dir, number_of_clusters, embedding_model, model_id):\n\t\ttry:\n\t\t\tif 
embedding_model == \"tfidf\": text_vector = self.create_tfidf_vectors(cleaned_sentences)\n\t\t\telif embedding_model == \"word2vec\": text_vector = self.create_w2v_vectors(words_docs)\n\t\t\tmodel, pred_dict = self.train_model(cleaned_sentences, text_vector, number_of_clusters, lang, model_id, model_dir)\n\t\t\tdf_dominant_topic = self.evaulate_clusters(pred_dict, model_dir)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n Error in main : \",e)\n\t\t\tprint(\"\\n Error details : \", traceback.format_exc())\n\n\t\treturn df_dominant_topic", "def maximization_step(self, number_of_topics, verbose):\n if verbose:\n print(\"M step:\")\n\n self.topic_word_prob = np.zeros((number_of_topics, len(self.vocabulary)))\n self.topic_word_prob_collection_specific = []\n\n for k in range(self.number_of_collections):\n topic_word_prob_collection_specific = np.zeros((number_of_topics, len(self.vocabulary)))\n for i in range(self.number_of_documents):\n # update P(w | z)\n\n # ############################\n\n self.topic_word_prob = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), self.topic_prob_C[k][i])))\n\n topic_word_prob_collection_specific = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), 1 - self.topic_prob_C[k][i])))\n\n # update P(z | d)\n\n # ############################\n\n matrix = np.dot(np.transpose(self.term_doc_matrix[k][i]), self.topic_prob_j[k][i])\n self.document_topic_prob[k][i] = normalize_row(matrix)\n topic_word_prob_collection_specific = normalize_row(topic_word_prob_collection_specific)\n self.topic_word_prob_collection_specific.append(topic_word_prob_collection_specific)\n\n self.topic_word_prob = normalize_row(self.topic_word_prob)\n\n #print(\"pi:\")\n #print(self.document_topic_prob)\n #print(\"p(w|theta):\")\n #print(self.topic_word_prob)", "def optimize(self):\n scores = []\n n_topics = np.arange(self.topic_range[0], self.topic_range[1]+1)\n print('Running optimization with topic range from {0} to {1}'.format(\n self.topic_range[0],self.topic_range[1]))\n self._preproc()\n\n # Perform LDA for topic_range\n for n in n_topics:\n self.n_topics = n\n self._lda()\n if self.verbose:\n print('LDA completed for {0} topics.'.format(n))\n self._evaluate()\n scores.append(self.score)\n \n # Visualize results\n print('Optimization completed, plotting results...')\n fig1, ax1 = plt.subplots()\n ax1.plot(n_topics, np.asarray(scores))\n ax1.set_title('Coherence for topic range from {0} to {1}'.format(\n self.topic_range[0], self.topic_range[1]), fontsize= 16)\n ax1.set_xlabel('n_topics')\n ax1.set_ylabel('score')\n ax1.set_xticks(n_topics)\n plt.show()", "def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = 
pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta", "def generate_lda_model(self, B, topics, beta=0.01):\n # Suggestions from plda:\n # https://code.google.com/p/plda/wiki/PLDAQuickStart\n alpha = 50/float(topics)\n\n # Se o modelo LDA ainda nao existe para o fold, inferi-lo\n path_base = './exp/'\n path_base_lda = '%slda/' % path_base\n path_test_data = '%stest_data.txt' % (path_base_lda)\n\n m, n = B.shape\n try:\n os.makedirs(path_base_lda)\n except OSError:\n pass\n\n results = []\n for user in range(1, m):\n attractions = list(B[user].nonzero()[0])\n attractions.append(\"\") # para colocar o ultimo 1\n attractions = [ str(a) for a in attractions ]\n results.append(\" 1 \".join(attractions))\n\n a = open(path_test_data, 'w')\n a.write(\"\\n\".join(results))\n a.close()\n\n comando_modelo = (\n '../plda/lda --num_topics %(topics)s --alpha %(alpha)s --beta %(beta)s '\n '--training_data_file %(path)stest_data.txt '\n '--model_file %(path)slda_model.txt --burn_in_iterations 250 '\n '--total_iterations 300'\n ) % {'path': path_base_lda, 'beta': beta, 'alpha': alpha, 'topics': topics}\n\n print comando_modelo\n output = subprocess.check_output(shlex.split(comando_modelo),\n stderr=subprocess.STDOUT)\n print output\n\n comando_inferencia = (\n '../plda/infer --alpha %(alpha)s --beta %(beta)s '\n '--inference_data_file %(path)stest_data.txt '\n '--inference_result_file %(path)sinference_result.txt '\n '--model_file %(path)slda_model.txt --total_iterations 300 '\n '--burn_in_iterations 250'\n ) % {'path': path_base_lda, 'beta': beta, 'alpha': alpha}\n\n print comando_inferencia\n output = subprocess.check_output(shlex.split(comando_inferencia),\n stderr=subprocess.STDOUT)\n print output\n\n # Handling LDA for attractions\n lda_attractions = {}\n for l in open('%slda_model.txt' % path_base_lda):\n attraction, data = l.split('\\t')\n data = data.split(' ')\n data = [ Decimal(d) for d in data ]\n s = sum(data)\n data = [ d/s for d in data ]\n lda_attractions[attraction] = data\n\n # Handling LDA for each user\n lda_users = {}\n user = 0\n for l in open('%sinference_result.txt' % path_base_lda):\n user += 1\n data = l.split(' ')\n data = [ Decimal(d) for d in data ]\n s = sum(data)\n data = [ d/s for d in data ]\n lda_users[user] = data\n\n self.model = {\n 'users': lda_users,\n 'attractions': lda_attractions,\n }", "def post_process_result_of_lda_topic_model(lda_model, gensim_corpus,\n document_collection,\n document_collection_filtered,\n n_closest=25):\n # Prepare containers to store results\n # Container to keep the document topic matrix\n n_closest = - n_closest\n document_topic_matrix = []\n # Container to keep topics and the closest texts to each topic\n topic_closest_doc_with_topics_words = []\n # Container to keep topics\n all_topics = lda_model.show_topics(50)\n\n # Create an LDA corpus from the original gensim corpus\n lda_corpus = lda_model[gensim_corpus]\n\n # Iterate through the lda corpus and create the document topic matrix\n for i, documents in enumerate(lda_corpus):\n # Data returned is not proper numpy matrix\n document_topic_matrix.append(\n np.array([elements[1]for elements in documents]))\n\n # Create the proper numpy matrix\n document_topic_matrix = np.vstack(document_topic_matrix)\n\n # Find the 
closest texts to a given topic\n # Iterate through the transpose of the document topic matrix\n for i, element in enumerate(document_topic_matrix.T):\n # Identify the id of 15 closest texts of each topic\n closest = element.argsort(axis=0)[n_closest:][::-1]\n # Create a container to keep each text with the id above\n texts = []\n for element in closest:\n texts.append({'matched_text':\n document_collection_filtered[element],\n 'matched_text_words':\n document_collection[element]['match_word'],\n 'testimony_id': document_collection[element]\n ['testimony_id']})\n\n # Append them to container\n topic_closest_doc_with_topics_words.append({'texts': texts,\n 'topic_words':\n all_topics[i]})\n\n return {'topic_documents': topic_closest_doc_with_topics_words,\n 'document_topic_matrix': document_topic_matrix}", "def compute(self, topics, save_filename):\n texts = []\n\n tokenizer = RegexpTokenizer(r'\\w+')\n\n # create English stop words list\n en_stop = stopwords.words('english')\n\n # Create p_stemmer of class PorterStemmer\n p_stemmer = PorterStemmer()\n\n for i in self.__doc_set:\n # clean and tokenize document string\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # stem tokens\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n\n # add tokens to list\n texts.append(stemmed_tokens)\n\n # turn our tokenized documents into a id <-> term dictionary\n dictionary = gensim.corpora.Dictionary(texts)\n\n # convert tokenized documents into a document-term matrix\n corpus = [dictionary.doc2bow(text) for text in texts]\n\n # generate LDA model\n lsi_model = gensim.models.LsiModel(corpus, num_topics=topics, id2word=dictionary)\n\n save_filename += \"_{}\".format(topics)\n\n dictionary.save(save_filename + \".dict\")\n gensim.corpora.MmCorpus.save_corpus(save_filename + \".mm\", corpus, id2word=dictionary)\n lsi_model.save(save_filename + \".model\")\n\n return lsi_model, corpus, dictionary", "def learn(self, docs, labels, alpha=1.0):\n assert len(docs)==len(labels)\n labelCounts = {l: 0 for l in self.CLASSES}\n wordCounts = {l: Counter() for l in self.CLASSES}\n totalWordCounts = {l: 0 for l in self.CLASSES}\n # iterate over documents in order to record\n for i in range(0, len(labels)):\n # count(y) in labelCounts\n l = labels[i]\n labelCounts[labels[i]] +=1\n # count(y,w) for all words in totalWordCounts\n totalWordCounts[labels[i]] += len(docs[i])\n words = docs[i]\n # count(y,word) in wordCounts,\n \n for word in words:\n wordCounts[labels[i]][word] += 1\n # and to store the training vocabulary in self.trainVocab\n self.trainVocab.add(word)\n # compute and store prior distribution over classes\n # (unsmoothed) in self.priorProbs\n print(\"Label,priorProbs,Label Count\", file=sys.stderr)\n for l in self.priorProbs:\n self.priorProbs[l] = np.divide(labelCounts[l], len(labels))\n print(l +\",\"+str(self.priorProbs[l])+\",\"+str(labelCounts[l]), file=sys.stderr) #This was for part one\n for word in self.trainVocab: \n self.likelihoodProbs[l][word] = np.divide(wordCounts[l][word]+self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n self.likelihoodProbs[l]['**OOV**'] = np.divide(self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n # Sanity checks--do not modify\n assert len(self.priorProbs)==len(self.likelihoodProbs)==len(self.CLASSES)>2\n assert .999 < sum(self.priorProbs.values()) < 1.001\n for y in self.CLASSES:\n assert .999 < 
sum(self.likelihoodProbs[y].values()) < 1.001,sum(self.likelihoodProbs[y].values())\n assert 0 <= self.likelihoodProbs[y]['**OOV**'] < 1.0,self.likelihoodProbs[y]['**OOV**']", "def learn_topic_model_activities(self):\n print \"\\nLearning a topic model with LDA:\"\n\n doc_topic, topic_word = tm.run_topic_model(self.accu_path, self.config['lda'])\n\n tm.dump_lda_output(self.lda_path, doc_topic, topic_word)\n print \"Topic Modelling - done.\\n\"\n return True", "def investigate_topics(model, loaded_data, labels, videos, prob_of_words, language_indices, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n code_book, graphlets_, uuids, miss_labels = loaded_data\n print \"1\"\n import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n graphs = loaded_data[1]\n # ****************************************************************************************************\n # Relevance\n # ****************************************************************************************************\n names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner','Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / probability_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n print('Topic {}: {}'.format(i, list(set(objs))))\n print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = 
doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n print \"2\"\n import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words", "def evaluate_lda(model, dictionary, corpus, texts, calculate_coherence=True, use_multicore=False):\n # perplexity = model.log_perplexity(corpus)\n coherence_lda = None\n if calculate_coherence:\n coherence_model_lda = CoherenceModel(model=model, texts=texts, dictionary=dictionary,\n coherence='c_v', processes=N_WORKERS if use_multicore else 1)\n coherence_lda = coherence_model_lda.get_coherence()\n return 0, coherence_lda", "def __getitem__(self, doc):\n lda_model = ldamodel.LdaModel(\n num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)\n lda_model.topics = np.zeros((self.vocab_len, self.num_topics))\n ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)\n\n time_lhoods = []\n for time in range(self.num_time_slices):\n lda_model = self.make_lda_seq_slice(lda_model, time) # create lda_seq slice\n lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)\n time_lhoods.append(lhood)\n\n doc_topic = ldapost.gamma / ldapost.gamma.sum()\n # should even the likelihoods be returned?\n return doc_topic", "def elbow_lda(self, corpus, num_iter):\n coherence_values = []\n model_list = []\n for num_topics in range(self.start, self.stop, self.step):\n print('Topics Tested: ' + str(num_topics)) \n model = lda.LDA(corpus, num_topics, num_iter)\n model_list.append(model)\n coherence = model.get_coherence_score(corpus)\n coherence_values.append(coherence)\n return model_list, coherence_values", "def build_model_gensim(corpus, id2word, num_topics=20, validset=None):\n\n # Build LDA model\n lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics,\n random_state=100,\n eval_every=5,\n chunksize=10000, #nb of docs in each training chunk\n passes=50,\n iterations=500,\n alpha=0.001,\n per_word_topics=True,\n workers=4,)\n\n print(\"eta\",lda_model.eta)\n print(\"alpha\",lda_model.alpha)\n\n if validset:\n valid_corpus, valid_id2word, valid_data_lemmatized = validset\n print(lda_model.log_perplexity(valid_corpus, len(valid_corpus)))\n\n return lda_model", "def lda(documents_as_bag_of_words=[], topics_num=TOPICS_NUM, write_results_to=FILE_DEFAULT_GENSIM_LDA_RESULTS):\n\n\timport gensim\n\n\ttemp_files = generate_temp_files_for_lda_or_lsa(documents_as_bag_of_words)\n\n\tlda_model = gensim.models.ldamodel.LdaModel(corpus=temp_files['corpus'], id2word=temp_files['id2word'], num_topics=TOPICS_NUM, update_every=1, chunksize=10000, passes=1)\n\n\ttopics = lda_model.print_topics(TOPICS_NUM)\n\n\twith open(write_results_to, 'w', encoding='utf-8') as f:\n\t\tfor topic in topics:\n\t\t\tf.write('{}: {}\\n'.format(str(topic[0]), topic[1]))\n\t\tf.close()", "def investigate_topics(model, code_book, labels, videos, prob_of_words, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n # code_book, graphlets, uuids, miss_labels = loaded_data\n # print \"1\"\n # import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n\n # ****************************************************************************************************\n # Relevance\n # 
****************************************************************************************************\n # names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner',\n # 'Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / prob_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # vis.genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n # print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n # print('Topic {}: {}'.format(i, list(set(objs))))\n # print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n # print \"2\"\n # import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words", "def lda_description(review_text, min_topic_freq=0.05,topic_model_file='lda_model_10'):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n \n # parse the review text with spaCy\n parsed_review = nlp(review_text)\n \n # lemmatize the text and remove punctuation and whitespace\n unigram_review = [token.lemma_ for token in parsed_review\n if not punct_space(token)]\n \n # apply the first-order and secord-order phrase models\n bigram_review = bigram_model[unigram_review]\n trigram_review = trigram_model[bigram_review]\n \n # remove any remaining stopwords\n trigram_review = [term for term in trigram_review\n if not term in 
spacy.lang.en.STOP_WORDS]\n #print('bow:',trigram_review)\n \n # create a bag-of-words representation\n review_bow = sents_dict.doc2bow(trigram_review)\n \n \n # create an LDA representation\n lda = LdaMulticore.load(joinp(pilot_path, topic_model_file)) # my addition\n review_lda = lda[review_bow]\n \n \n # mine\n if topic_model_file=='lda_model_25':\n topic_names=topic_names_25\n elif topic_model_file=='lda_model_10':\n topic_names=topic_names_10\n #\n \n # sort with the most highly related topics first\n #review_lda = sorted(review_lda, key=lambda topic_number,freq: freq)\n listt=[]\n for topic_number, freq in review_lda:\n if freq < min_topic_freq:\n break\n \n # print the most highly related topic names and frequencies\n #print('{:10} {}'.format(topic_names[topic_number],round(freq, 3))) ## for now not putting yet topic names\n #print('{:25} {}'.format(topic_number,round(freq, 3))) \n x=[topic_number,topic_names[topic_number],np.round(freq, 3)]\n listt.append(x)\n return(listt)", "def run_lda(args, corpus, pre, dictionary=None, workers=None, docs=None, num_files=None):\n MALLET_PATH = os.environ.get(\"MALLET_PATH\", \"lda-tools/ext/mallet/bin/mallet\")\n if args.gensim:\n lda = gensim.models.wrappers.LdaMallet\n model = lda(MALLET_PATH, corpus, num_topics=args.num_topics,\n id2word=dictionary, optimize_interval=args.optimize_interval,\n workers=workers, iterations=args.num_iterations,\n prefix=pre)\n else:\n rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '-'\n prefix = os.path.join(tempfile.gettempdir(), rand_prefix)\n mallet_corpus = prefix + 'corpus'\n\n print('Generating topic model.')\n form = 'tsv' if args.tsv_corpus else \"text\"\n tsv_corpus = None\n if not args.tsv_corpus:\n os.makedirs(mallet_corpus)\n corpus.export(mallet_corpus, abstract=False, form=form)\n elif args.year_split != -1:\n year, lines = docs\n os.makedirs(mallet_corpus)\n tsv_corpus = os.path.join(mallet_corpus, str(year) + \"-tmp.tsv\")\n with open(tsv_corpus, 'w') as f:\n f.write(\"\\n\".join(lines))\n else:\n tsv_corpus = args.tsv_corpus\n\n mallet_corpus = None if args.tsv_corpus else mallet_corpus\n model = Mallet(MALLET_PATH, mallet_corpus, num_topics=args.num_topics,\n iters=args.num_iterations, bigrams=args.bigrams_only,\n topical_n_grams=args.topical_n_grams,\n remove_stopwords=(not args.topical_n_grams), prefix=pre,\n print_output=True, file=tsv_corpus, min_df=args.min_df,\n max_df=args.max_df, num_files=num_files)\n return model", "def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,\n lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):\n\n self.init_lda_post()\n # sum of counts in a doc\n total = sum(count for word_id, count in self.doc)\n\n model = \"DTM\"\n if model == \"DIM\":\n # if in DIM then we initialise some variables here\n pass\n\n lhood = self.compute_lda_lhood()\n lhood_old = 0\n converged = 0\n iter_ = 0\n\n # first iteration starts here\n iter_ += 1\n lhood_old = lhood\n self.gamma = self.update_gamma()\n\n model = \"DTM\"\n\n if model == \"DTM\" or sslm is None:\n self.phi, self.log_phi = self.update_phi(doc_number, time)\n elif model == \"DIM\" and sslm is not None:\n self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)\n\n lhood = self.compute_lda_lhood()\n converged = np.fabs((lhood_old - lhood) / (lhood_old * total))\n\n while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:\n\n iter_ += 1\n lhood_old = lhood\n self.gamma = 
self.update_gamma()\n model = \"DTM\"\n\n if model == \"DTM\" or sslm is None:\n self.phi, self.log_phi = self.update_phi(doc_number, time)\n elif model == \"DIM\" and sslm is not None:\n self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)\n\n lhood = self.compute_lda_lhood()\n converged = np.fabs((lhood_old - lhood) / (lhood_old * total))\n\n return lhood", "def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def 
topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def topics_score_per_doc(lda_model, list_lemma):\n #Création d'un dictionnaire gensim\n array_lemma = np.array(list_lemma)\n dictionary = gensim.corpora.Dictionary(array_lemma)\n\n #Création d'un \"bag of words\" avec la fonction doc2bow\n bow_corpus = [dictionary.doc2bow(doc) for doc in array_lemma]\n\n for i in range(len(list_lemma)):\n print(\"\\nFor document {}\".format(i+1))\n for index, score in sorted(lda_model[bow_corpus[0]], key=lambda tup: -1*tup[1]):\n print(\"\\nScore: {}\\t \\nTopic: {}\".format(score, lda_model.print_topic(index, 10)))", "def compute_coherence_lda(dictionary, corpus, texts, limit, start=2, step=3):\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = gensim.models.ldamodel.LdaModel(\n corpus=corpus,\n random_state=0,\n num_topics=num_topics,\n id2word=dictionary,\n# minimum_probability=0.3, \n alpha = 'auto', \n eta = 'auto')\n model_list.append(model)\n coherencemodel = CoherenceModel(\n model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values", "def __init__(self,corpus,topic_number=10,iteration_number=1000,burn_in=500,update_cycle=100,alpha=None,beta=None):\n # documents, key: id of document, value: list of word in an specific document.\n self.documents = corpus.documents\n # number of iteration when using Gibbs Sampling.\n self.iteration_number = iteration_number\n self.topic_number = topic_number\n self.burn_in = burn_in\n self.update_cycle = update_cycle\n # number of terms.\n self.term_number = len(corpus.word_id)\n # number of documents.\n self.document_number = len(self.documents)\n # if alpha and beta is None, then assign values to them.\n if alpha == None:\n self.alpha = [2.0] * self.topic_number\n else:\n self.alpha = alpha\n if beta == None:\n self.beta = [0.5] * self.term_number\n else:\n self.beta = beta\n # The sum of elements in beta.\n self.sum_beta = sum(self.beta)\n # The sum of elements in alpha.\n self.sum_alpha = sum(self.alpha)\n # counter, [m][k] refers to the number of times that topic k has been observed with a word in document m.\n self.document_topic_count_matrix = {}\n # counter, [k][t] refers to the number of times that term t has been observed with topic k.\n self.topic_term_count_matrix = {}\n # distribution matrix, [m][k] refers the 
probability that assigning topic k to document m.\n self.document_distribution_over_topic = {}\n # distribution matrix, [k][t] refers the probability that assigning topic k to term t.\n self.topic_distribution_over_term = {}\n # counter, [m] refers the number of times that all topics have been observed with a word in document m.\n # also, [m] equals to the number of words in document m.\n self.sum_document_by_topic_count = {}\n # counter, [k] refers the number of times that all terms have been observed with topic k.\n self.sum_topic_by_term_count = {}\n # topic assigned to an word in a document. [m][n] refers to the topic that assigned to the n th word in document\n # m.\n self.word_topic_assignment = {}\n # the number of times that the distribution has been updated.\n self.update_number = 0.0", "def test_model(docs, labels,model, log_writer:LogWriter,test_name):\n stats = []\n topic_indexes, topics_of_index = connect_topic_id_to_topics(model,prep_docs_for_assesment(docs,labels),log_writer)\n distribution = []\n for index, article in enumerate(docs):\n analysis_res = model.analyse_text(article)\n if len(analysis_res) == 0:\n print(\"nothing found\")\n continue\n res = max(analysis_res, key=lambda item: item[1])\n if res[0] not in topics_of_index:\n topics_of_index[res[0]] = [labels[index]]\n topic_indexes[labels[index]] = res[0]\n print(\"continuing\")\n continue\n distribution.append(res[0])\n stats.append(1 if labels[index] in topics_of_index[res[0]] else 0)\n # self.log_writer.add_log(\"Article with topic {} was assigned {} with {} certainty.\".format(article[0], \"correctly\" if res[0] == self.topic_positions[article[0]] else \"wrong\", res[1]))\n accuracy = sum(stats) / len(stats)\n log_writer.add_log(\"{} got accuracy {}\".format(test_name,accuracy))\n log_writer.add_log(\"Real distribution was {}\".format(dict(Counter(labels))))\n log_writer.add_log(\"Predicted distribution was {}\".format(dict(Counter(distribution))))\n return accuracy", "def lda_predict_df(df, col_name, lda_model, dictionary, lda_topic_name_dict=None, only_best_prediction=True):\n# for index, score in sorted(LDAmodel_lang[bow_vector], key=lambda tup: -1*tup[1]):\n# print(\"Score: {}\\t Topic: {}\".format(score, lda_model.print_topic(index, 5)))\n cols = list(df.columns)\n df['bow'] = list(map(lambda doc: dictionary.doc2bow(doc), df[col_name]))\n if only_best_prediction:\n if lda_topic_name_dict is None:\n df['prediction'] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model)\n df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n else:\n df['prediction'] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model, lda_topic_name_dict=lda_topic_name_dict)\n df[['pred_probability','pred_index','pred_label']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n df.drop(['prediction'], axis=1)\n else:\n num_topics = len(lda_model.get_topics())\n for i in range(num_topics):\n df[i] = df['bow'].apply(PredictTopicFromBOW,lda_model=lda_model, prediction_index=i)\n# df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index= df.index)\n\n # Unpivot values, and split predictions\n values = [i for i in range(num_topics)]\n df = pd.melt(df, id_vars=cols, value_vars=values)\n df = df[df['value'].isnull()==False].sort_values(by=[col_name])\n df.rename(columns={'variable':'index','value':'prediction'}, inplace=True)\n df[['pred_probability','pred_index']] = pd.DataFrame(df.prediction.values.tolist(), index=df.index)\n \n return df", 
"def maximization_step(self, number_of_topics):\n print(\"M step:\")\n #print(\"Maximization_step has started >>>>>\"+number_of_topics)\n \n \n\t\t \n for topicVal in range(0, number_of_topics):\n for wordVal in range(0, self.vocabulary_size):\n createdDataMat = 0\n for docVal in range(0,self.number_of_documents):\n createdDataMat = createdDataMat+ (self.term_doc_matrix[docVal][wordVal]*self.topic_prob[docVal][topicVal][wordVal])\n self.topic_word_prob[topicVal][wordVal] = createdDataMat\n self.topic_word_prob = normalize(self.topic_word_prob)\n for docVal in range(0,self.number_of_documents):\n for topicVal in range(0,number_of_topics):\n self.document_topic_prob[docVal][topicVal] = 0\n for wordVal in range(0, self.vocabulary_size):\n self.document_topic_prob[docVal][topicVal] = self.document_topic_prob[docVal][topicVal] + (self.term_doc_matrix[docVal][wordVal]*self.topic_prob[docVal][topicVal][wordVal])\n self.document_topic_prob = normalize(self.document_topic_prob)\n #print(\"Document Topic Problem >>> \"+self.document_topic_prob)", "def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q", "def plsa(data_paths=[PATH_TO_RAW_DATA], topics_num=TOPICS_NUM, write_results_to=FILE_DEFAULT_PLSA_RESULTS):\n\t\n\timport plsa\n\timport glob\n\timport os\n\tcorpus = plsa.Corpus() # instantiate corpus\n\tdocument_paths = data_paths\n\tfor document_path in document_paths:\n\t for document_file in glob.glob(os.path.join(document_path, '*.txt')):\n\t document = plsa.Document(document_file) # instantiate document\n\t document.split(list_stopwords(lang='en')) # tokenize\n\t corpus.add_document(document) # push onto corpus documents list\n\n\tcorpus.build_vocabulary()\n\tcorpus.plsa(TOPICS_NUM, 1)\n\n\tV = len(corpus.vocabulary) \n\tassert(TOPICS_NUM < V)\n\tf = open(write_results_to, \"w\")\n\tfor k in range(TOPICS_NUM):\n\t word_prob = corpus.topic_word_prob[k, :]\n\t word_index_prob = []\n\t for i in range(V):\n\t word_index_prob.append([i, word_prob[i]])\n\t word_index_prob = sorted(word_index_prob, key=itemgetter(1), reverse=True) # sort by word count\n\t f.write(\"Topic #\" + str(k) + \":\\n\")\n\t for i in range(TOPICS_NUM):\n\t index = word_index_prob[i][0]\n\t f.write(corpus.vocabulary[index] + 
\" \")\n\t f.write(\"\\n\")\n\t \n\tf.close()", "def test_lda_topic_model_generator_dimensions( ):\n N = 1000\n D = 1000\n K = 10\n W = 100\n\n tm = LDATopicModel.generate( K, D, a0 = 15 )\n assert( tm.topics.shape == (D, K) )\n assert( tm.weights.shape == (K,) )\n assert( sc.allclose( tm.alphas.sum(), 15 ) )\n\n docs = tm.sample( N, words = W, n_views = 3 )\n # Each document is a row\n for v in docs:\n assert( v.shape == (N, D) )", "def compute_topic_model(year_from=1900, year_to=2020, venues_filter=None, n_topics=100, use_lemmer=True,\n min_df=2, max_df=0.8):\n start = time.time()\n out_fileprefix = get_output_fileprefix(year_from, year_to, venues_filter, n_topics)\n\n corpus, tf_features_names = get_corpus_gensim_for_learning(year_from, year_to, venues_filter, use_lemmer, min_df, max_df)\n execute_lda_gensim(corpus, tf_features_names, n_topics, out_fileprefix)\n\n end = time.time()\n return year_from, year_to, n_topics, (end - start)", "def train_lda(obs):\n print('Training LDA model...')\n lda = LatentDirichletAllocation(n_topics=42, max_iter=100, \n doc_topic_prior=0.0001,\n learning_method='online',\n learning_offset=50., \n topic_word_prior=0.001,\n random_state=0)\n lda.fit_transform(obs)\n pickle.dump(lda, open(\"ilda.data\", \"wb\" ))\n return lda", "def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = []\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation", "def process_topics(self):\n self._init_lda()\n f = open(self.OUTPUT_PATH, \"w\")\n for link in self.electrical_links:\n try:\n self.logger.info(\"processing: {0}\".format(link))\n page = wikipedia.page(link)\n title = gensim.parsing.preprocess_string(page.title)\n content = 
gensim.parsing.preprocess_string(page.content)\n\n title_bow = self.dictionary.doc2bow(title)\n content_bow = self.dictionary.doc2bow(content)\n\n new_bag_of_words = title_bow + content_bow\n self.lda.update([content_bow])\n topics = self.get_sorted_topics(new_bag_of_words)\n f.write(\"{0}:: {1}\\n\".format(link, topics))\n except UnicodeError:\n self.logger.info(\"PROCESSING FAILED!\")\n continue\n\n f.close()\n self.lda.save(self.MODEL_PATH)\n return True", "def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3, mallet_lda = False):\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n print(\"Trying LDA model with\",num_topics,\"topics.\")\n mallet_path = '../mallet-2.0.8/bin/mallet' # update this path\n if mallet_lda:\n model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)\n else:\n model = gensim.models.ldamodel.LdaModel(corpus=corpus,id2word=dictionary,num_topics=num_topics,\n random_state=100,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='auto',\n per_word_topics=True)\n\n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values", "def knn(X,Y):\n \n # Transform all X data by PCA. Note that PCA was fit on the testing set as well as training.\n pca = PCA(n_components=100)\n X_r = pca.fit(X).transform(X)\n \n # Transform all X data by LDA. Same problem as above.\n lda = LDA()\n X_r2 = lda.fit(X, Y).transform(X)\n \n # Vary k.\n for k in [1,2,4,8,16,32, 64, 128, 256, 512]:\n \n # Training set was fixed at first 2000 vectors. This was for a smaller dataset at the time\n \n # No feature extraction\n knn = neighbors.KNeighborsClassifier(k)\n knn.fit(X[:2000], Y[:2000])\n \n # PCA\n knn2 = neighbors.KNeighborsClassifier(k)\n knn2.fit(X_r[:2000], Y[:2000])\n \n # LDA\n knn3 = neighbors.KNeighborsClassifier(k)\n knn3.fit(X_r2[:2000], Y[:2000])\n \n #Prediction results. Rather ugly way to code this looking back.\n predict = []\n predict2 = []\n predict3 = []\n for i in range(2000, len(X)):\n predict += [ knn.predict(X[i]) == Y[i] ]\n predict2 += [ knn2.predict(X_r[i]) == Y[i] ]\n predict3 += [ knn3.predict(X_r2[i]) == Y[i] ]\n \n \n # Plot accuracy. 
R= no feature extraction, G= PCA, B= LDA \n pylab.scatter(k, float(sum(predict))/len(predict), c='r')\n pylab.scatter(k, float(sum(predict2))/len(predict2), c='g')\n pylab.scatter(k, float(sum(predict3))/len(predict3), c='b')", "def __init__(self, num_topics, corpus, stop_words, alpha=None, eta=0.1, max_iteration=10):\n if alpha:\n self.alpha = alpha\n else:\n self.alpha = float(50 / num_topics)\n self.eta = eta\n self.K = num_topics\n self._corpus = corpus\n self.max_iteration = max_iteration\n self.word2id = {}\n self.id2word = {}\n self.document = []\n index = 0\n for doc in corpus:\n word_count = {}\n temp_doc = []\n for word in doc:\n word = word.lower()\n if word not in stop_words and len(word) > 1 and not re.search(r'[0-9]', word):\n temp_doc.append(word)\n if word not in self.word2id.keys():\n self.word2id[word] = index\n self.id2word[index] = word\n index += 1\n if word in word_count.keys():\n word_count[word] += 1\n else:\n word_count[word] = 1\n self.document.append(temp_doc)\n # number of docs\n self.M = len(self._corpus)\n # number of words\n self.N = len(self.word2id)\n self.doc_topic_matrix = np.zeros([self.M, self.K], dtype=np.int8)\n self.topic_word_matrix = np.zeros([self.K, self.N], dtype=np.int8)\n self.topic_matrix = np.zeros(self.K, dtype=np.int8)\n self.current_word_topic_matrix = []", "def try_latent_topics_intro_model(k):\n highest_f1 = 0\n print \"start time: {}\".format(datetime.now())\n print \"using {} latent topics\".format(k)\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = topic_features\n X_train, y_train = prep.subset(features)\n print \"regular data prep complete\"\n print topic_features\n\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n\n mc = ModelChooser([rf, gb])\n mc.fit_predict(X_train, y_train)\n mc.print_results()\n\n for i, score in enumerate(mc.f1_scores):\n if score > highest_f1:\n highest_f1 = score\n best_n_latent_features = k\n if i == 0:\n best_model_type = \"Random Forest\"\n else:\n best_model_type = \"Gradient Booster\"\n\n\n print \"end time: {}\".format(datetime.now())\n print \"-\"*10\n results = \"f1 score was {} with {} latent features on {} model\".format(highest_f1, best_n_latent_features, best_model_type)\n print results\n return results", "def predict(self, X) -> List[str]:\n # Get docID of nearest neighbours\n nn = self.vsm.search(X, limit=self.k)\n\n # Create list of concatenation of all topics, including duplicates\n topics = []\n for docID in nn:\n index = self.docIDs_train[self.docIDs_train == docID].index[0]\n topics += self.Y_train.iloc[index]\n\n # Assign prediction as most common topics that make up at least 50% of the topic labels\n n = len(topics)\n total_prob = 0\n results = []\n topics = Counter(topics).most_common()\n for (topic, count) in topics:\n results.append(topic)\n total_prob += count / n\n if total_prob > 0.5:\n break\n\n return results", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = 
doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def topic(df, num_topics=5):\r\n# X, y = df[df.columns[:-1]], df[df.columns[-1]]\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def fit(self, X, f=None):\n \n perplexity = float(\"inf\")\n K = self.K # number of topics\n alpha = self.alpha\n M, V = X.shape\n\n nr_terms = X.sum(axis=1)\n nr_terms = np.array(nr_terms).squeeze()\n\n # model parameters\n beta = np.random.rand(K, V)\n \n # multimodal model parameters\n if f is not None: \n S = len(np.unique(f))\n eta = np.random.rand(K, S)\n \n # initialize the parallel processing pool\n par = Parallel(n_jobs=self.n_jobs, backend=\"multiprocessing\")\n\n # slice the documents for multiprocessing\n slices = get_slices(M, self.n_jobs)\n perplexities = []\n\n for epoch in xrange(self.nr_em_epochs):\n log_w = 0.\n \n # TODO: calculate bound function and check EM convergence \n # E-step\n print \"Epoch:\", epoch\n\n # initialize variables\n gamma = np.zeros((K, M)) + alpha + (nr_terms/float(K)) # mth document, i th topic\n beta_acc = np.zeros((K, V))\n \n\n # work on each slice in parallel\n if f is not None:\n eta_acc = np.zeros((K, S))\n res = par(delayed(_slice_doc_update)(X, gamma, beta, alpha, slice, eta, f) for slice in slices)\n else:\n res = par(delayed(_slice_doc_update)(X, gamma, beta, alpha, slice) for slice in slices)\n \n # do things in series - for profiling purposes\n # res = [_slice_doc_update(X, gamma, beta, alpha, slice) for slice in slices]\n\n # sync barrier\n for ix, r in enumerate(res):\n gamma[:, slices[ix]] = r[1] # update gammas\n beta_acc += r[0] # update betas\n log_w += r[3]\n if f is not None:\n eta_acc += r[4]\n\n # M-step\n beta = self._m_step(beta_acc)\n if f is not None:\n eta = self._m_step(eta_acc)\n\n # quality - p(w) is the normalizing constant of the posterior\n # and it is intractable - bound gives an estimate\n perplexity = self._perplexity(X, log_w)\n perplexities.append(perplexity)\n print \"Perplexity:\", perplexity\n\n return_tuple = (perplexities[1:], beta, gamma)\n if f is not None:\n return_tuple += (eta,)\n \n return return_tuple # the parameters learned", "def get_papers_per_topic(topic_model, topic_id_to_consider, year_from, year_to, papers_to_cite=None, debug=False,\n out=sys.stdout, tf_matrix_dump_filename=None):\n\n # get papers from mongo that match the filters\n if debug: out.write('Extract papers...\\n')\n query = {'year': {'$gte': year_from, '$lte': year_to}, 'cleaned_venue': {'$in': sv.considered_venues},\n 'acm_id': {'$ne': None},\n '$and': [{'abstract': {'$ne': None}}, {'abstract': {'$ne': ''}}]}\n select = {'authors': 1, 'acm_id': 1, 'citations': 1, 'title': 1, 'abstract': 1}\n\n if papers_to_cite == []:\n out.write('[WARNING] The list of papers_to_cite is empty. The query will be empty!\\n')\n\n if papers_to_cite is not None:\n query['citations'] = {'$in': papers_to_cite}\n\n papers = papers_collection.find(query, select)\n papers = [p for p in papers]\n\n if debug: out.write('Extracted {0} papers.\\n'.format(len(papers)))\n\n # preprocess the title + abstract (!! 
keep attention on min_df and max_df parameters,\n # should be 2 and 0.9 respectively in case of a small amount of papers)\n abstracts = [p['title'] + ' ' + p['abstract'] for p in papers]\n\n if debug: out.write('Compute topics assignments...\\n')\n\n # it's important to update the model dictionary in case of new words in unseen documents\n all_topics_assignments = compute_topic_assignment(topic_model, abstracts,\n tf_matrix_dump_filename=tf_matrix_dump_filename)\n\n # print_topic_assignment(all_topics_assignments, topic_model)\n\n if topic_id_to_consider is not None:\n if debug: out.write('Search for papers that are related to topic #{0}...\\n'.format(topic_id_to_consider))\n\n topic_related_papers = []\n index = 0\n\n for topic_assignment in all_topics_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index == topic_id_to_consider:\n topic_related_papers.append(papers[index])\n index += 1\n\n if debug: out.write('Found {0} papers.\\n'.format(len(topic_related_papers)))\n\n else:\n if debug: out.write('Count papers assigned to each topic...\\n')\n topic_related_papers = get_paper_counter_per_topic_id(all_topics_assignments)\n\n return topic_related_papers", "def useLDAmodel(self, kinetics, pos, model, up, down ):\n\n print \"From use LDA model.\\n\"\n\n res = np.zeros((up + down + 1, 6))\n ind = 0\n\n # range from -down to +up\n for offset in range(-down, (up + 1)):\n a = pos + offset\n\n std = kinetics[a][\"tErr\"] * sqrt( kinetics[a][\"coverage\"] )\n mErr = 0.01 + 0.03 * kinetics[a][\"modelPrediction\"] + 0.06 * kinetics[a][\"modelPrediction\"] ** 2\n den = sqrt( mErr **2 + std **2 )\n t0 = ( kinetics[a][\"tMean\"] - kinetics[a][\"modelPrediction\"] ) / den\n\n res[ind, ] = [kinetics[a][\"tMean\"], kinetics[a][\"modelPrediction\"], std, np.exp( t0 ) - 0.01, kinetics[a][\"ipdRatio\"], den]\n ind += 1\n\n predictors = np.hstack(np.log(res + 0.01).transpose())\n tmp = sum( np.multiply( predictors, model[1:] )) + model[0]\n\n return tmp", "def __init__(self, topics_corpus_fname, k = 5):\n \n with open(topics_corpus_fname, 'rb') as topics_corpus_file:\n self.topics_corpus = pickle.load(topics_corpus_file)\n \n self.k = k\n # Add all articles from spectrum viewpoints as training data\n self.X = np.vstack([topics for _, _, topics in self.topics_corpus[1:]])\n # Labels are -1 for spectrum viewpoint 1, 1 for spectrum viewpoint 2\n self.y = [-1] * len(self.topics_corpus[1][1]) + \\\n [1] * len(self.topics_corpus[2][1])\n \n # Create index of lang, title pairs so that we can tell which articles\n # KNN model is returning\n self.lang_title_index = []\n for lang, titles, _ in self.topics_corpus[1:]:\n for title in titles:\n self.lang_title_index.append((lang, title))\n \n # Map from target lang titles to topic distributions\n self.target_lang_topics = {}\n for row, title in enumerate(self.topics_corpus[0][1]):\n self.target_lang_topics[title] = self.topics_corpus[0][2][row]\n \n self.fit()", "def inference(id2word=None, bigrams=None, lda_model=None, num_topics=30):\n \n if not id2word:\n id2word = corpora.Dictionary.load(UNIGRAM_FILE)\n \n if not bigrams:\n bigrams = Phrases.load(BIGRAM_FILE)\n \n if not lda_model:\n path = pathlib.Path(f\"{SAVING_DIR}/lda_topic_40\") # there are also other models\n path = path / \"lda.model\"\n lda_model = LdaModel.load(str(path))\n\n\n data = utils.read_text_file(\"test.txt\")\n list_of_tokens, _ = preprocess([data], bigrams)\n text2bow = [id2word.doc2bow(text) for text in list_of_tokens]\n\n utils.plot_document_dist(lda_model, text2bow, 
num_topics)", "def train(self, documents):\n prior_log_prob, label_to_col = self.get_prior_log_probabilities(documents)\n self.my_model[\"vocabulary\"] = make_vocabulary(documents)\n\n # find frequencies of features\n num_classes = len(label_to_col)\n num_features = len(self.extract_f_vector(documents[0]))\n features_freq = np.zeros((num_features, num_classes))\n for doc in documents:\n f_vector = self.extract_f_vector(doc)\n col_for_f_vector = label_to_col[doc.label]\n features_freq[:, col_for_f_vector] += f_vector\n\n # laplace smoothing\n total_per_label = np.sum(features_freq, axis=0)\n features_freq += np.ones(total_per_label.shape, int)\n normalizer = total_per_label + np.full(total_per_label.shape, num_features, int)\n features_freq /= normalizer\n\n # stack all probabilities to one matrix and take log\n # result: self.all_log_prob\n # |-----------------------------------|\n # | log P(f1|C1) | ... | log P(f1|Cn) |\n # | log P(f2|C1) | ... | log P(f2|Cn) |\n # | . | . | . |\n # | . | . | . |\n # | . | . | . |\n # | log P(fm|C1) | ... | log P(fm|Cn) |\n # | log P(C1) | ... | log P(Cn) |\n # |-----------------------------------|\n likelihood_log_prob = np.log(features_freq)\n all_log_prob = np.vstack((likelihood_log_prob, prior_log_prob))\n self.my_model[\"all_log_prob\"] = all_log_prob", "def main():\n if len(sys.argv) != 5:\n exit(\"Usage: python oneta.py train-corpus test-corpus kernel-size output\")\n\n w1Words = dict()\n w2Words = dict()\n W1 = 0\n W2 = 0\n\n D1 = int(sys.argv[3])\n\n sys.stderr.write(\"First scan of training data\\n\")\n \n J = 0\n # Read through the corpus to decided which words are in the dense set and which in the sparse set\n corpus = open(sys.argv[1],\"r\")\n for line in corpus:\n tokens = word_tokenize(line)\n for token in tokens:\n tk_decoded = token.decode(\"utf-8\")\n if J < D1 and tk_decoded not in w1Words:\n w1Words[tk_decoded] = W1\n W1 += 1\n elif J >= D1 and tk_decoded not in w2Words:\n w2Words[tk_decoded] = W2\n W2 += 1\n J += 1\n corpus.close()\n\n D2 = J - D1\n\n # Partition the corpus into a L-shaped matrix\n sys.stderr.write(\"Building matrices\")\n At = lil_matrix((D1,W1))\n B = lil_matrix((W1,D2))\n Ct = lil_matrix((D2,W2))\n\n corpus = open(sys.argv[1],\"r\")\n\n j = 0\n for line in corpus:\n sys.stderr.write(\".\")\n tokens = word_tokenize(line)\n docsq = 0.\n for token in tokens:\n tk_decoded = token.decode(\"utf-8\")\n if j < D1: # tk_decoded in w1words\n tkId = w1Words[tk_decoded]\n docsq += (At[j,tkId]+1)**2 - (At[j,tkId])**2\n At[j,tkId] += 1.\n elif tk_decoded in w1Words:\n tkId = w1Words[tk_decoded]\n docsq += (B[tkId,j-D1]+1)**2 - (B[tkId,j-D1])**2\n B[tkId,j-D1] += 1.\n else:\n tkId = w2Words[tk_decoded]\n docsq += (Ct[j-D1,tkId]+1)**2 - (Ct[j-D1,tkId])**2\n Ct[j-D1,tkId] += 1.\n if j < D1:\n At[j,:] /= math.sqrt(docsq)\n else:\n for w in range(0,W1):\n B[w,j-D1] /= math.sqrt(docsq)\n Ct[j-D1,:] /= math.sqrt(docsq)\n j += 1\n\n sys.stderr.write(\"\\nBuild Cn\\n\")\n Cn = zeros((D2,1))\n Ct = Ct.tocsr()\n for i in range(0,D2):\n v = ((Ct[i,:] * Ct[i,:].transpose())[0,0])\n if v == 0:\n Cn[i,0] = 1.\n else:\n Cn[i,0] = v\n\n # Building real matrices\n sys.stderr.write(\"Calculating ATA\\n\")\n ATA = (At * At.transpose()).todense() # D1 x D1\n At = At.tocsr()\n B = B.tocsc()\n\n sys.stderr.write(\"Solve inverse\\n\")\n ATAi = linalg.inv(ATA)\n\n # The real calculation is that if we have input vector [ d_1 d_2 ] ^ T \n # We yield [ (A^T * A)^-1 * A^T ( d1^T - B * (C^T * d2 / Cn) ) (C^T * d2 / Cn)\n sys.stderr.write(\"Calculating 
projected vectors\\n\")\n\n out = open(sys.argv[4],\"w\")\n testDocs = open(sys.argv[2],\"r\")\n for testDoc in testDocs:\n sys.stderr.write(\".\")\n corpus = open(sys.argv[1],\"r\")\n d1 = zeros((W1,1))\n d2 = zeros((W2,1))\n tokens = word_tokenize(testDoc)\n for token in tokens:\n tk_decoded = token.decode(\"utf-8\")\n if tk_decoded in w1Words:\n d1[w1Words[tk_decoded],0] += 1\n elif tk_decoded in w2Words:\n d2[w2Words[tk_decoded],0] += 1\n norm = sqrt(sum(d1**2) + sum(d2**2))\n d1 /= norm \n d2 /= norm\n v2 = (Ct * d2) / Cn\n v1 = ATAi * (At * (d1 - B * v2))\n for j in range(0,D1+D2):\n out.write(str(j) + \" \")\n out.write(\"||| \")\n for j in range(0,D1):\n out.write(str(v1[j,0]) + \" \")\n for j in range(0,D2):\n out.write(str(v2[j,0]) + \" \")\n out.write(\"\\n\")\n\n out.flush()\n out.close()\n sys.stderr.write(\"\\n\")", "def _initialize(self):\n self.VT = len(self.corpus.topicDictionary)\n self.VO = len(self.corpus.opinionDictionary)\n self.DT = len(self.corpus)\n self.DO = np.array([len(p.opinionCorpus)\n for p in self.corpus.perspectives], dtype=np.int)\n self.maxDocLengthT = max([p.topicCorpus.maxDocLength\n for p in self.corpus.perspectives])\n self.maxDocLengthO = np.array([p.opinionCorpus.maxDocLength\n for p in self.corpus.perspectives],\n dtype=np.int)\n\n # topics\n self.z = np.zeros((self.DT, self.maxDocLengthT), dtype=np.int)\n self.ndk = np.zeros((self.DT, self.nTopics), dtype=np.int)\n self.nkw = np.zeros((self.nTopics, self.VT), dtype=np.int)\n self.nk = np.zeros(self.nTopics, dtype=np.int)\n self.ntd = np.zeros(self.DT, dtype=np.float)\n\n # opinions\n self.x = np.array([np.zeros((self.DO[i], self.maxDocLengthO[i]),\n dtype=np.int)\n for i, p in enumerate(self.corpus.perspectives)])\n self.nrs = np.zeros((self.nPerspectives, self.nTopics, self.VO),\n dtype=np.int)\n self.ns = np.zeros((self.nPerspectives, self.nTopics), dtype=np.int)\n\n # loop over the words in the corpus\n for d, persp, d_p, doc in self.corpus:\n for w_id, i in self.corpus.words_in_document(doc, 'topic'):\n topic = np.random.randint(0, self.nTopics)\n self.z[d, i] = topic\n self.ndk[d, topic] += 1\n self.nkw[topic, w_id] += 1\n self.nk[topic] += 1\n self.ntd[d] += 1\n\n for w_id, i in self.corpus.words_in_document(doc, 'opinion'):\n opinion = np.random.randint(0, self.nTopics)\n self.x[persp][d_p, i] = opinion\n self.nrs[persp, opinion, w_id] += 1\n self.ns[persp, opinion] += 1\n logger.debug('Finished initialization.')", "def get_topic_quality():\n model.eval() \n with torch.no_grad():\n alpha = model.mu_q_alpha\n beta = model.get_beta(alpha) \n print('beta: ', beta.size())\n\n print('\\n')\n print('#'*100)\n print('Get topic diversity...')\n num_tops = 25\n\n TD_all = _diversity_helper(beta, num_tops) \n \n TD = np.mean(TD_all)\n print('Topic Diversity is: {}'.format(TD))\n\n print('\\n')\n print('Get topic coherence...')\n print('train_tokens: ', train_tokens[0])\n \n TC_all, cnt_all = get_topic_coherence(beta.cpu().detach().numpy(), train_tokens, vocab)\n\n TC_all = torch.tensor(TC_all)\n cnt_all = torch.tensor(cnt_all)\n TC_all = TC_all / cnt_all\n TC_all[TC_all<0] = 0\n\n TC = TC_all.mean().item()\n print('Topic Coherence is: ', TC)\n print('\\n')\n\n print('Get topic quality...')\n TQ = TC * TD\n print('Topic Quality is: {}'.format(TQ))\n print('#'*100)\n\n return TQ, TC, TD", "def evaluate(m, source, tc=False, td=False):\n\n m.eval()\n with torch.no_grad():\n if source == 'val':\n indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)\n tokens = 
valid_tokens\n counts = valid_counts\n else: \n indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)\n tokens = test_tokens\n counts = test_counts\n\n\n ### do dc and tc here\n acc_loss = 0\n cnt = 0\n\n # Get parameter given econ variables \n theta = model.get_theta(econ_test)\n beta = model.get_beta()\n\n \n # Get prediction loss given the text \n normalized_data_batch, data_batch = batch(vocab_size, args.num_docs_test, test_tokens, test_counts, device)\n \n sums_2 = data_batch.sum(1).unsqueeze(1)\n res = torch.mm(theta, beta)\n preds = torch.log(res)\n recon_loss = -(preds * data_batch).sum(1)\n \n loss = recon_loss / sums_2.squeeze()\n loss = loss.mean().item()\n acc_loss += loss\n cnt += 1\n \n \n cur_loss = acc_loss / cnt\n ppl_dc = round(math.exp(cur_loss), 1)\n print('*'*100)\n print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))\n print('*'*100)\n if tc or td:\n beta = beta.data.cpu().numpy()\n if tc:\n print('Computing topic coherence...')\n get_topic_coherence(beta, train_tokens, vocab)\n if td:\n print('Computing topic diversity...')\n get_topic_diversity(beta, 25)\n return ppl_dc", "def show_topic_model_textually(seed_gensim_topic_model, seed_gensim_corpus,\n texts_to_analyze, num_topics):\n print(\"alpha =\", seed_gensim_topic_model.alpha)\n print(seed_gensim_topic_model)\n print(seed_gensim_topic_model.print_topics(num_topics))\n print()", "def explore_topic(topic_number, topn=25, model=10):\n #\n if model==25:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_25'))\n topicname=topic_names_25[topic_number]\n gensimSTR=''\n elif model==15:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_15'))\n topicname=topic_names_15[topic_number]\n gensimSTR=''\n elif model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)\n ##", "def fit(self, corpus, **kwargs):\n if not len(corpus.dictionary):\n return None\n self.reset_model(corpus)\n self.running = True\n self.update(corpus.ngrams_corpus, **kwargs)\n self.topic_names = ['Topic{} ({})'.format(i, ', '.join(words))\n for i, words in enumerate(self._topics_words(3), 1)]\n self.running = False", "def table(X,Y):\n \n # Split training/testing\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n \n # Fit and transform with LDA\n lda = LDA().fit(X_train, Y_train)\n X_train_lda = lda.transform(X_train)\n X_test_lda = lda.transform(X_test)\n \n # Change this list depending on which classifiers you want to compare.\n clfs = [\n (neighbors.KNeighborsClassifier(n_neighbors=50, warn_on_equidistant=False), \"k-NN50\"),\n (svm.SVC(kernel=\"linear\"), \"SVM\"),\n #(ensemble.AdaBoostClassifier(n_estimators=5), \"AdaBoost - 5\"),\n #(ensemble.AdaBoostClassifier(n_estimators=10), \"AdaBoost - 10\"),\n #(ensemble.AdaBoostClassifier(n_estimators=25), \"AdaBoost - 25\"),\n (ensemble.AdaBoostClassifier(n_estimators=50), \"AdaBoost - 50\"),\n #(ensemble.AdaBoostClassifier(n_estimators=100), \"AdaBoost - 100\"),\n 
#(ensemble.AdaBoostClassifier(n_estimators=500), \"AdaBoost - 500\"),\n ]\n \n # First element is classifier, second is name\n for clf, name in clfs: \n \n # With LDA then without\n for isLDA in [True, False]:\n \n #Switch training and testing X depending\n test = X_test_lda if isLDA else X_test\n train = X_train_lda if isLDA else X_train\n \n # Fit the classifier\n model = clf.fit(train, Y_train) \n\n # Get predictions\n train_result = model.prediction(train) == Y_train\n test_result = model.predictions(test) == Y_test\n \n # Print results\n print name, sum(train_result)*1./len(train_result), sum(test_result)*1./len(test_result)", "def test_model(self, model, test_name):\n statistics = []\n stats = []\n for item in model.get_topics():\n statistics.append(item)\n statistics.append([\"Article topic\", \"Model topic index\"])\n self.connect_topic_id_to_topics(model)\n\n for article in self.testing_docs:\n analysis_res = model.analyse_text(article[1])\n if len(analysis_res) == 0:\n print(\"nothing found\")\n continue\n res = max(analysis_res, key=lambda item: item[1])\n statistics.append([article[0], res[0]])\n if res[0] not in self.topics_of_index:\n self.topics_of_index[res[0]] = [article[0]]\n self.topic_indexes[article[0]] = res[0]\n print(\"continuing\")\n continue\n\n stats.append(1 if article[0] in self.topics_of_index[res[0]] else 0)\n topic_number_index = self.topic_numbers.index(article[0])\n\n if article[0] in self.topics_of_index[res[0]]:\n guessed_topic_number_index = self.topic_numbers.index(article[0])\n else:\n guessed_topic_number_index = self.topic_numbers.index(self.topics_of_index[res[0]][0])\n self.confusion_matrix[guessed_topic_number_index][topic_number_index] += 1\n self.confusion_matrix_true[res[0]][topic_number_index] += 1\n #self.log_writer.add_log(\"Article with topic {} was assigned {} with {} certainty.\".format(article[0], \"correctly\" if res[0] == self.topic_positions[article[0]] else \"wrong\", res[1]))\n\n self.log_writer.write_2D_list(test_name, statistics)\n self.add_descriptions_to_confusion_matrix()\n self.log_writer.write_2D_list(test_name+\"\\\\confusion-matrix\", self.confusion_matrix)\n self.log_writer.write_2D_list(test_name+\"\\\\confusion-matrix-true\", self.confusion_matrix_true)\n return sum(stats)/len(stats)", "def perplexity(self):\n\n\t\treturn samplers_lda.perplexity_comp(self.docid,self.tokens,self.tt,self.dt,self.N,self.K,self.samples)", "def perplexity(self):\n\n\t\treturn samplers_lda.perplexity_comp(self.docid,self.tokens,self.tt,self.dt,self.N,self.K,self.samples)", "def score_intro_model():\n k = 100\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n mc = ModelChooser([model])\n dp = DataPrep(training=False)\n dp.prepare(n_components=k, use_cached_nmf='/home/ubuntu/ca_bills_project/data/extra/nmf_100_05-23-17-08-23.pkl',\n use_cached_tfidf=\"/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl\", cache_tfidf=True, test=True)\n X_test, y_test = dp.subset(features)\n\n\n mc.score(X_test, y_test)", "def main():\n vocab = str.split(file(sys.argv[1]).read())\n testlambda = numpy.loadtxt(sys.argv[2])\n testlambda = 
topN(testlambda, int(sys.argv[3]))\n words_per_topic = 20\n\n for k in range(0, len(testlambda)):\n lambdak = list(testlambda[k, :])\n lambdak = lambdak / sum(lambdak)\n temp = zip(lambdak, range(0, len(lambdak)))\n temp = sorted(temp, key=lambda x: x[0], reverse=True)\n\n print 'topic %d:' % (k)\n # feel free to change the \"53\" here to whatever fits your screen nicely.\n for i in range(0, words_per_topic):\n print '%s:%.4f' % (vocab[temp[i][1]], temp[i][0])\n print", "def word_to_perplexity(model, timeseries, indices, words, args):\n accum = 0\n words_len = len(words)-args.window_size\n batches = math.floor(words_len / args.batch_size)\n print(batches)\n for start in range(0, batches):\n idx = start*args.batch_size\n inp = np.array([timeseries[i:i+args.window_size] for i in range(idx, idx+args.batch_size)])\n label = np.asarray([indices[x] for x in words[idx+args.window_size:idx+args.window_size+args.batch_size]]) \n \n pred = model.predict(inp, batch_size=128)\n lp = np.log(pred)\n for i, ent in enumerate(lp):\n accum += ent[label[i]]\n if start % 5 == 0:\n print(\"{} / {}. Perplexity so far: {}\".format(start, batches, np.exp(-accum / (start*args.batch_size+1))))\n accum = -accum\n print(accum)\n avg = accum / words_len \n print(avg)\n perplex = np.power(avg, 2)\n print(perplex)", "def _evaluate(self):\n coherence = gensim.models.coherencemodel.CoherenceModel(model=self.ldamodel,\n corpus=self.gensim_corpus,\n dictionary=self.ldamodel.id2word,\n coherence='u_mass')\n self.score = coherence.get_coherence()\n if self.verbose:\n print('LDA achieved a coherence (u_mass) of: ', self.score)", "def fit(self, pnos, texts = None, from_loaded = False):\n self.pnos = pnos\n assert((texts is not None) or from_loaded)\n if texts is not None:\n self._process_texts(texts)\n else:\n assert(self.has_vocab and self.has_corpus)\n self._lda_model = ldamodel.LdaModel(\n corpus=self.corpus, \n id2word=self.vocab,\n num_topics=self.K\n\n )\n self.is_trained = True\n _ = self.parse_topics()", "def fit_lda_seq_topics(self, topic_suffstats):\n lhood = 0\n\n for k, chain in enumerate(self.topic_chains):\n logger.info(\"Fitting topic number %i\", k)\n lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])\n lhood += lhood_term\n\n return lhood", "def compute_coherence_values(dictionary, corpus, texts, limit, start=5, step=5):\n coherence_values = []\n model_list = []\n pdb.set_trace()\n for num_topics in range(start, limit, step):\n start=time.time()\n model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,\n id2word=dictionary,\n num_topics=num_topics, \n random_state=100,\n chunksize=10000,\n passes=1,\n per_word_topics=True)\n \n print(f'Topic modeling for {num_topics} topics took {time.time()-start} seconds.')\n model_list.append(model)\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values", "def fit_lda(X, vocab):\n print('fitting lda...')\n return LdaModel(matutils.Sparse2Corpus(X, documents_columns=False), num_topics=100, passes=1, iterations=500,\n chunksize=1000, update_every=1, id2word=dict([(i, s) for i, s in enumerate(vocab)]))", "def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):\n coherence_values = []\n model_list = []\n for num_topics in range(start, limit, step):\n model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word, workers = 2, random_seed = 256)\n 
model_list.append(model)\n model.save(\"LDA_withmallet_num_topics/ldamodel_num_{}.{}\".format(num_topics,'lda'))\n coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return model_list, coherence_values", "def evaluate_topic_models(data, varying_parameters, constant_parameters=None, n_max_processes=None, return_models=False,\n metric=None, **metric_kwargs):\n mp_eval = MultiprocEvaluationRunner(MultiprocEvaluationWorkerLDA, AVAILABLE_METRICS, data,\n varying_parameters, constant_parameters,\n metric=metric, metric_options=metric_kwargs,\n n_max_processes=n_max_processes, return_models=return_models)\n\n return mp_eval.run()", "def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)", "def create_topic_model(seed_arguments, list_responses):\n topic_model_dictionary, texts_to_analyze = create_topic_model_dictionary(\n list_responses)\n # convert tokenized documents into a document-term matrix, or the corpus\n topic_model_corpus = [\n topic_model_dictionary.doc2bow(text) for text in texts_to_analyze\n ]\n # generate LDA model from the texts_to_analyze and the topic_model_dictionary\n lda_model = gensim.models.ldamodel.LdaModel(\n topic_model_corpus,\n id2word=topic_model_dictionary,\n num_topics=seed_arguments.num_topics,\n passes=seed_arguments.num_passes,\n alpha=seed_arguments.alpha,\n eta=seed_arguments.eta)\n return lda_model, topic_model_corpus, topic_model_dictionary, texts_to_analyze", "def guess_topic(lda, query, features_vec, irrelevant, verbose=True):\n query_doc = []\n doc_topic = []\n topic_most_pr = None\n if isinstance(query,str):\n query = clean(query)\n query = n_grammize(query)\n for term in query:\n weight = set_weight(term, irrelevant)\n if term in features_vec:\n query_doc.append(weight * array(features_vec[term]))\n elif isinstance(query,tuple):\n if query in features_vec:\n weight = set_weight(query, irrelevant)\n query_doc.append(weight * array(features_vec[query]))\n elif isinstance(query,list):\n for term in query:\n weight = set_weight(term, irrelevant)\n if term in features_vec:\n query_doc.append(weight * array(features_vec[term]))\n X = array(query_doc)\n if len(X)==1:\n X = X.reshape(1,-1)\n if len(X)==0:\n return topic_most_pr\n doc_topic = lda.transform(X)\n sum_topics = numpy.zeros(len(doc_topic[0]))\n for i in range(len(doc_topic)):\n sum_topics = sum_topics + doc_topic[i]\n topic_most_pr = sum_topics.argmax()\n if verbose == True:\n if topic_most_pr in legend:\n return legend[topic_most_pr]\n else:\n return topic_most_pr\n else:\n return topic_most_pr", "def inference(self):\n for m, doc in enumerate(self.docs):\n # Be careful followings are views\n # So self.hoge will be change, when changing variant\n zs_j = self.zs_m_j[m]\n zk_j = self.zk_m_j[m]\n n_m_zs = self.n_m_zs[m]\n n_m_zk = self.n_m_zk[m]\n for j, t in enumerate(doc):\n # discount for n-th word t with topic z\n zs = zs_j[j]\n zk = zk_j[j]\n n_m_zs[zs] -= 1\n n_m_zk[zs, zk] -= 1\n self.n_zk_t[zk, t] -= 1\n 
self.n_zk[zk] -= 1\n\n # sampling topic new_z for t\n \"\"\"\n n_s = n_m_zs + self.alphas # mth doc, S vec\n p_s = n_s / np.sum(n_s)\n n_k = n_m_zk + self.alphask # mth doc, SxK matrix\n p_k = n_k / n_s.reshape(len(n_s), 1)\n n_v = self.n_zk_t[:, t] + self.beta\n p_v = n_v / (self.n_zk + self.beta)\n\n p_zsk = p_s.reshape(len(p_s), 1) * p_k * p_v # SxK matrix\n \"\"\"\n\n p_zsk = (n_m_zk + self.alphask) * self.n_zk_t[:, t] \\\n / (np.sum(n_m_zs + self.alphas) * self.n_zk)\n\n p_zs = np.sum(p_zsk, axis=1) / np.sum(p_zsk)\n p_zk = np.sum(p_zsk, axis=0) / np.sum(p_zsk)\n\n new_zs = np.random.multinomial(1, p_zs).argmax()\n new_zk = np.random.multinomial(1, p_zk).argmax()\n\n # print(\"arg\", np.argmax(p_s), np.argmax(p_k, axis=1),\n # np.argmax(p_k, axis=0), np.argmax(p_zk))\n # print('probs', p_s, p_zs)\n # print('probk', p_k, p_zk)\n # print('old', zs, zk)\n # print('new', new_zs, new_zk)\n\n # set z the new topic and increment counters\n zs_j[j] = new_zs\n zk_j[j] = new_zk\n n_m_zs[new_zs] += 1\n n_m_zk[new_zs, new_zk] += 1\n self.n_zk_t[new_zk, t] += 1\n self.n_zk[new_zk] += 1", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n #self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def topic(df, num_topics=5):\r\n\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def generate_xy(self, num_topics, num_days, keep_app=True, delay=60):\n self.num_topics = num_topics\n self.num_days = num_days\n self.keep_app = keep_app\n self.delay = delay\n\n # get available topics\n NUM_TOPICS = glob.glob(r'data/nlp/*.gensim')\n NUM_TOPICS = [int(re.findall(r'\\d+', s)[0]) for s in NUM_TOPICS]\n NUM_DAYS = list(range(2, 61))\n\n assert num_topics in\\\n NUM_TOPICS, f'LDA model not found for {num_topics} topics'\n assert num_days in\\\n NUM_DAYS, f'{num_days} outside scope of admissible days'\n\n # create input and output of models\n # load patent data\n patents = pd.read_csv('data/patents/clean/patents.csv.gz',\n compression='gzip')\n patents['text'] = patents['text'].apply(lambda x: x.split(' '))\n\n # load returns\n stocks = pd.read_csv('data/returns/clean/adj_stock_returns.csv',\n index_col=0)\n stocks.index = pd.to_datetime(stocks.index)\n\n # load expenses\n expenses = 
pd.read_csv('data/returns/clean/expenditures.csv',\n index_col=0)\n expenses.index = pd.to_datetime(expenses.index)\n\n # load lda model\n ldamodel = gensim.models.ldamodel.LdaModel.load(\n f'data/nlp/model{num_topics}.gensim')\n\n # remove columns from patents\n keep_col = ['app_number', 'cited_patent_number', 'num_inventor',\n 'patent_num_claims', 'ticker', 'text',\n 'app_date', 'patent_date']\n X = patents[keep_col]\n\n X.loc[:, 'app_date'] = pd.to_datetime(X.loc[:, 'app_date'])\n X.loc[:, 'patent_date'] = pd.to_datetime(X.loc[:, 'patent_date'])\n\n # create feature with number of patent applications filed\n # in the last {delay} days.\n X['num_app_prior'] = np.nan\n for ticker, x in X.groupby('ticker'):\n for i, patent in x.iterrows():\n app_date_delayed = patent['app_date'] - timedelta(days=delay)\n num_app_prior = len(x[(x['app_date'] >= app_date_delayed) &\n (x['app_date'] < patent['app_date'])]\n )\n X.at[i, 'num_app_prior'] = num_app_prior\n\n date_col = ''\n remove = ''\n if keep_app:\n date_col = 'app_date'\n remove = 'patent_date'\n else:\n date_col = 'patent_date'\n remove = 'app_date'\n # rename to date and remove column that we don't need\n X = X.drop(columns=[remove])\n X = X.rename(columns={date_col: 'date'})\n X = X.sort_values(by='date')\n\n # create feature with scaled R&D expenses\n X['rd_exp'] = np.nan\n for i, (d, row) in enumerate(expenses.iterrows()):\n if i < len(expenses)-1:\n xd = X[(X['date'] >= d) & (X['date'] < expenses.index[i+1])]\n X.at[xd.index, 'rd_exp'] = xd['ticker'].apply(\n lambda x: row.get(x))\n\n # create corpora and dictionary\n dictionary = pickle.load(open('data/nlp/dictionary.pkl', 'rb'))\n # dictionary = corpora.Dictionary(list(patents['text'].values))\n\n # create feature with topic extraction from lda modela\n X[[f'topic{t}' for t in range(num_topics)]] = 0.0\n\n for i, x in X.iterrows():\n new_doc_bow = dictionary.doc2bow(x['text'])\n t_prob = ldamodel.get_document_topics(new_doc_bow,\n minimum_probability=0.0)\n for t in t_prob:\n X.at[i, 'topic'+str(t[0])] = t[1]\n\n # drop columns that are not needed\n X = X.drop(columns=['text'])\n\n # remove all dates that are on 2010-01-04 (starting day of stocks)\n # since we need to get 1 day of returns prior to date\n X = X[X['date'] > '2010-01-04']\n\n # define y output\n y = pd.DataFrame(index=X.index, columns=['y_ret', 'y_bin'],\n data=np.nan)\n\n for i, x in X.iterrows():\n # get returns one day before date\n sub = stocks[x['ticker']]\n ret_before = sub[sub.index < x['date']].iloc[-1:]\n ret_after = sub[sub.index >= x['date']].iloc[:num_days-1]\n ret = pd.concat([ret_before, ret_after], axis=0)\n ret = (ret + 1).cumprod(axis=0).iloc[-1]-1\n y.at[i, 'y_ret'] = ret\n y.at[i, 'y_bin'] = 0 if ret < 0 else 1\n\n # remove nans in y (some stocks were not publicly traded before\n # some date)\n index_drop = y[y['y_ret'].isna()].index\n y = y.drop(index=index_drop)\n X = X.drop(index=index_drop)\n\n # get X for model and X containing info of X\n self.X_info = X[['app_number', 'ticker', 'date']]\n self.X = X.drop(columns=['app_number', 'ticker', 'date'])\n self.y = y\n\n return X, y", "def compareToManual2(reviews, vectorized_revs, coded_reviews_df,\n vectorized_coded_revs, n_topics=20, alpha=0.1, eta=0.01):\n n = n_topics\n a = alpha\n b = eta\n\n ldalda = lda.LDA(n_topics=n, alpha=a, eta=b, refresh=1000, random_state=1)\n ldalda.fit(vectorized_revs)\n\n cat_array = ldalda.transform(vectorized_coded_revs)\n\n topic_array = []\n for i in range(n):\n coded_reviews_df['topic_'+str(i)] = cat_array[:, i]\n 
topic_array += ['topic_'+str(i)]\n best_prec_scores = {}\n for col in pos_neg_cols:\n try:\n best_prec = 0\n best_topic = None\n for topic in topic_array:\n prec = average_precision(coded_reviews_df[col], coded_reviews_df[topic])\n if prec > best_prec:\n best_prec = prec\n best_topic = topic\n best_prec_scores[col] = [best_topic, best_prec]\n except Exception:\n pass\n\n best_2prec_scores = {}\n for col in pos_neg_cols:\n try:\n best_2prec = 0\n best_2topic = None\n for it in itertools.combinations(topic_array, 2):\n combo = coded_reviews_df[it[0]] + coded_reviews_df[it[1]]\n prec = average_precision(coded_reviews_df[col], combo)\n if prec > best_2prec:\n best_2prec = prec\n best_2topic = it\n best_2prec_scores[col] = [best_2topic, best_2prec]\n except Exception:\n pass\n\n # best_3prec_scores = {}\n # for col in pos_neg_cols:\n # try:\n # best_3prec = 0\n # best_3topic = None\n # for it in itertools.combinations(topic_array, 3):\n # combo = coded_reviews_df[it[0]] + coded_reviews_df[it[1]] + \\\n # coded_reviews_df[it[2]]\n # prec = prec_auc_score(coded_reviews_df[col], combo)\n # if prec > best_3prec:\n # best_3prec = prec\n # best_3topic = it\n # best_3prec_scores[col] = [best_3topic, best_3prec]\n # except Exception:\n # pass\n\n top_scores = []\n for col in pos_neg_cols:\n try:\n tops = [best_prec_scores[col], best_2prec_scores[col] \\\n # , best_3prec_scores[col]\n ]\n arg = np.argmax([each[1] for each in tops])\n print col, tops[arg]\n top_scores += [(col, tops[arg])]\n except Exception:\n pass\n print \"Done with n:{}, a:{}, b:{}\".format(n, a, b)\n return top_scores", "def train(self, data):\r\n pos_list = [\r\n \"adj\",\r\n \"adv\",\r\n \"adp\",\r\n \"conj\",\r\n \"det\",\r\n \"noun\",\r\n \"num\",\r\n \"pron\",\r\n \"prt\",\r\n \"verb\",\r\n \"x\",\r\n \".\",\r\n ]\r\n print(\"Train\")\r\n\r\n wordpos_list = [\r\n tuple([line[0][i], line[1][i]])\r\n for line in data\r\n for i in range(len(line[0]))\r\n ]\r\n\r\n pos_dict = {pos: {} for pos in pos_list}\r\n\r\n wordpos_count = Counter(wordpos_list)\r\n\r\n for w in wordpos_count:\r\n pos_dict[w[1]].update({w[0]: wordpos_count[w]})\r\n posterior_prob = {}\r\n emission_prob = {}\r\n for pos in pos_dict.keys():\r\n posterior_prob[pos] = float(sum(pos_dict[pos].values())) / len(wordpos_list)\r\n for pos in pos_dict.keys():\r\n emission_prob[pos] = {\r\n word: float(pos_dict[pos][word]) / sum(pos_dict[pos].values())\r\n for word in pos_dict[pos].keys()\r\n }\r\n\r\n trans_count = {}\r\n transition_prob = {}\r\n for pos in pos_list:\r\n trans_count[pos] = {}\r\n transition_prob[pos] = {}\r\n pair_list = [\r\n tuple([line[1][i], line[1][i + 1]])\r\n for line in data\r\n for i in range(len(line[1]) - 1)\r\n ]\r\n\r\n unique_list = list(set(pair_list))\r\n\r\n for element in unique_list:\r\n trans_count[element[0]].update({element[1]: pair_list.count(element)})\r\n\r\n for pos in pos_list:\r\n transition_prob[pos] = {pos: (1 / float(10 ** 8)) for pos in pos_list}\r\n for key, value in trans_count[pos].items():\r\n transition_prob[pos].update(\r\n {key: (value / float(sum(trans_count[pos].values())))}\r\n )\r\n initial_list = [line[1][0] for line in data]\r\n initial_count = Counter(initial_list)\r\n initial_prob = {\r\n pos: float(initial_count[pos]) / sum(initial_count.values())\r\n for pos in initial_count.keys()\r\n }\r\n self.position_list = pos_list\r\n self.emission_probability = emission_prob\r\n self.transition_probability = transition_prob\r\n self.initial_probability = initial_prob\r\n self.posterior_probability = 
posterior_prob", "def fit(self, texts):\n print('Processing text and fitting LDA...')\n\n texts = preprocess_text(texts)\n stemmed_texts = [\n list(set(self.tokenizer.stem(text))) for text in texts]\n self.lda_dictionary = Dictionary(stemmed_texts)\n lda_corpus = [\n self.lda_dictionary.doc2bow(text) for text in stemmed_texts]\n self.lda = LdaModel(lda_corpus, num_topics=self.n_topics)\n print('Done.')\n\n return self", "def grid_search_intro_model_with_latent_topics(k):\n if k == 100: # there exists a saved file already if using 100 latent topics\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n else:\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl', save=True)\n\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No', u'taxlevy_Yes']\n features += topic_features\n X_train, y_train = prep.subset(features)\n\n rf = RandomForestClassifier()\n gb = GradientBoostingClassifier()\n ada = AdaBoostClassifier()\n\n mc = ModelChooser([rf, gb, ada])\n\n tuning_params = [ {'max_features': [.1, .5, .7], 'max_depth': [5, 8, 10], 'n_estimators': [100000]},\n {'learning_rate': [.1, .05], 'max_depth': [2, 4], 'n_estimators': [100, 500]},\n {'learning_rate': [.1, .05], 'n_estimators': [100, 500]}]\n\n mc.grid_search(X_train, y_train, tuning_params)", "def predict(self, texts):\n topic_max = []\n for text in texts:\n topic_probs = self.lda[\n self.lda_dictionary.doc2bow(self.tokenizer.stem(text))]\n prob = 0.0\n for topic_prob in topic_probs:\n if topic_prob[1] > prob:\n topic = topic_prob[0]\n prob = topic_prob[1]\n topic_max.append(topic)\n\n return np.array(topic_max)", "def classify(self, documents):\n predictions = []\n for doc in documents:\n\n score_sod = math.log(self.priorSOD)\n score_pop = math.log(self.priorPOP)\n for term in doc.tokens:\n if term in self.cond_prob_sod.keys():\n score_sod += math.log(self.cond_prob_sod[term])\n if term in self.cond_prob_pop.keys():\n score_pop += math.log(self.cond_prob_pop[term])\n if(score_pop >= score_sod): #defaults to ham if score = even \n predictions.append('pop')\n else:\n predictions.append('sod')\n \n return predictions \n pass", "def calculate_result(self, reviewer_data, article_data, people_data,\n coi_data,\n min_rev_art, max_rev_art, min_art_rev, max_art_rev):\n\n cur_progress = 0\n max_progress = 100\n\n article_data = pd.DataFrame(article_data)\n people_data = pd.DataFrame(people_data)\n coauthors_df = pd.DataFrame([[r.PaperID, co_author]\n for _, r in article_data.iterrows()\n for co_author in r.PersonIDList.split(';')],\n columns = ['PaperID', 'PersonID'])\n\n if reviewer_data is None:\n # extract reviewer data from articles\n coauthor_articles = coauthors_df.merge(article_data)[['PersonID', 'Abstract']]\n coauthor_abtracts = coauthor_articles.groupby('PersonID').\\\n agg({'Abstract': lambda x: ''.join(x)})\n reviewer_data = pd.DataFrame(zip(coauthor_abtracts.index,\n coauthor_abtracts.Abstract),\n columns=['PersonID', 'Abstract'])\n else:\n reviewer_data = pd.DataFrame(reviewer_data)\n reviewer_data.PersonID = reviewer_data.PersonID.apply(str)\n\n if coi_data is not None:\n coi_data = pd.DataFrame(coi_data)\n\n update_frequency = 
1\n cur_progress += int(max_progress/6.)\n self.update_progress(\n cur_progress,\n max_progress,\n update_frequency=update_frequency,\n )\n\n\n # this performs the topic modeling (LSA)\n a = prm.compute_affinity(reviewer_data.Abstract, article_data.Abstract)\n cur_progress += int(max_progress/6.)\n self.update_progress(\n cur_progress,\n max_progress,\n update_frequency=update_frequency,\n )\n\n # if coi_data available, then add as if they were co-authors\n if coi_data is not None:\n coi_data.PersonID = coi_data.PersonID.apply(str)\n coauthors_df = pd.concat((coauthors_df, coi_data))\n\n\n # articles\n article_data2 = article_data.copy()\n article_data2.index = article_data2.PaperID\n article_data2['id'] = range(article_data2.shape[0])\n coi_row = np.array(article_data2.loc[coauthors_df.PaperID].id.tolist())\n\n # persons\n reviewer_data2 = reviewer_data.copy()\n reviewer_data2.index = reviewer_data2.PersonID\n reviewer_data2['id'] = range(reviewer_data2.shape[0])\n coi_column = np.array(reviewer_data2.loc[coauthors_df.PersonID].id.tolist())\n\n for i, j in zip(coi_row, coi_column):\n a[i, j] = -1000.#np.inf\n\n v, A, d = prm.create_lp_matrices(a, min_rev_art, max_rev_art,\n min_art_rev, max_art_rev)\n v = v.flatten()\n d = d.flatten()\n\n cur_progress += int(max_progress/6.)\n self.update_progress(\n cur_progress,\n max_progress,\n update_frequency=update_frequency,\n )\n\n solver = pywraplp.Solver('SolveReviewerAssignment',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n infinity = solver.Infinity()\n n, m = A.shape\n x = [[]]*m\n c = [0]*n\n\n for j in range(m):\n x[j] = solver.NumVar(-infinity, infinity, 'x_%u' % j)\n\n # state objective function\n objective = solver.Objective()\n for j in range(m):\n objective.SetCoefficient(x[j], v[j])\n objective.SetMaximization()\n\n # state the constraints\n for i in range(n):\n c[i] = solver.Constraint(-infinity, d[i])\n\n # update status bar\n if np.mod(i, int(n/10)) == 0:\n cur_progress += 3\n self.update_progress(\n cur_progress,\n max_progress,\n update_frequency=update_frequency,\n )\n\n for j in A.col[A.row == i]:\n c[i].SetCoefficient(x[j], A.data[np.logical_and(A.row == i, A.col == j)][0])\n\n result_status = solver.Solve()\n if result_status != 0:\n print \"The final solution might not converged\"\n\n x_sol = np.array([x_tmp.SolutionValue() for x_tmp in x])\n\n #x = prm.linprog_solve(v, ne, d)\n x_sol = (x_sol > 0.5)\n\n cur_progress += int(max_progress/6.)\n self.update_progress(\n 4*int(max_progress/6.),\n max_progress,\n update_frequency=update_frequency,\n )\n\n b = prm.create_assignment(x_sol, a)\n self.update_progress(\n 5*int(max_progress/6.),\n max_progress,\n update_frequency=update_frequency,\n )\n\n assignment_df = article_data[['PaperID', 'Title']]\n assignment_df['Reviewers'] = ''\n assignment_df['ReviewerIDs'] = ''\n for i in range(b.shape[0]):\n paper_reviewers = np.where(b[i, :])[0]\n assignment_df.Reviewers.iloc[i] = ', '.join(list(people_data.FullName.iloc[paper_reviewers].copy()))\n # assignment_df.ReviewerIDs.iloc[i] = ', '.join(list(people_data.PersonID.iloc[paper_reviewers].copy()))\n self.update_progress(\n 6*int(max_progress/6.),\n max_progress,\n update_frequency=update_frequency,\n )\n\n # transform to ascii\n assignment_df.Title.apply(lambda x: unicode(x))\n assignment_df.Reviewers.apply(lambda x: unicode(x))\n\n # , 'result': assignment_df.to_csv(None, na_rep='', index=False)\n # return {'task': {'status': 'SUCCESS'}}\n return assignment_df.to_csv(None, na_rep='', index=False, encoding='utf-8')", "def 
latent_topics_predict_party():\n k = 100\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare_predict_party(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = ['party'] + topic_features\n X_train, y_train = prep.subset(features, dep_var='party')\n\n baseline = DummyClassifier(strategy='stratified')\n\n ada = AdaBoostClassifier(learning_rate=0.1)\n\n mc = ModelChooser([baseline, ada])\n mc.fit_predict(X_train, y_train)\n mc.print_results()", "def extract_lda(papers, clean_up = True):\n assert len(papers) <= MAX_PROCESSING_LENGTH\n paper_abstracts = [_get_word_list(paper.abstract) for paper in papers]\n\n data = map(_word_count, paper_abstracts)\n temp_dir = _to_data_file(map(_word_counter_to_matrix_text, data))\n lda_result = _lda(temp_dir)\n\n if clean_up:\n shutil.rmtree(temp_dir)\n\n return _grade(lda_result, data)", "def semanticSearch(model, topics, index, idx_to_docid, k=1000):\r\n run = {}\r\n topic_nums = [topic for topic in topics]\r\n queries = [topics[topic]['title'] for topic in topics]\r\n encoded_queries = model.encode(queries)\r\n labels, distances = index.knn_query(encoded_queries, k=k)\r\n for i,topic in enumerate(topic_nums):\r\n run[topic] = []\r\n # considers highest passage match only for a document\r\n added_docids = []\r\n sim = [1-x for x in distances[i]]\r\n scored_run = zip(labels[i], sim)\r\n for i, (passageidx, dist) in enumerate(scored_run):\r\n docid = idx_to_docid[passageidx]\r\n \r\n if docid not in added_docids:\r\n run[topic].append((docid, dist))\r\n added_docids.append(docid)\r\n run[topic] = run[topic][:1000]\r\n return run", "def _predict_doc(self, x, flag):\n\n if flag == 1:\n denom = self.X.num_positive()\n else:\n denom = self.X.num_negative()\n denom += self.X.vocab_size()\n\n # multiply word probabilities for all words in x\n words = tokenize(x)\n # prob = 1.0\n # for word in words:\n # wi = self._doc_count_for_word(word, flag=flag)\n # # utilize the Laplace Smooth\n # prob *= ((float(wi)+1.0) / (float(denom)+2.0))\n\n prob = math.log(self.X.priors[str(flag)])\n for word in words:\n wi = self._doc_count_for_word(word, flag=flag)\n # utilize the Laplace Smooth\n prob += math.log((float(wi)+1.0) / (float(denom)+2.0))\n\n # prob *= math.log(self.X.priors[str(flag)])\n\n return prob", "def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 
1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out", "def __init__(self, corpus, epsilon=7):\n # TODO your code here'\n self.v = 0\n self.total=0\n self.epsilon=epsilon\n self.vocab = defaultdict(lambda:defaultdict(lambda:0))\n self.word_counts= defaultdict(lambda:0)\n self.train(corpus)", "def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. 
Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))", "def train(\n train_texts: List[str],\n train_labels: List[str],\n pretrain_params: Any = None) -> Any:\n train_texts = preprocessing(train_texts)\n train_tokenized_texts = text_to_tokens(train_texts)\n\n train_pos = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'pos']\n train_neg = [train_tokenized_texts[i] for i in range(len(train_labels)) if train_labels[i] == 'neg']\n \n cnt_pos_docs = len(train_pos)\n cnt_neg_docs = len(train_neg)\n\n\n all_words_freq = defaultdict(int)\n all_words = set()\n\n pos_dict = defaultdict(int)\n neg_dict = defaultdict(int)\n sum_len_pos = 0\n sum_len_neg = 0\n\n for text in train_pos:\n for token in text:\n all_words.add(token)\n all_words_freq[token] += text[token]\n pos_dict[token] += text[token]\n sum_len_pos += text[token]\n \n for text in train_neg:\n for token in text:\n all_words.add(token)\n all_words_freq[token] += text[token]\n neg_dict[token] += text[token]\n sum_len_neg += text[token]\n \n alpha = 1 #For additive smoothing\n M = len(all_words)\n sum_len = 0\n print(\"____________\")\n print(\"Sum of text lens\", sum_len)\n print(\"____________\")\n print(\"Words quantity\", M)\n print(\"____________\")\n\n token_probs_pos = defaultdict(int)\n token_probs_neg = defaultdict(int)\n print(\"Calculate probablity for\", M, \"tokens\")\n\n i = 0\n for token in all_words:\n if (i % 5000 == 0):\n print(\"__________\")\n print(\"Calculated\", i, \"tokens\")\n print(\"__________\")\n token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n i += 1\n \n return {\n \"token_probs_pos\": token_probs_pos,\n \"token_probs_neg\": token_probs_neg,\n \"all_words\": all_words,\n \"sum_len_pos\": sum_len_pos,\n \"sum_len_neg\": sum_len_neg,\n \"cnt_pos_docs\": cnt_pos_docs,\n \"cnt_neg_docs\": 
cnt_pos_docs,\n \"pos_dict\": pos_dict,\n \"neg_dict\": neg_dict\n }", "def __init__(self, n_components=3, doc_topic_prior=None,\n topic_word_prior=None, iterations=1000, verbose=True):\n\n self.loaded = False\n self.verbose = verbose\n\n self.n_components = n_components\n self.iterations = iterations\n self.doc_topic_prior = \\\n doc_topic_prior if doc_topic_prior else 1/n_components*0.5\n self.topic_word_prior = \\\n topic_word_prior if topic_word_prior else 1/n_components*0.1\n\n self.num_docs = None\n self.num_words = None\n self.dict_word2ind = None\n self.list_ind2word = None\n self.corpus = None\n\n self.alpha = None\n self.beta = None\n self.beta_sum = None\n\n self.n_mk = None\n self.n_kt = None\n self.n_k = None\n self.n_m = None\n self.z_mn = None # current z sampling state\n\n self.theta = None # best theta\n self.phi = None # best phi\n self.z_best = None # best z\n self.ll_best = None # best ll\n self.log = None # record log likelihood during training", "def predict(self, data, max_iteration=20, tol=1e-16):\n doc_topic_matrix = np.zeros([len(data), self.K], dtype=np.float)\n word_index_list = []\n for word in data:\n word_index_list.append(self.word2id[word])\n for i in range(max_iteration + 1):\n doc_topic_matrix_new = self.topic_word_matrix[:, word_index_list].T\n doc_topic_matrix_new = doc_topic_matrix_new.astype(np.float)\n doc_topic_matrix_new *= (doc_topic_matrix_new.sum(axis=0) - doc_topic_matrix + self.alpha)\n doc_topic_matrix_new /= doc_topic_matrix_new.sum(axis=1)[:, np.newaxis]\n delta_naive = np.abs(doc_topic_matrix_new - doc_topic_matrix).sum()\n doc_topic_matrix = doc_topic_matrix_new\n if delta_naive < tol:\n break\n theta_doc = doc_topic_matrix.sum(axis=0) / doc_topic_matrix.sum()\n return theta_doc" ]
[ "0.7421184", "0.73544043", "0.72307", "0.7002529", "0.6943727", "0.673741", "0.6723259", "0.6671794", "0.6657565", "0.6648194", "0.6644296", "0.66000706", "0.6559571", "0.6551705", "0.65513426", "0.65454745", "0.6489818", "0.64809227", "0.6480784", "0.6443412", "0.63931346", "0.63751763", "0.63607985", "0.63553685", "0.6346438", "0.6317345", "0.62599593", "0.62528116", "0.62517923", "0.6201099", "0.6199986", "0.61828995", "0.61768436", "0.6175558", "0.61600864", "0.61384326", "0.6108588", "0.6106484", "0.6105638", "0.60752153", "0.60747033", "0.6073933", "0.6068633", "0.6047469", "0.60413784", "0.6011669", "0.60095054", "0.6003097", "0.60028017", "0.6000461", "0.5996692", "0.5996433", "0.5995892", "0.5981565", "0.5977845", "0.595635", "0.5952053", "0.59518343", "0.59180534", "0.5909811", "0.58975273", "0.58970076", "0.5896565", "0.58917725", "0.5888381", "0.5888381", "0.5881695", "0.5875864", "0.5874054", "0.5847565", "0.58420306", "0.58297974", "0.58162856", "0.5806219", "0.5805869", "0.58046716", "0.5799202", "0.5797889", "0.5797276", "0.57805353", "0.5752455", "0.5752311", "0.57383144", "0.5737957", "0.5735727", "0.5735033", "0.5733325", "0.5732573", "0.5730542", "0.57190204", "0.57171214", "0.57152444", "0.57116914", "0.5711094", "0.5703312", "0.5698456", "0.5694999", "0.5694318", "0.56903523", "0.5688468" ]
0.744949
0
In case we're running in non-autoreload mode we need to restart the server
def reload(self):
    puts('Reloading application...')
    local('touch ../reload.txt')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()", "def _restart(self):\n pass", "def at_server_reload(self):\n self.db.started = True", "def restart_nginx():\n run_command_on_selected_server(_restart_nginx)", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def dev_start():\r\n nginx_reload()\r\n djangoserver_start()", "def handleReload(self, confInfo=None):", "def restart_gunicorn_nginx():\n restart_gunicorn()\n restart_nginx()", "def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')", "def restart(self):\n pass", "def pro_start():\r\n nginx_reload()\r\n gunicorn_start()", "def reloadMode(self): \n\t\tpass", "def restart(self):\r\n pass", "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart(self):", "def restart():\n log('reiniciando servicos', yellow)\n nginx_stop()\n nginx_start()\n nginx_restart()\n nginx_reload()\n supervisor_stop()\n supervisor_start()", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def load_site_if_needed(self):\n self.site.reload_if_needed()", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "def reload(self):", "def reload(self):", "def hotdeploy_noreq():\n _run_deploy(do_update_requirements=False)\n collectstatic()\n restart()", "def attempt_restart(self):\n self.controller.publish(self, 'restart')", "def force_reload(service):\n _service(service, 'force-reload')", "def restart(self) -> None:", "def restart_all():\n\n restart_nginx()\n restart_supervisor()", "def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')", "def reload_config(self):\n pass", "async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')", "def refresh_wsgi():\n\n require(\"wsgi_path\", \"sudo_user\")\n cmd = \"touch -c %s\" % env.wsgi_path\n sudo(cmd, user=env.sudo_user)", "def reload(self):\n\n pass", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def admin_server(request):\n return run_server(interval='10000')", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def restart():\n run_commands('python manage.py supervisor restart all')", "def save(self, *args, **kwargs):\n super().save()\n\n if self.requires_restart() and not InvenTree.ready.isImportingData():\n InvenTreeSetting.set_setting('SERVER_RESTART_REQUIRED', True, None)", "def repl_restart(restart: bool = True) -> None:", "def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])", "def restart_django(restart_url=None):\n with env.cd(settings.PROJECT_PATH):\n env.run('touch rnacentral/rnacentral/wsgi.py')\n if restart_url:\n requests.get(restart_url)", "def on_server_start(self):\n raise NotImplementedError", "def _handle_soft_relaunch(self, msg):\n logger.debug('removing cache for google earth')\n try:\n # deleting out of OLDHOME because that's where the cache is stored\n earth_dir = '%s/.googleearth' % os.environ['OLDHOME']\n shutil.rmtree(earth_dir)\n self._clear_cache()\n os.mkdir(earth_dir)\n except Exception as e:\n logger.warning('found error while removing earth cache: %s, could be 
normal operation though' % e.message)\n self._render_configs()\n self.earth_proc.handle_soft_relaunch()", "def request_shutdown(self, restart=False):", "def reload_gunicorn():\n puts(yellow(\"Reload gunicorn graceful\"))\n sudo('kill -HUP `cat %s`' % (env.gunicorn_pidpath), user=env.app_user)", "def continue_server():\n update_server_status({'ready': True})", "async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")", "def node_restart(ctx):\n ctx.obj['node'].attempt_restart()", "def restartFluidinfo():\n for port in range(9001, 9009):\n sudo('stop fluidinfo-api-node PORT=%d || true' % port)\n sudo('start fluidinfo-api-node PORT=%d' % port)\n with settings(warn_only=True):\n sudo('kill -USR1 $(cat /var/run/nginx.pid)')", "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def nginx_reload():\n log('reload nginx', yellow)\n sudo('/etc/init.d/nginx reload')", "def restart_nginx():\n sudo('/etc/init.d/nginx restart')", "def touch_project():\n remote('touch config/wsgi*')", "def do_restart(app):\n\n config = glob(join(UWSGI_ENABLED, '{}*.ini'.format(app)))\n\n if len(config) > 0:\n echo(\"Restarting app '{}'...\".format(app), fg='yellow')\n for c in config:\n remove(c)\n spawn_app(app)\n else:\n echo(\"Error: app '{}' not deployed!\".format(app), fg='red')", "def restart():\n logging.warning (\"[FLASKWEB] Shutting down K3 Dispatcher....\")\n shutdown_dispatcher()\n return 'Dispatcher is restarting.....Give me a millisec'", "def on_server_start(self, server):\n pass", "def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()", "def restart_with_reloader():\n print(\"[+] RESTARTING\")\n cwd = os.getcwd()\n args = _get_args_for_reloading()\n new_environ = os.environ.copy()\n new_environ[\"TERMNINJA_SERVER_RUNNING\"] = \"true\"\n cmd = \" \".join(args)\n worker_process = Process(\n target=subprocess.call,\n args=(cmd,),\n kwargs={\"cwd\": cwd, \"shell\": True, \"env\": new_environ},\n )\n worker_process.start()\n return worker_process", "def test_redeploy(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def server_activate(self):\n\t\tpass", "def __init__(self):\n self._restart = True\n Application.__init__(self)", "def __init__(self):\n self._restart = True\n Application.__init__(self)", "def start_server(self):\n if not self._server:", "def nginx_restart():\n log('restart nginx', yellow)\n sudo('/etc/init.d/nginx restart')", "def restart_with_reloader():\n while True:\n print(f'Restarting with reloader')\n args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv\n new_environ = os.environ.copy()\n new_environ[\"RUN_MAIN\"] = 'true'\n exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)\n if exit_code != 3:\n return exit_code", "async def on_reload(name: str):\n global started\n local_started = started\n\n await plugins.reload(name)\n\n started = local_started", "def reload_app_instance(self, instance_id, app_name=\"\"):\n log.info(\"Reloading app . 
id = %s name = %s\" % (instance_id, app_name))\n if self.stop_app_instance(instance_id):\n self.load_app_instances_configs()\n self.configure_and_init_app_instance(instance_id)", "def at_server_shutdown(self):\n self.db.started = False", "def graceful_reload(signum, traceback):\n court.close()\n signal.signal(signal.SIGHUP, graceful_reload)", "async def attempt_reconnect(self):\n await deploy.reconnect()", "def is_hot_reload():\n return os.environ.get('WERKZEUG_RUN_MAIN')", "def kill_server():\n global _server_url\n if _server_url != None:\n try:\n fp = urllib.request.urlopen('%sexit' % (_server_url,))\n except:\n pass\n\n _server_url = None", "def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()", "def sync_apps(self):\n cherrypy.server.httpserver.wsgi_app = self.get_app()", "def restart_from_helper ( self, ):\r\n self.no_helper_restarts += 1\r\n self.logger.info( \"restart_from_helper\" )\r\n\r\n self.restart()", "def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass", "def reload_configurations(self) -> None:\n ...", "def restart():\n stop()\n start()", "def restart_gunicorn():\n with settings(warn_only=True):\n with cd(env.code_dir):\n pid = sudo('cat gunicorn.pid')\n remove_pyc_files()\n if not pid.succeeded:\n start_gunicorn()\n else:\n sudo('kill -HUP %s' % pid)", "def restart_scrapy_daemon():\n global REPO_BASE_PATH\n logger.info('Scrapy daemon restarting...')\n arguments = ['python'] + [REPO_BASE_PATH+'/deploy/sqs_ranking_spiders/scrapy_daemon.py'] + sys.argv[1:]\n if 'restarted' not in arguments:\n arguments += ['restarted']\n else:\n logger.error('Error while restarting scrapy daemon. '\n 'Already restarted.')\n return\n logging.info('Starting %s with args %s' % (sys.executable, arguments))\n os.execv(sys.executable, arguments)", "def restart(self):\n self.__init__()\n return", "def reload_config(self):\n if self.faucet is not None:\n self.faucet.reload_config(None)", "def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))", "def middleware(self, environ, start_response):\n app = self.app\n self.register(app.config['CHANNEL_SERVER'], 'reload', app.reload)", "def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True", "def serverStatusHealth(request):\n\n initRequest(request)\n periodOfAllServWorkRestart = 15 #minutes.\n restartTimeWindow = 5\n\n debug = True\n\n # Here we should load all the servers from the settingsdjangosettings.\n # next is just for tests\n\n data = getCacheEntry(request, \"StatusHealth\")\n\n print (\"serverStatusHealth \", datetime.now(), \" runninghost:\", request.session[\"hostname\"], \" \", data)\n\n if data is None:\n q = collections.deque()\n q.append(\"aipanda100\")\n q.append(\"aipanda105\")\n q.append(\"aipanda106\")\n q.append(\"aipanda115\")\n q.append(\"aipanda116\")\n q.append(\"aipanda107\")\n q.append(\"aipanda108\")\n lastupdate = datetime.now()\n data['q'] = pickle.dumps(q)\n data['lastupdate'] = lastupdate\n setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n else:\n data = json.loads(data)\n q = pickle.loads(data['q'])\n lastupdate = datetime.strptime(data['lastupdate'], djangosettings.defaultDatetimeFormat)\n\n # end of test filling\n\n currenthost = q.popleft()\n runninghost = request.session[\"hostname\"]\n\n if (currenthost == runninghost):\n if (datetime.now() - lastupdate) > 
timedelta(minutes=(periodOfAllServWorkRestart)) and \\\n (datetime.now() - lastupdate) < timedelta(minutes=(periodOfAllServWorkRestart+restartTimeWindow)):\n return HttpResponse(\"Awaiting restart\", content_type='text/html')\n elif (datetime.now() - lastupdate) > timedelta(minutes=(periodOfAllServWorkRestart)) and \\\n (datetime.now() - lastupdate) > timedelta(minutes=(periodOfAllServWorkRestart+restartTimeWindow)):\n data = {}\n q.append(currenthost)\n data['q'] = pickle.dumps(q)\n data['lastupdate'] = datetime.now().strftime(djangosettings.defaultDatetimeFormat)\n setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n return HttpResponse(\"Normal operation\", content_type='text/html')\n\n # rows = subprocess.check_output('ps -eo cmd,lstart --sort=start_time | grep httpd', shell=True).split('\\n')[:-2]\n # print \"serverStatusHealth \", datetime.now(), \" rows:\", rows\n #\n # if (currenthost == runninghost) and (datetime.now() - lastupdate) > timedelta(minutes=periodOfAllServWorkRestart):\n #\n # if len(rows) > 0:\n # httpdStartTime = list(datefinder.find_dates(rows[0]))[0]\n # if (datetime.now() - httpdStartTime) < timedelta(minutes=periodOfAllServWorkRestart):\n #\n # print \"serverStatusHealth \", \"httpdStartTime\", httpdStartTime\n #\n # data = {}\n # data['q'] = pickle.dumps(q)\n # data['lastupdate'] = datetime.now().strftime(defaultDatetimeFormat)\n # setCacheEntry(request, \"StatusHealth\", json.dumps(data, cls=DateEncoder), 60 * 60)\n #\n # print \"serverStatusHealth \", \"Normal operation0\"\n # return HttpResponse(\"Normal operation\", content_type='text/html')\n # # We think that wsgi daemon recently restarted and we can change order to the next server\n # # q.put(currenthost)\n # # q. put to cache\n # # lastupdate put to cache\n # # return success\n #\n # # we return failed by default\n # print \"serverStatusHealth \", \"Awaiting restart\"\n # return HttpResponse(\"Awaiting restart\", content_type='text/html')\n #\n # print \"serverStatusHealth \", \"Normal operations1\"\n return HttpResponse(\"Normal operation\", content_type='text/html')", "def run_forever(self):\n self.app.run()", "def ForceReload():\n url = 'http://icfpc2013.cloudapp.net/myproblems?auth=0017eB6c6r7IJcmlTb3v4kJdHXt1re22QaYgz0KjvpsH1H'\n reader = urllib2.urlopen(url)\n problems_string = reader.read()\n reader.close()\n try:\n fout = open(GetModelFilename(), mode='w')\n fout.write(problems_string)\n fout.close()\n except IOError:\n sys.stderr.write('failed to write to model file.')\n return", "def restart_tftp_server(self, mode=None):\n raise NotImplementedError", "def finish_maintenance(self, errors):\n if not self.can_restart:\n return\n\n try:\n self._shutdown()\n run(\" \".join(self.cmd_line_opts['argv']))\n self.client = pymongo.MongoClient(self.host, self.port)\n self._wait_secondaries_catch_up()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()", "def restart(config):\n shutdown(config)\n startup(config)\n return", "def syncrepl_refreshdone(self):\n pass", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def is_restarting(self) -> bool:\r\n return False", "def force_load(self):\n pass", "def webserver_start():\n run(_webserver_command())", "def _rebuild_server(self, context, server, preserve_ephemeral):\n\n self.driver.rebuild(context, server, 
preserve_ephemeral)", "def update_website_configuration():\n put('config/supervisor_website.conf', \n '/etc/supervisor/conf.d/gunicorn.conf', \n use_sudo=True)\n sudo('supervisorctl update')\n sudo('supervisorctl reload')", "def _try_restart_fedora(self) -> None:\n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n super().config_test()", "def handle_reload_toolbox(self):", "def coldRestart(self):\n assert False, \"Deriving class must implement\"" ]
[ "0.72675717", "0.72130495", "0.704559", "0.668948", "0.66131496", "0.65728164", "0.65231895", "0.6460572", "0.64301753", "0.6418728", "0.6396875", "0.63923776", "0.6356839", "0.63194877", "0.6300053", "0.6296835", "0.6279461", "0.6236136", "0.62225044", "0.62004274", "0.62004274", "0.6196711", "0.6185604", "0.61671495", "0.6162447", "0.6138989", "0.61353713", "0.61261386", "0.6119312", "0.609769", "0.6047218", "0.60452616", "0.60301626", "0.6024753", "0.6023969", "0.6017434", "0.59858507", "0.5947842", "0.5945506", "0.5935061", "0.5925844", "0.5911604", "0.5904794", "0.5885981", "0.5874717", "0.58628213", "0.5853275", "0.5836577", "0.583106", "0.57968616", "0.57943004", "0.57929295", "0.5791987", "0.5789835", "0.578945", "0.578325", "0.57818586", "0.5775946", "0.57693607", "0.5763617", "0.5763617", "0.5756995", "0.5741467", "0.5735828", "0.57334536", "0.5733173", "0.57262015", "0.5717417", "0.57129854", "0.5701336", "0.5683739", "0.566641", "0.56530905", "0.5638497", "0.56378675", "0.5634676", "0.5633868", "0.56329876", "0.56144524", "0.5612338", "0.5611832", "0.56107014", "0.56089336", "0.5607645", "0.55912316", "0.55860704", "0.5580029", "0.5575194", "0.5562597", "0.55561787", "0.55531144", "0.5549967", "0.55469716", "0.5530053", "0.55220765", "0.552184", "0.5513497", "0.5508462", "0.5503111", "0.5496148" ]
0.6135069
27
Workaround for manage.py migrate complications: run syncdb in case it's our first run, so we make sure the south_migrationhistory table is created; run migrate to apply the latest migrations; run syncdb again to populate contrib.auth.models
def smart_syncdb_migrate(self):
    local('python manage.py syncdb')
    local('python manage.py migrate')
    local('python manage.py syncdb --all')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')", "def post_migrations(self):", "def migrate(self):\n\tpass", "def syncdb():\n with virtualenv():\n run('python manage.py syncdb --noinput')\n run('python manage.py migrate')", "def update_db():\r\n settings = getattr(options, 'settings', 'dev')\r\n sh(django_cmd('lms', settings, 'syncdb', '--traceback', '--pythonpath=.'))\r\n sh(django_cmd('lms', settings, 'migrate', '--traceback', '--pythonpath=.'))", "def migrate_database(self):\n\n self.db.migrate_database()", "def sync_db():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Create tables for models which have not yet been installed?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py syncdb\"\n \" --noinput\"\n )", "def migrate(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n run('bin/django syncdb')\n try:\n run('bin/django schemamigration dasa --auto')\n except:\n pass\n run('bin/django migrate dasa')", "def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False", "def southify(app):\n managepy('migrate %s 0001 --fake' % app)\n managepy('migrate %s' % app)", "def setup_before_migration(self, apps):", "def migrate_db():\n Base.metadata.create_all(ENGINE)", "def migration():", "def migrate(cr, version):\n pass", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def migrate():\n run('cd /home/indabom/web && source ./bin/activate && cd ./site && python manage.py migrate')", "def model_pre_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = True", "def migrate_new_apps():\n new_apps = run('%s %s/fabfiles/django_scripts/get_apps_without_migration.py'\n % (env.PYTHON_BIN, env.SRC_PATH))\n # The script denotes the start of its output by \"{% output %}\" tag so we\n # only take whatever's after that\n new_apps = new_apps.split('{% output %}')[1].split()\n with cd(env.SRC_PATH):\n for app in new_apps:\n sudo(\"%s manage.py schemamigration %s --initial\" %\n (env.PYTHON_BIN, app.strip()))\n sudo(\"%s manage.py migrate %s --no-initial-data\" %\n (env.PYTHON_BIN, app.strip()))", "def ready(self):\n import django_better_migrations.migration_writer_patch # noqa", "def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")", "def migrate(args=''):\n run_commands('python manage.py migrate %s' % args)", "def db_migrate():\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_UP.format(f\"upgrade-{when}\", when, MIGRATION_TABLE)\n down = MYSQL_DOWN.format(f\"downgrade-{when}\", when, MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: {os.path.join('migrations', sql_file)}\")", "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), 
verbosity=0, interactive=False)", "def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise", "def perform_migration():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('python manage.py migrate --settings=prod_settings', pty=True)", "def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)", "def migratedb(rollback=False):\n\n require(\"virtualenv_path\", \"project_path\", \"sudo_user\")\n\n #\n # Some things need to be done first (i.e. if they need a different\n # database connection or some custom args)\n #\n if \"migratedb_first\" in env:\n\n for app, args in env.migratedb_first.iteritems():\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version, args)\n\n #\n # Do the rest afterwards\n #\n if has_version_info():\n\n apps = env.south_migrations.keys()\n\n for app in apps:\n\n print app\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version)\n\n #\n # If we know nothing, just migrate everything\n #\n else:\n migrate_app_db()", "def migrate(cls)->None:\n pass", "def migrate_fake():\n run('source /home/indabom/web/bin/activate && /home/indabom/web/site/manage.py migrate --fake')", "def migratedb_command():\n db = get_db()\n # This migration detects whether it needs to run before making changes.\n db.migrate_add_user_is_enabled()", "def tearDownClass(cls):\n management.call_command(\"migrate\")", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def migrate(*apps):\n # First sync db\n print(apps)\n\n if len(apps) > 0:\n for app in apps:\n try:\n _manage('migrate %s' % app)\n except Exception as e:\n print(red('Failed to migrate {} app! 
{}'.format(app, str(e))))\n else:\n _manage('migrate')", "def upgrade():\r\n current_context = op.get_context()\r\n meta = current_context.opts['target_metadata']\r\n user = sa.Table('users', meta, autoload=True)\r\n\r\n # Add the initial admin user account.\r\n op.bulk_insert(user, [{\r\n 'username': u'admin',\r\n 'password': u'$2a$10$LoSEVbN6833RtwbGQlMhJOROgkjHNH4gjmzkLrIxOX1xLXNvaKFyW',\r\n 'email': u'testing@dummy.com',\r\n 'activated': True,\r\n 'is_admin': True,\r\n 'api_key': u'123456',\r\n }\r\n ])", "def create_automatic_migration():\n with cd(env.SRC_PATH):\n apps = run('%s fabfiles/django_scripts/get_apps_to_migrate.py' %\n env.PYTHON_BIN).split('\\n')\n with settings(hide('warnings'), warn_only=True):\n for app in apps:\n output = sudo('%s manage.py schemamigration %s --auto' %\n (env.PYTHON_BIN, app.strip()))\n\n # Raise any error other than nothing seems to have changed\n if output.failed:\n if 'Nothing seems to have changed' not in output:\n raise Exception('Error when running automated schema migration')", "def initialise_database():\n with cd(code_dir):\n run(python_add_str + \"python manage.py syncdb --all\")\n run(python_add_str + \"python manage.py migrate --fake\")", "def reset_db_danger():\n from flask.ext.migrate import init, migrate\n # Remove the migration folder if exist\n if os.path.exists('migrations'):\n shutil.rmtree('migrations')\n\n # Remove the sqlite database files if exist\n for fl in glob.glob('*.sqlite'):\n os.remove(fl)\n\n # Reset Migration Database\n init()\n\n # migrate database to latest revision\n migrate(message='init')", "def syncdb():\n\n require(\"virtualenv_path\", \"project_path\", \"sudo_user\")\n utils.django_manage_run(\n env.virtualenv_path,\n env.project_path,\n \"syncdb\",\n env.sudo_user,\n )", "def run_migration_checks():\n check_model_state()\n check_migration_state()", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n return None", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n return None", "def makemigrations(*apps):\n # First sync db\n print(apps)\n\n if len(apps) > 0:\n for app in apps:\n try:\n _manage('makemigrations %s' % app)\n except Exception as e:\n print(red('Failed to migrate {} app! Exception: {}'.format(app, str(e))))\n else:\n _manage('makemigrations')", "def setUpBeforeMigration(self, apps):\n pass", "def syncdb():\n run('source %s/bin/activate' % env.virtualenv_root)\n run('%s/mwana/manage.py syncdb' % env.code_root)", "def migrate(env):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' 
% backend)\n warnings.warn(message)\n else:\n getattr(registry, backend).initialize_schema()", "def db_upgrade():\n generate_migration_file()\n dbu_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n for time_step in [_.strip('.sql') for _ in migration_files()]:\n decide = MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': time_step}})\n if not decide:\n MySQLScheme.commit(getattr(dbu_query, f\"upgrade_{time_step}\").sql)\n LOGGER.info(f\"successful migration: {time_step}\")\n else:\n LOGGER.info(f'migration already exists: {time_step}')", "def configure_ext_migrate(app):\n migrate = Migrate(app, models.db)", "def register_migrations(self, migrations):\n with self.internal_db.begin() as conn:\n for migration in migrations:\n conn.execute(\n \"INSERT INTO migration (name) \" \"VALUES ('%s');\" % migration\n )", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n return True", "def safe_upgrade():\n goviewbe.upgrade_db(current_app)", "def sync_db():\n pass", "def sync_prod_db(env=None, reset_db=False, haus_vars={}):\n print green('sync/migrate DB')\n if reset_db:\n # uncomment below and replace DATABSE_URL with the prod database url\n # note that this is destructive of the PROD DB\n #local('heroku pg:reset DATABASE_URL') #add \"--confirm haus\" to remove required input\n pass\n local('heroku run ./manage.py migrate -a {}'.format(APP_INFO[env][\"heroku_app_name\"]))", "def migrate(heroku_app=HEROKU_APP):\n subprocess.run([\n 'heroku', 'run',\n '--app', heroku_app,\n '--env', 'PYTHON_PATH=/app',\n '--exit-code',\n '--',\n 'python', '-m', 'frank.manage', 'db', 'upgrade',\n ])\n subprocess.run(['heroku', 'restart', '--app', heroku_app])", "def upgrade_app_db(app, user):\n ctx.logger.info('Upgrading %s DB', app.capitalize())\n run('db-migrate', app, user)", "def syncdb():\n with virtualenv():\n django_settings = 'DJANGO_SETTINGS_MODULE=%s' % env.settings\n env.run('%s ./manage.py syncdb --noinput' % django_settings)", "def upgrade():\n with op.batch_alter_table(\"users\") as batch_op:\n batch_op.drop_column(\"registered_date\")\n batch_op.drop_column(\"registered_age\")\n batch_op.drop_column(\"cell\")\n batch_op.drop_column(\"portrait_id\")\n batch_op.drop_column(\"street_number\")\n batch_op.drop_column(\"id_value\")\n batch_op.drop_column(\"nat\")\n batch_op.drop_column(\"id_name\")\n batch_op.drop_column(\"md5\")\n batch_op.drop_column(\"date_of_birth\")\n batch_op.drop_column(\"sha256\")\n batch_op.drop_column(\"username\")\n batch_op.drop_column(\"salt\")\n batch_op.drop_column(\"timezone_offset\")\n batch_op.drop_column(\"uuid\")\n batch_op.drop_column(\"title\")\n batch_op.drop_column(\"age\")\n batch_op.drop_column(\"longitude\")\n batch_op.drop_column(\"sha1\")\n batch_op.drop_column(\"timezone_description\")\n batch_op.drop_column(\"password\")\n batch_op.drop_column(\"latitude\")", "async def migrate(self):\n # Controlla se ci sono tabelle nel db\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(\"\"\"SELECT COUNT(DISTINCT table_name) as c\n FROM information_schema.columns\n WHERE table_schema = %s\"\"\", (conn.db,))\n db_empty = (await cur.fetchone())[\"c\"] <= 0\n\n # Se ci sono tabelle, prova a leggere `db_version`\n if not db_empty:\n await cur.execute(\"SELECT db_version FROM db_version LIMIT 1\")\n db_version_in_db = await cur.fetchone()\n db_version = 0 if db_version_in_db is None else db_version_in_db[\"db_version\"]\n else:\n db_version = 0\n\n # Prendi la lista di file sql e py da eseguire\n 
new_migrations = [x for x in self.migrations if x.id > db_version]\n\n # Controlla se ci sono migration da eseguire\n if not new_migrations:\n self.logger.info(\"No new migrations. The database is already up to date!\")\n return\n\n # Esegui migrations\n self.logger.info(\"Current db version: @{}\".format(db_version))\n db_version += 1\n current_migration = self.get_migration(db_version)\n while current_migration is not None:\n self.logger.info(\"Executing {}\".format(current_migration.file_name))\n\n if current_migration.type == \"sql\":\n # Leggi ed esegui file sql\n with open(\n os.path.join(os.path.dirname(__file__), \"migrations/{}\".format(current_migration.file_name)), \"r\"\n ) as f:\n data = f.read()\n async with self.db.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(data)\n await conn.commit()\n elif current_migration.type == \"py\":\n # Importa modulo py\n module = importlib.import_module(\"migrator.migrations.{}\".format(current_migration.file_name[:-3]))\n migr = getattr(module, \"do\")\n await migr()\n\n # Migration eseguita, aggiorna `db_version`\n self.logger.info(\"Migration {} executed with no errors\".format(current_migration.file_name))\n await self.save_db_version(db_version)\n\n # Vai alla prossima migration\n db_version += 1\n current_migration = self.get_migration(db_version)\n self.logger.info(\"All migrations executed correctly\")", "def migrate(env, dry_run=False):\n registry = env['registry']\n settings = registry.settings\n readonly_backends = ('storage', 'permission')\n readonly_mode = asbool(settings.get('readonly', False))\n\n for backend in ('cache', 'storage', 'permission'):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = ('Cannot migrate the %s backend while '\n 'in readonly mode.' 
% backend)\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)", "def allow_syncdb(self, db, model):\n\n return True", "def reset(self) -> None:\n call_command('migrate', verbosity=0, database=self._database)", "def test_complete_model_migration(monkeypatch):\n resync_after_migrate_mock = Mock()\n monkeypatch.setattr('datahub.search.tasks.resync_after_migrate', resync_after_migrate_mock)\n mock_app = create_mock_search_app(\n current_mapping_hash='current-hash',\n target_mapping_hash='target-hash',\n )\n get_search_app_mock = Mock(return_value=mock_app)\n monkeypatch.setattr('datahub.search.tasks.get_search_app', get_search_app_mock)\n\n complete_model_migration.apply(args=('test-app', 'target-hash'))\n resync_after_migrate_mock.assert_called_once_with(mock_app)", "def make_migrations(ctx, app_name):\n run(\"./manage.py makemigrations {}\".format(app_name), pty=True)", "def reset():\n local(\"python manage.py initdb\")", "def db_initialise():\n generate_migration_file()\n if not MySQLScheme.fetch_one(IS_MIGRATION_TABLE,\n **{\"args\": {'schema': SCHEMA}}):\n with open(MIGRATION_FILE, 'r') as init_sql:\n data = init_sql.read()\n\n if f\"CREATE TABLE IF NOT EXISTS {MIGRATION_TABLE}\" not in data:\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_MIGRATION_UP.format(f\"upgrade-{when}\", when,\n MIGRATION_TABLE)\n down = MYSQL_MIGRATION_DOWN.format(f\"downgrade-{when}\",\n MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: \"\n f\"{os.path.join('migrations', sql_file)}\")\n else:\n when = re.findall('[0-9]+', data)[0]\n\n generate_migration_file()\n dbi_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n MySQLScheme.commit(getattr(dbi_query, f\"upgrade_{when}\").sql)\n LOGGER.info(f\"initial successful migration: {when}\")", "def downgrade():\n op.execute(\"\"\"\n CREATE TABLE ggrc_gdrive_integration_alembic_version (\n version_num varchar(32) NOT NULL\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8\n \"\"\")\n op.execute(\"\"\"\n INSERT INTO ggrc_gdrive_integration_alembic_version (version_num)\n VALUES ('3f64d03c6c01')\n \"\"\")", "def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)", "def run_migrations_offline():\n print('running offline!!!!!!!!!!!!!!!!');\n url = AppConfig.SQLALCHEMY_DATABASE_URI\n context.configure(\n url=url,\n target_metadata=target_metadata,\n literal_binds=True\n )\n\n with context.begin_transaction():\n context.run_migrations()", "def prompt_for_historical_migration(app_name, migration_name, required_commit):\n\n def get_most_recent_migration_date():\n return MigrationRecorder.Migration.objects \\\n .order_by('-applied') \\\n .values_list('applied') \\\n .first()[0]\n\n def get_days_since_last_migration():\n current_time = datetime.now()\n last_migration_time = get_most_recent_migration_date()\n\n return (current_time - last_migration_time).days\n\n @skip_on_fresh_install\n def _run_command(apps, schema_editor):\n print(\"\")\n print(f\"\"\"\n This migration cannot be run, as it depends on code that has since been removed.\n To fix this, follow the instructions below to run this migration from a previous version of the code.\n In order to prevent this in the future, we recommend running migrations at least 
once every 6 weeks.\n For reference, the current code has not run migrations for {get_days_since_last_migration()} days.\n\n Run the following commands to run the historical migration and get up to date:\n With a cloud setup:\n commcare-cloud <env> fab setup_limited_release --set code_branch={required_commit}\n\n commcare-cloud <env> django-manage --release <release created by previous command> migrate_multi {app_name}\n\n commcare-cloud <env> deploy commcare\n\n With a development setup:\n git checkout {required_commit}\n ./manage.py migrate_multi {app_name}\n\n If you are sure this migration is unnecessary, you can fake the migration:\n With a cloud setup:\n commcare-cloud <env> django-manage migrate_multi --fake {app_name} {migration_name}\n\n With a development setup:\n ./manage.py migrate_multi --fake {app_name} {migration_name}\n \"\"\") # noqa: E501\n sys.exit(1)\n\n return migrations.RunPython(_run_command, reverse_code=migrations.RunPython.noop)", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def ExportMigrations():\n\n # Import MigrationExecutor lazily. MigrationExecutor checks at\n # import time that the apps are ready, and they are not when\n # django_prometheus is imported. ExportMigrations() should be\n # called in AppConfig.ready(), which signals that all apps are\n # ready.\n from django.db.migrations.executor import MigrationExecutor\n\n if \"default\" in connections and (isinstance(connections[\"default\"], DatabaseWrapper)):\n # This is the case where DATABASES = {} in the configuration,\n # i.e. the user is not using any databases. Django \"helpfully\"\n # adds a dummy database and then throws when you try to\n # actually use it. So we don't do anything, because trying to\n # export stats would crash the app on startup.\n return\n for alias in connections.databases:\n executor = MigrationExecutor(connections[alias])\n ExportMigrationsForDatabase(alias, executor)", "def recreate_db():\n drop_db()\n create_db()\n populate_db()", "def init_db():\n import cerbereapp.models\n Base.metadata.create_all(bind=engine)", "def makemigration(self):\n template = os.path.join(os.path.dirname(__file__),\n 'migration_template.py')\n ver = self.latest(quiet=True) + 1\n destination = os.path.abspath(self.config.get('migrate', 'location'))\n if not os.path.exists(destination):\n os.makedirs(destination)\n fname = 'version_{}.py'.format(ver)\n shutil.copyfile(template, os.path.join(destination, fname))\n self.logger.info('Migration \\'{}\\' created'.format(fname))\n self.latest()", "def upgrade():\n op.execute(\n f\"\"\"\n ALTER TABLE\n {config.CLEAN_SCHEMA}.forecasts\n RENAME COLUMN\n training_horizon\n TO\n train_horizon;\n \"\"\",\n ) # noqa:WPS355", "def check_missing_migrations():\n from django.db.migrations.autodetector import MigrationAutodetector\n from django.db.migrations.loader import MigrationLoader\n from django.db.migrations.questioner import (\n NonInteractiveMigrationQuestioner as Questioner,\n )\n from django.db.migrations.state import ProjectState\n\n loader = MigrationLoader(None, ignore_no_migrations=True)\n conflicts = loader.detect_conflicts()\n if conflicts:\n raise Exception(\n \"Migration conflicts detected. 
Please fix your migrations.\"\n )\n questioner = Questioner(dry_run=True, specified_apps=None)\n autodetector = MigrationAutodetector(\n loader.project_state(),\n ProjectState.from_apps(apps),\n questioner,\n )\n changes = autodetector.changes(\n graph=loader.graph,\n trim_to_apps=None,\n convert_apps=None,\n migration_name=None,\n )\n if changes:\n raise Exception(\n \"Migration changes detected. \"\n \"Please update or add to the migration file as appropriate\"\n )\n print(\"Migration-checker detected no problems.\")", "def setUp(self):\n super(MigrationTestCase, self).setUp()\n\n self.executor = MigrationExecutor(connection)\n self.executor.migrate(self.migrate_from)", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS blacklist(\n id serial PRIMARY KEY,\n token varchar\n )\"\"\")\n database.connection.commit()", "def django_db_setup(django_db_setup, django_db_blocker):\n with django_db_blocker.unblock():\n # todo Now remove the --noinput just to be sure that the test database's data will be deleted\n management.call_command('flush', '--noinput')\n zakanda.db.create_initial_data()", "def allow_syncdb(self, db, model):\n return True", "def allow_syncdb(self, db, model):\n return True", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == 'default':\n return True\n else:\n return False", "def _pre_setup(self, *args, **kwargs):\n get_user_model().objects.all().delete()\n super()._pre_setup(*args, **kwargs)", "def _load_migrations(self):\n self.migrations.clear()\n files = os.listdir(os.path.join(os.path.dirname(__file__), \"migrations\"))\n for file in files:\n matches = re.search(\"(?:m)(\\d+)(?:_(.+))?\\.(sql|py)\", file, re.IGNORECASE)\n if matches is None:\n continue\n self.migrations.append(Migration(*matches.groups()))", "def run_migrations_offline():\n\n # TODO: Enable postgres version 7/23/2019 # url = get_url()\n # TODO: Enable postgres version 7/23/2019 # context.configure(\n # TODO: Enable postgres version 7/23/2019 # url=url, target_metadata=target_metadata, literal_binds=True, compare_type=True\n # TODO: Enable postgres version 7/23/2019 # )\n\n url = config.get_main_option(\"sqlalchemy.url\")\n context.configure(url=url, target_metadata=target_metadata, literal_binds=True)\n\n with context.begin_transaction():\n context.run_migrations()", "def upgrade():\n\n conn = op.get_bind()\n invalid_acr = get_invalid_acrs(conn, models_names)\n\n if invalid_acr:\n invalid_acr_ids = [x.id for x in invalid_acr]\n add_to_objects_without_revisions_bulk(conn,\n invalid_acr_ids,\n acr,\n \"deleted\")\n delete_invalid_acr(conn, models_names)", "def confirm_migrate_pickle(before, after):\n assert False", "def _initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")", "def _run_migrations(self, current_migration_version: int):\n logger.debug(\"Checking for necessary database migrations...\")\n\n while current_migration_version < latest_migration_version:\n next_migration_version = current_migration_version + 1\n logger.info(\n f\"Migrating the database from v{current_migration_version} to v{next_migration_version}...\",\n 
)\n\n migration = importlib.import_module(f\".migrations.{str(next_migration_version).rjust(3, '0')}\", \"middleman\")\n # noinspection PyUnresolvedReferences\n migration.migrate(self)\n\n # Update the stored migration version\n self._execute(\"UPDATE migration_version SET version = ?\", (next_migration_version,))\n\n logger.info(f\"Database migrated to v{next_migration_version}\")\n current_migration_version += 1", "def run_once_off_migration(command_name, *args, required_commit=None, **kwargs):\n @skip_on_fresh_install\n def _run_command(apps, schema_editor):\n run_management_command_or_exit(command_name, required_commit, *args, **kwargs)\n\n return migrations.RunPython(_run_command, reverse_code=migrations.RunPython.noop, elidable=True)", "def migrate_up(self, version, with_data=False):\n # NOTE(xek): This is a list of migrations where we allow dropping\n # things. The rules for adding exceptions are very very specific.\n # Chances are you don't meet the critera.\n # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE\n exceptions = [\n 64, # drop constraint\n 86, # drop watch_rule/watch_data tables\n ]\n # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE\n\n # NOTE(xek): We start requiring things be additive in\n # liberty, so ignore all migrations before that point.\n LIBERTY_START = 63\n\n if version >= LIBERTY_START and version not in exceptions:\n banned = ['Table', 'Column']\n else:\n banned = None\n with BannedDBSchemaOperations(banned):\n super(HeatMigrationsCheckers, self).migrate_up(version, with_data)", "def _migrate_if_necessary(self, entries):\r\n entries = [\r\n self._migrate[entry.get('schema', 0)](self, entry)\r\n for entry in entries\r\n ]\r\n return entries", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n return db == self.db_name", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def recreate_db():\n drop_db()\n create_db()", "def run_migrations_offline():\n # pylint:disable=E1101\n context.configure(url=db_url,\n target_metadata=target_metadata)\n with context.begin_transaction():\n context.run_migrations()\n # pylint:enable=E1101", "def update_user_backward(apps, schema_editor):\n Group.objects.all().delete()", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def rebuild_db():\n delete_db()\n create_db()\n insert_db()", "def migrate(migrator, database, fake=False, **kwargs):\n\n tables = database.get_tables()\n\n if 'tea_vendors' not in tables:\n @migrator.create_model\n class TeaVendor(pw.Model):\n description = pw.CharField(max_length=255)\n link = pw.CharField(max_length=255)\n logo = pw.CharField(max_length=255, null=True)\n name = pw.CharField(max_length=255)\n twitter = pw.CharField(max_length=255, null=True)\n slug = pw.CharField(max_length=255, unique=True)\n order = pw.IntegerField(default=0)\n\n class Meta:\n db_table = \"tea_vendors\"\n\n if 'tea_teas' not in tables:\n @migrator.create_model\n class Tea(pw.Model):\n deleted = pw.DateTimeField(null=True)\n description = pw.CharField(max_length=255, null=True)\n illustration = pw.CharField(max_length=255)\n ingredients = pw.TextField(null=True)\n link = pw.CharField(max_length=255)\n long_description = pw.TextField(null=True)\n name = pw.CharField(max_length=255)\n price = pw.FloatField(null=True)\n price_unit = pw.CharField(max_length=255, null=True)\n slug = pw.CharField(max_length=255)\n tips_raw = 
pw.CharField(max_length=255, null=True)\n tips_duration = pw.IntegerField(null=True)\n tips_mass = pw.IntegerField(null=True)\n tips_temperature = pw.IntegerField(null=True)\n tips_volume = pw.IntegerField(null=True)\n tips_extra = pw.CharField(max_length=255, null=True)\n tips_max_brews = pw.IntegerField(default=1)\n updated = pw.DateTimeField(default=dt.datetime.now)\n vendor = pw.ForeignKeyField(db_column='vendor', rel_model=migrator.orm['tea_vendors'], to_field='id')\n vendor_internal_id = pw.CharField(db_column='vendor_id', max_length=255, null=True)\n\n class Meta:\n db_table = \"tea_teas\"\n\n if 'tea_lists' not in tables:\n @migrator.create_model\n class TeaList(pw.Model):\n name = pw.CharField(max_length=255)\n created_at = pw.DateTimeField(default=dt.datetime.now)\n share_key = pw.CharField(max_length=255, null=True, unique=True)\n cookie_key = pw.CharField(max_length=255, unique=True)\n creator_ip = pw.CharField(max_length=255)\n share_key_valid_until = pw.DateTimeField(null=True)\n\n class Meta:\n db_table = \"tea_lists\"\n\n if 'tea_lists_items' not in tables:\n @migrator.create_model\n class TeaListItem(pw.Model):\n is_empty = pw.IntegerField()\n tea_list = pw.ForeignKeyField(db_column='list_id', rel_model=migrator.orm['tea_lists'], to_field='id')\n tea = pw.ForeignKeyField(db_column='tea_id', rel_model=migrator.orm['tea_teas'], to_field='id')\n\n class Meta:\n db_table = \"tea_lists_items\"\n\n if 'tea_types' not in tables:\n @migrator.create_model\n class TeaType(pw.Model):\n name = pw.CharField(max_length=255, unique=True)\n slug = pw.CharField(max_length=255, unique=True)\n is_origin = pw.BooleanField()\n order = pw.IntegerField(null=True)\n\n class Meta:\n db_table = \"tea_types\"\n\n if 'tea_teas_types' not in tables:\n @migrator.create_model\n class TypeOfATea(pw.Model):\n tea = pw.ForeignKeyField(db_column='tea_id', rel_model=migrator.orm['tea_teas'], to_field='id')\n tea_type = pw.ForeignKeyField(db_column='type_id', rel_model=migrator.orm['tea_types'], to_field='id')\n\n class Meta:\n db_table = \"tea_teas_types\"\n\n primary_key = pw.CompositeKey('tea', 'tea_type')", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True" ]
[ "0.7362598", "0.7175394", "0.6962745", "0.67925906", "0.66853064", "0.665665", "0.66325575", "0.65849835", "0.6555793", "0.65435", "0.6471299", "0.6459678", "0.6422288", "0.6384289", "0.63629246", "0.6273245", "0.615992", "0.61169046", "0.6091286", "0.60912824", "0.60731256", "0.6034795", "0.6017065", "0.6007843", "0.5996848", "0.5968128", "0.59622544", "0.5950757", "0.59288013", "0.59194374", "0.59129155", "0.5911925", "0.5906348", "0.5880502", "0.58773863", "0.5866136", "0.5820015", "0.581397", "0.5791617", "0.5784021", "0.5784021", "0.57644975", "0.57629895", "0.57177883", "0.5702521", "0.5702188", "0.568852", "0.5653638", "0.56517977", "0.56501245", "0.5600406", "0.559397", "0.558977", "0.55845165", "0.5575821", "0.5573521", "0.55717826", "0.55703235", "0.5535298", "0.5500599", "0.54974455", "0.5496825", "0.5461394", "0.5454643", "0.54264605", "0.54263204", "0.5423727", "0.54121697", "0.5407888", "0.5392694", "0.53922385", "0.5377959", "0.5367431", "0.5360756", "0.5353537", "0.5349805", "0.534639", "0.5345183", "0.5344695", "0.5344695", "0.5323069", "0.5303038", "0.5302953", "0.52792364", "0.52786", "0.5278075", "0.5270721", "0.5269795", "0.52619463", "0.52498704", "0.5242071", "0.5225159", "0.52249557", "0.5220851", "0.5219436", "0.5219333", "0.5208306", "0.520596", "0.51956743", "0.51913226" ]
0.7330435
1
Takes an image and assigns a speed from 0-1 based upon it.
def process(img):
    global start
    frame = cv2.GaussianBlur(img, (21, 21), 0)
    fgmask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = cv2.absdiff(start, fgmask)
    avg = max(np.average(fgmask), 10)
    fgmask = cv2.dilate(fgmask, None, iterations=2)
    ret, fgmask = cv2.threshold(fgmask, avg, 255, cv2.THRESH_BINARY)
    image, contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    bigContours = []
    for contour in contours:
        if cv2.contourArea(contour) >= 3000:
            bigContours.append(contour)
    ax = 0
    ay = 0
    for contour in bigContours:
        moments = cv2.moments(contour)
        cx = int(moments['m10']/moments['m00'])
        cy = int(moments['m01']/moments['m00'])
        ax += cx
        ay += cy
    if not bigContours:
        speed = 0
    else:
        ax /= len(bigContours)
        ay /= len(bigContours)
        my, mx, channels = img.shape
        my /= 2
        mx /= 2
        dist = math.sqrt((ax - mx)**2 + (ay - my)**2)
        speed = max(min((mx - dist) / my, 1), 0.1)
        if speed > 0.8:
            speed = 1
    return speed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed(self, speed: int, time: int = 0, /) -> None:", "def set_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n set_left_speed(speed)\n #time.sleep(.1)\n set_right_speed(speed)", "def set_speed():\n pass", "def speed(self, value: int, /) -> None:", "def set_speed(self,speed):\n self.speed = speed", "def set_speed(self,speed):\n self.speed_p = speed", "def setSpeedEngine1(speed: int):\n pass", "def set_speed(self, speed):\n self.speed = speed", "def speed(self, s=0):", "def increment_speed(self):\n self.speed += 0.0004", "def speed(self) -> int:", "def speed(self) -> int:", "def setSpeedEngine2(speed: int):\n pass", "def set_speed(self, speed: str) -> None:\n self.wink.set_state(True, speed)", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def update(self, speed_multiplier):\n self.move_amount = resolve_velocity(self.direction, self.speed/speed_multiplier)\n self.position[0] += self.move_amount[0]\n self.position[1] -= self.move_amount[1]\n self.image_counter += 1\n self.image = self.images[int(self.image_counter/10)]\n if self.image_counter == 29:\n self.image_counter = 0\n self.rect.center = self.position", "def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)", "def speed(self, value: float):\n self._speed = value", "def setSpeedEngine4(speed: int):\n pass", "def speed(self, speed=None):\n speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }\n if speed is None:\n return self._speed\n if speed in speeds:\n speed = speeds[speed]\n elif 0.5 < speed < 10.5:\n speed = int(round(speed))\n else:\n speed = 0\n self.pen(speed=speed)", "def set_speed(self, ratio):\n self._speed = ratio", "def speed(self, speed):\n self._speed = speed\n self._rotspeed = speed", "def movespeed(self, speed):\n self._speed = speed", "def set_speed(self, speed):\n self.device.set_speed(speed)\n return \"OK\"", "def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def map_scroll_speed(speed):\n speed_map = (0b011, 0b010, 0b001, 0b110, 0b000, 0b101, 0b100, 0b111)\n return speed_map[speed]", "def _speed_action(i, speed, mi):\n result = mi\n # Need to avoid speeds of 1, -1 resulting in zero movement\n if i % (abs(speed) + 1) != 0:\n if speed > 0:\n result = (mi + 1) % NUM_STEPPER_STATES\n elif speed < 0:\n result = (mi - 1) % NUM_STEPPER_STATES\n\n return result", "def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)", "def set_speed(self, level):\n speed = self.SPEED + (self.SPEED_INCREMENT * level)\n\n if self.lane % 2:\n # Move to the right\n self.velocity = (speed, 0)\n else:\n # Move to the left\n self.velocity = (-speed, 0)", "def update(self, image):\n indices = np.where(image[50:, :, 0] == 236)\n if len(indices[0]) > 0:\n x = (np.amin(indices[1]) + 
np.amax(indices[1])) // 2\n y = 50 + (np.amin(indices[0]) + np.amax(indices[0])) // 2\n self.velocity = (x - self.location[0], y - self.location[1])\n self.location = (x, y)", "def increase_car_speed(self):\r\n self.car_speed += 5", "def set_animation_speed(self, speed):\n self.m_animation_speed = self.calculate_animation_speed(speed)", "def set_speed(self, speed):\n assert isinstance(speed, float), \"Must be a float\"\n \n if speed < 0.0:\n raise ValueError(\"Negative speeds not supported\")\n \n self.speed = speed", "def set_left_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_left_speed_cmd+[speed,0,0])", "def set_left_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_left_speed_cmd+[speed,0,0])", "def __init__(self, image, scale):\n\n # Call the parent init\n super().__init__(image, scale)\n\n # Create a variable to hold our speed. 'angle' is created by the parent\n # The put vehicle to init position\n self.speed = 0\n self.max_speed = 5\n self.respawning = 0\n\n # Mark that we are respawning.\n self.respawn()", "def set_right_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_right_speed_cmd+[speed,0,0])", "def set_right_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_right_speed_cmd+[speed,0,0])", "def setspeed(speed):\n if speed is None:\n click.echo(\"speed value is required\")\n raise click.Abort()\n\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_set_fan_speed(fan, speed)\n if not status:\n click.echo(\"Failed\")\n sys.exit(1)\n\n click.echo(\"Successful\")", "def set_speed(self, speed_mps):\n speed_cmd = CommandLongRequest()\n speed_cmd.command = 178\n speed_cmd.param1 = 1\n speed_cmd.param2 = speed_mps\n speed_cmd.param3 = -1\n speed_cmd.param4 = 0\n\n rospy.loginfo(\n CBLUE2 + \"Setting speed to {}m/s\".format(str(speed_mps)) + CEND)\n response = self.command_client(speed_cmd)\n\n if response.success:\n rospy.loginfo(\n CGREEN2 + \"Speed set successfully with code {}\".format(str(response.success)) + CEND)\n rospy.loginfo(\n CGREEN2 + \"Change Speed result was {}\".format(str(response.result)) + CEND)\n return 0\n else:\n rospy.logerr(\n CRED2 + \"Speed set failed with code {}\".format(str(response.success)) + CEND)\n rospy.logerr(\n CRED2 + \"Speed set result was {}\".format(str(response.result)) + CEND)\n return -1", "def __init__(self, img, width, height, animations=None, frame=0, speed=0.125, start_animation=E_ANIM):\n super().__init__(img, 0, 0, width, height)\n self.img = img\n\n self.current_animation = start_animation\n self.frame = frame\n self.speed = speed\n self.timer = 0\n self.direction = (0,1)\n\n if animations:\n self.anims = animations\n else:\n self.anims = { E_ANIM: (0,1) }", "def setSpeedEngine3(speed: int):\n pass", "def move_set_speed(self, speed):\n # self.motor_set_speed(MOTOR_LEFT, speed)\n # self.motor_set_speed(MOTOR_RIGHT, speed)\n self.move_speed = speed\n print(\"move_speed is now:\", self.move_speed)", "def set_speed(self, speed=0):\n speed = clamp(speed)\n self._state.speed = speed\n self.send_command(Command.SET_SPEED, [int(speed)])", "def set_speed(self, speed, motor):\n self.driver.set_speed(speed, motor)\n self.last_control = time.time()", "def set_speed(self, speed):\n return self.bot_client.send_command(_Command.SetSpeed, speed)", "def on_speed_change(self, event) -> None:\r\n\r\n 
speed_level = int(self.speed_scale.get())\r\n self.animator.time_per_gen = self.TIMES_PER_GEN[speed_level]", "def set_speed(self, speed):\n self._set_sub_text('speed', text=str(speed))\n return self", "def set_speeds(self, speed_1, speed_2):\n pass", "def __init__(self, angle, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"Image/Trafico.png\")\n self.rect = self.image.get_rect()\n self.image_orig = self.image\n self.speed = 2\n self.direction = angle\n self.steering = 90\n self.x = x\n self.y = y", "def set_motor_speed(self, speed=0.0):\r\n self.target_speed = speed", "def increase_speed(self, character):\n character.speed = min(character.max_steps/4, character.speed * 1.25)", "def set_speed(self, speed, ports='ABCD'):\n\n speed += self.avg_speed\n if self.inverted:\n speed = -speed\n\n if speed > self.margin:\n speed = self.margin\n elif speed < -self.margin:\n speed = self.margin\n\n for p in ports:\n if self.motors[p].connected:\n self.motors[p].run_forever(speed_sp=speed, speed_regulation=True)\n else:\n print(\"Cant run motor on\", p, \"- not connected\")", "def scenario1(height, speed):\n time = math.sqrt((2 * height) / 9.81)\n result = speed * time\n return result", "def increase_speed(self):\n self.covid_horizontal_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.hero_speed_factor *= self.speedup_scale", "def setMotorSpeed(self,motorID,speed):\n speed = max(min(speed,1.0),-1.0) #range limit\n direction = speed < 0 # set reverse direction bit if speed less than 0\n bit8speed = self.params[1] & 1 #first bit of paramter 1 can be used to determin if its in 8 bit speed mode\n speedMultiplyer = 127 # speed is between 0-127 for 7bit speed mode\n if bit8speed:\n speedMultiplyer = 255 #speed is between 0-255 for 8bit speed mode\n speedByte = int(abs(speed)*speedMultiplyer)# covert floating speed to scaled byte\n \n cmd = speedByte >= 128 # bit 0 of command is used for 8th bit of speedbyte as speedbyte can only use 7 bits\n \n speedByte &= 127 #clear the 8th bit of the speedbyte as it can only use 7 bits\n \n cmd |= direction << 1 #shift direction into bit 1\n cmd |= motorID << 2 #shift motor id into bit 2\n cmd |= 1 << 3 # just set bit 3\n\n #send the speed command\n self.driver.sendReceive([0xaa,self.id,cmd,speedByte],0)", "def set_speed(self, speed, video_display_name=None):\r\n # mouse over to video speed button\r\n speed_menu_selector = self.get_element_selector(video_display_name, VIDEO_BUTTONS['speed'])\r\n element_to_hover_over = self.q(css=speed_menu_selector).results[0]\r\n hover = ActionChains(self.browser).move_to_element(element_to_hover_over)\r\n hover.perform()\r\n\r\n speed_selector = self.get_element_selector(video_display_name, 'li[data-speed=\"{speed}\"] a'.format(speed=speed))\r\n self.q(css=speed_selector).first.click()", "def increase_speed(self):\n self.target_speed *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale", "def update(self):\n if self.angle < 10:\n self.image = self.sprites[0]\n elif self.angle > 10 and self.angle < 20:\n self.image = self.sprites[1]\n elif self.angle > 20 and self.angle < 30:\n self.image = self.sprites[2]\n elif self.angle > 30 and self.angle < 40:\n self.image = self.sprites[3]\n elif self.angle > 40 and self.angle < 50:\n self.image = self.sprites[4]\n elif self.angle > 50 and self.angle < 60:\n self.image = self.sprites[5]\n elif self.angle > 60:\n self.image = self.sprites[6]", "def match_car_speed(self, fast_car, slow_car):\n 
fast_car.current_speed = slow_car.current_speed", "def step(self):\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass", "def move(self):\r\n\r\n # Randomizes movement after 40 steps and flips sprite \\\r\n # (if x-value of speed variable changes from positive to negative)\r\n if step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 40 steps, but doesn't flip sprite because \\\r\n # x-value of speed variable doesn't change from positive to negative\r\n elif step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Randomizes movement after 80 steps and flips sprite \\\r\n # (if x-value of speed variable changes from negative to positive)\r\n if step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 80 steps, but doesn't flip sprite \\\r\n # because x-value of speed variable doesn't change from positive to negative\r\n elif step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Flips the dino sprite when it hits the left or right side of the enclosure \\\r\n # and reverses dino's speed\r\n if self.rect.right > 818 or self.rect.left < 182:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[0] = - self.speed[0]\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Reverses the dino's speed if it hits the top or bottom side of the enclosure\r\n if self.rect.top < 55 or self.rect.bottom > 542:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[1] = - self.speed[1]\r\n\r\n # Causes dinosaur to go to the tree when hunger is high enough\r\n if hunger >= 205:\r\n if step != 40 and step != 80 and 0 < thirst < 175:\r\n if self.rect.left > 300 and self.speed[0] not in range(-1000, 0):\r\n # Speed must be rounded so that speed[0] and speed[1] is in the range functions above \\\r\n # (range function doesn't take decimal point numbers)\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 300 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n if self.rect.left < 300 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 300 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = 
round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n\r\n # Causes dinosaur to go to the pond when thirst is high enough\r\n if thirst == 175:\r\n if step != 40 and step != 80:\r\n if self.rect.left > 540 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 540 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n if self.rect.left < 540 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 540 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n\r\n # Sets rectangle surrounding dino sprite to new position based on its speed\r\n newpos = self.rect.move(self.speed)\r\n self.rect = newpos", "def __init__(self, initial_x:int, initial_y:int, width:int, height:int, power_type:str, time_to_live:int, debug:bool = False):\n\n #Call the superclass contructor\n super().__init__(initial_x, initial_y, width, height, PowerUp.sprites[power_type], debug)\n\n #Store variables\n self.power_type = power_type\n self.ttl = time_to_live\n\n #Scale the image\n self.scale(30,30)", "def calculate_animation_speed(self, speed):\n speed = float(speed)\n\n self.m_scl_pause = True if speed == 0 else False\n\n calc_speed = int(-1715 * pow(abs(speed), 3) + 4121 * pow(abs(speed), 2) - 3735 * abs(speed) + 1332)\n\n return calc_speed if speed >= 0 else -calc_speed", "def __init__(self, img_path: str, x: float) -> None:\r\n Attacker.__init__(self, img_path, x)\r\n self._dir = 1\r\n self._speed *= self._dir\r\n self._y = game_values.SCREEN_H / 4 + 50", "def increase_speed(self):\n self.ship_speed*=self.speedup_scale\n self.bullet_speed*=self.speedup_scale\n self.alien_speed*=self.speedup_scale\n self.alien_points=int(self.alien_points*self.score_scale)\n print(self.alien_points)", "def __init__(self, img_path: str, x: float) -> None:\r\n Attacker.__init__(self, img_path, x)\r\n self._dir = -1\r\n self._speed *= self._dir\r\n self._y = 3 * game_values.SCREEN_H / 4 + 50", "def speed(n):\n turtleTmp.speed(max(1, min(n, 10)))", "def __init__(self, speed, get_current_time):\r\n self.get_current_time = get_current_time\r\n self.speed = speed", "def set_speed(self, speed: str) -> None:\n if speed == SPEED_HIGH:\n self._bond.setSpeed(self._deviceId, self._speed_high)\n elif speed == SPEED_MEDIUM:\n self._bond.setSpeed(self._deviceId, self._speed_medium)\n elif speed == SPEED_LOW:\n self._bond.setSpeed(self._deviceId, self._speed_low)\n self._attributes['current_speed'] = speed", "def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n self.alien_speed_factor *= self.speed_up_scale", "def set_speed(self, speed=None, auto=False, adaptive=False):\n if speed is None:\n speed = 0\n elif not util.is_natural(speed):\n raise TypeError('Expected positive number for speed, given %s.' % speed)\n elif speed > self.MAX_JTAG_SPEED:\n raise ValueError('Given speed exceeds max speed of %d.' % self.MAX_JTAG_SPEED)\n elif speed < self.MIN_JTAG_SPEED:\n raise ValueError('Given speed is too slow. Minimum is %d.' 
% self.MIN_JTAG_SPEED)\n\n if auto:\n speed = speed | self.AUTO_JTAG_SPEED\n\n if adaptive:\n speed = speed | self.ADAPTIVE_JTAG_SPEED\n\n self._dll.JLINKARM_SetSpeed(speed)\n\n return None", "def speed(self):\n return 1 # speed system not implemented yet", "def adjustSpeed(self, speed):\n\t\tif self.timeout <= 0:\n\t\t\tself.speed = max(self.minimumSpeed, min(self.maximumSpeed, self.speed + speed))", "def set_speed(self, axis, speed):\n #log.info(f\"set speed {axis} {speed}\")\n self.cmd_axis_speed[axis] = speed", "def __init__(self, *args, **kwargs):\n super().__init__(**kwargs)\n self.speed = kwargs.get('speed', 5)\n\n # Loading image file\n self.ball_image = image.load(config.resources_path + 'ball.png')\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = sprite.Sprite(self.ball_image, self.x, self.y)\n\n self.ball_image.rotation = randint(0, 360) # Rotates the sprite\n self.ball_image.scale = uniform(0.5, 2)\n\n self.x_direction = 1 # 1 for + axis direction\n self.y_direction = 1", "def assign_cycle(fpc, frames):\n return np.int_(np.floor((frames - 1) / float(fpc)))", "def interfacespeed(self, interfacespeed):\n\n self._interfacespeed = interfacespeed", "def set_speed(self, SHIP_MOVEMENT):\n self._speed = SHIP_MOVEMENT", "def change_port_speed(self, instance_id, public, speed):\r\n if public:\r\n func = self.guest.setPublicNetworkInterfaceSpeed\r\n else:\r\n func = self.guest.setPrivateNetworkInterfaceSpeed\r\n\r\n return func(speed, id=instance_id)", "def set_accel(self, accel):\n \"\"\" Accel is pixel per second second \"\"\"\n self.accel = accel", "def speed(self):\n self.convert_window(\"Speed\", \"meters/second\", [\"Mach number\", \"Nm/24hr\", \"centimeters/minute\", \"centimeters/second\", \"feet/hour\", \"feet/minute\", \"feet/second\", \"inches/minute\", \"inches/second\", \"kilometers/hour\", \"kilometers/second\", \"knots\", \"meters/hour\", \"meters/minute\", \"meters/second\", \"miles/hour\", \"miles/minute\", \"miles/second\", \"nautical miles/hour\", \"speed of light\", \"speed of sound\", \"yards/hour\", \"yards/minute\", \"yards/second\"])", "def change_speed(self, speed):\n\n self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED] = speed\n if type(speed) != tuple:\n raise ValueError('speed must be a tuple of the form (sp_x, sp_y)')", "def setMotorSpeed(self, idMotor=0, sense=0, speed=0, board=0):\n msg = [idMotor, sense, int(speed / 256.0), speed % 256]\n return self.callModule('motors', board, 0, 'setvelmtr', msg)", "def _update_speed(self, speed):\n if speed is None:\n return\n if speed == self._current_speed:\n return\n\n self._current_speed = speed\n self._update_speed_attributes()\n LOG.info(\n f\"Updated LUNOS {self._name}: {self.percentage}% {self._current_speed}\"\n )", "def animation(self, freq=100):\n if (self.current_time - self.timer) > freq:\n if self.index < (len(self.image_list) - 1):\n self.index += 1\n else:\n self.index = 0\n self.timer = self.current_time\n self.image = self.image_list[self.index]", "def getAnimation(image_prefix, char_scale=False):\n \n fullname_prefix = os.path.join('..\\\\','sprites')\n fullname_prefix = os.path.join(fullname_prefix, image_prefix)\n \n #Make sure at least the first frame is there\n try:\n image = pygame.image.load(fullname_prefix + '0000.png')\n except pygame.error, message:\n print 'Cannot load image:', image_prefix + \\\n '0000.png not found in images folder'\n raise SystemExit, message\n \n #Loop through the frames\n result = []\n index = 0\n while True:\n suffix 
= \"%04d\" % index\n try:\n image = pygame.image.load(fullname_prefix + suffix + '.png')\n image = image.convert()\n if char_scale:\n image = pygame.transform.scale2x(image)\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n result.append(image)\n except pygame.error, message:\n #Done loading frames, break\n break\n index += 1\n return result", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def set_speed(self, v):\n self.v = v", "def increase_speed(self):\n self.ship_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n self.alien_points = int(self.alien_points * self.score_scale)", "def __init__(self, game, brick, png_prefix, speed=DEFAULT_FALL_SPEED):\n super().__init__()\n self.game = game\n self._speed = speed\n self._animation = itertools.cycle(\n image for image, _ in load_png_sequence(png_prefix))\n self._animation_start = 0\n\n self.image = None\n # Position the powerup by the position of the brick which contained it.\n self.rect = pygame.Rect(brick.rect.bottomleft,\n (brick.rect.width, brick.rect.height))\n\n # The area within which the powerup falls.\n screen = pygame.display.get_surface()\n self._area = screen.get_rect()\n\n # Visibility toggle.\n self.visible = True", "def speed_setting_season(self):\n if self.season == \"spring\":\n self.grid.speed_values[self.path_color] = 4\n self.grid.speed_values[(0, 0, 255)] = 0.1\n elif self.season == \"winter\":\n self.grid.speed_values[self.path_color] = 3\n self.grid.speed_values[(0, 0, 255)] = 0.1\n elif self.season == \"fall\":\n self.grid.speed_values[self.path_color] = 6\n elif self.season == \"summer\":\n pass", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def __init__(self, image_to_load, coo_x, rang, coo_y=432):\n\n super().__init__()\n self.image = pg.image.load(image_to_load)\n self.image = pg.transform.scale(self.image, (80, 80))\n self.rect = self.image.get_rect()\n self.rect.x = coo_x\n self.rect.y = coo_y\n self.last_direction = 1\n self.counter = 0\n self.range = rang\n\n if self.range % 2 == 0:\n self.half = self.range / 2 - 1\n else:\n self.half = self.range / 2", "def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)", "def __init__(self, x, y):\n # Call the parent class (Sprite) constructor\n super().__init__()\n self.image = pygame.image.load(\"ship1.png\")\n self.health = 5\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n \n # -- Attributes\n # Set speed vector\n self.change_x = 0\n self.change_y = 0", "def _convert_speed(self, value):\n if value & SPEED_ACTIVE:\n return None\n else:\n return value & SPEED_MASK", "def set_speed(self, speed):\n # create the MAV_CMD_DO_CHANGE_SPEED command\n msg = self.message_factory.command_long_encode(0, 0,mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,0,0,speed,0, 0, 0, 0, 0)\n\n # send command to vehicle\n self.send_mavlink(msg)\n self.flush()" ]
[ "0.6555752", "0.6486258", "0.63528025", "0.63336885", "0.6330112", "0.6309524", "0.6249848", "0.6223644", "0.61646354", "0.615034", "0.6127424", "0.6127424", "0.60513264", "0.60458", "0.6031286", "0.6002849", "0.5983297", "0.5926888", "0.5913099", "0.5911406", "0.5908372", "0.5901325", "0.58828104", "0.5882252", "0.5868959", "0.5817392", "0.57884955", "0.5768", "0.57636446", "0.5714049", "0.5701848", "0.56977695", "0.5690077", "0.5687972", "0.56815946", "0.5674802", "0.5674802", "0.5659328", "0.5641782", "0.5641782", "0.56276995", "0.5626314", "0.56235373", "0.5594621", "0.5580908", "0.5565523", "0.55572814", "0.552991", "0.55297154", "0.5516831", "0.5485057", "0.54784876", "0.5474562", "0.5473372", "0.5463202", "0.5455356", "0.54548705", "0.5452227", "0.5450132", "0.5445607", "0.5439415", "0.5430988", "0.5419589", "0.53986883", "0.5397586", "0.53916305", "0.5389309", "0.5370527", "0.5370523", "0.5367615", "0.5367111", "0.53612596", "0.5355692", "0.5353824", "0.5352087", "0.53460413", "0.53426355", "0.5341718", "0.5336625", "0.53271216", "0.53196377", "0.5312804", "0.5312581", "0.5308067", "0.5304934", "0.5300747", "0.52994585", "0.5296875", "0.52575666", "0.52547073", "0.5251945", "0.5247346", "0.52459455", "0.5241939", "0.5234378", "0.5234378", "0.5233225", "0.5231938", "0.5231446", "0.52212965", "0.52163804" ]
0.0
-1
Ready handler. Import signals.
def ready(self):
    import roles.signals  # pylint: disable=unused-import
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ready(self):\n import main.signals # noqa", "def ready(self):\n\n from . import signals # noqa", "def ready(self):\n import exams.signals # pylint: disable=unused-import", "def ready(self):\n logger.info('game.ready')\n import game.signals", "def ready(self):\n # export app settings\n self.export_settings()\n # import the submodule with signal handlers\n self.load_module('signals')", "def ready(self):\n pass", "def ready(self):\n import checkout.signals # noqa: F401", "def trigger_ready(self) -> None:\n self.trigger_signal(\"ready\")", "def ready(self):\n\n import shoppingList.apps.shoppingItems.signals # noqa", "def ready(cls):\n pass", "async def on_ready(self) -> None:", "def signal_ready(self):\n self.logger.debug(\"service is ready\")", "def ready(self):\n from django_sites_extensions import models\n from django_sites_extensions import signals", "def prepare(self):\n return HandlerReady()", "def _ready(cls):\n sync_call(cls.ready)", "def slot_owns_initialized(self, _sender, _data):\r\n self.check_connect_ready()", "def ready(self):\n return True", "def ready(self):\n # self.init_es_templete()\n self.init_uwsgi_log()", "def serverExplicitReady (self):\n self.server_ready.set()", "def if_ready(self, **kwargs):\n return True", "def on_ready(_unused, mesosite):\n LOG.info(\"on_ready() has fired...\")\n mesosite.close()\n bridge(process_data, isbinary=True)", "async def on_ready(self) -> None:\n LOGGER.info(f\"Connected as {self.user}\")\n await self.change_presence(\n activity=Activity(type=ActivityType.playing, name=\"in the waves\"),\n )\n self._setup_triggers()\n\n for trigger in self.triggers:\n LOGGER.info(f\"Added trigger: {trigger}\")", "def is_ready(self) -> bool:\n pass", "def signal_ready(self):\n self._container.kill(signal.SIGUSR1)", "def on_window_ready(self):\n pass", "async def async_trigger_ready(self) -> None:\n await self.async_trigger_signal(\"ready\")", "def is_ready() -> bool:\n return True", "def waitonready(self):\n debug('ControllerStartup.waitonready()')\n waitonready(self.pidevice, **self._kwargs)", "def tellReady(self):\n self.sender.send(self.sender.createPipelineReadyEvent(self.name))", "async def on_ready(self):\n print('READY 2 FITE ABOOS!!')", "def do_ready(self) -> bool:\n logger.info('Device ' + self.name + ' is ready.')\n return False", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def on_start(self):\n pass", "def ready(self):\n self.stdout.write('READY\\n')\n self.stdout.flush()", "def slot_client_connected(self, _sender, _data):\r\n self.check_connect_ready()", "def signalSetup(self):\n self.ui.b_info.clicked.connect(self.showInfo)\n self.ui.b_save.clicked.connect(self.openSave)\n self.ui.b_vid.clicked.connect(self.openVideo)\n self.ui.b_run.clicked.connect(self.startRun)\n self.ui.b_colour.clicked.connect(self.pickColour)\n self.ui.b_ground_truth.clicked.connect(self.openGroundTruth)\n\n self.ui.t_fps.textChanged.connect(self.changeFps)\n self.ui.t_low.editingFinished.connect(self.changeLow)\n self.ui.t_high.editingFinished.connect(self.changeHigh)\n self.ui.c_error_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_speed_plot.stateChanged.connect(self.checkFiles)\n self.ui.c_crash_plot.stateChanged.connect(self.checkFiles)\n self.ui.combo_superpixel.currentIndexChanged.connect(\n self.changeSuperPixelMethod\n )\n self.ui.c_optimize.stateChanged.connect(self.checkFiles)\n 
self.ui.c_draw.stateChanged.connect(self.checkFiles)\n self.ui.c_velocity.stateChanged.connect(self.checkFiles)\n self.ui.c_object_detection.stateChanged.connect(self.checkFiles)", "def ready(self):\n post_migrate.connect(create_default_options)", "def ready(self):\n from chef_profile import handlers", "async def on_ready(self, payload: EventReadyPayload):\n log.info('ready event <%s>', payload)\n # 1. get all of friends\n friends: List[Contact] = await self.Contact.find_all()\n for friend in friends:\n log.info('load friend<%s>', friend)\n\n # 2. get all of rooms\n rooms: List[Room] = await self.Room.find_all()\n for room in rooms:\n log.info('load room<%s>', room)", "def is_ready(cls):\n\n return False", "def _onconnect(self):\n\n pass", "def ready(self):\n self.update({self.STATE: self.STATE_READY})", "def connectionMade(self):\n self.factory._r_on_connection_established(self)", "def on_startup(self) -> None:\n ...", "def on_start(self):\n ProxyServerHandler.current.handler_ready(self)", "def setup(self):\n\t\tif self.hasSignalModule and not self.signalsRegistered:\n\t\t\t# Jython does not support all signals, so we only use\n\t\t\t# the available ones\n\t\t\tsignals = ['SIGINT', 'SIGHUP', 'SIGABRT', 'SIGQUIT', 'SIGTERM']\n\t\t\timport signal\n\t\t\tfor sig in signals:\n\t\t\t\ttry:\n\t\t\t\t\tsignal.signal(getattr(signal, sig), self._shutdown)\n\t\t\t\t\tself.signalsRegistered.append(sig)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tLogger.Err(\"[!] monitoring.Signals._registerSignals:%s %s\\n\" % (sig, e))", "async def on_ready(self):\n if not hasattr(self.bot, 'uptime'):\n self.bot.uptime = datetime.utcnow()\n\n # Check if user desires to have something other than online\n status = config.STATUS_TYPE.lower()\n status_type = {\"idle\": discord.Status.idle, \"dnd\": discord.Status.dnd}\n\n # Check if user desires to have a different type of activity\n activity = config.ACTIVITY_TYPE.lower()\n activity_type = {\"listening\": 2, \"watching\": 3, \"competing\": 5}\n\n await self.bot.change_presence(\n activity=discord.Game(type=activity_type.get(activity, 0), name=config.ACTIVITY),\n status=status_type.get(status, discord.Status.online)\n )\n\n # Indicate that the bot has successfully booted up\n print(f'Ready: {self.bot.user} | Servers: {len(self.bot.guilds)}')", "async def on_ready():\n print(f'{bot.user} has connected!')\n try:\n await pull_prev_info()\n except Exception as e:\n print(\"Error in starting function with pulling previous information:\")\n print(e)\n\n try:\n await update_tournament_list()\n except Exception as e:\n print(\"Error in starting function with updating tournament list:\")\n print(e)\n\n try:\n refresh_sheet.start()\n except Exception as e:\n print(\"Error in starting function with updating tournament list:\")\n print(e)\n\n post_something.start()\n cron.start()\n go_stylist.start()\n manage_welcome.start()\n store_variables.start()\n change_bot_status.start()\n update_member_count.start()", "def ready(self) -> None:\n self._ready.set()\n self._go.wait()\n self._go.clear()\n self._context.error = None", "def _connect_signals(self):\r\n\r\n\t\t# Connecting socket signals\r\n\t\tself.__tcpSocket.error.connect(self.__display_error)\r\n\t\tself.__tcpSocket.readyRead.connect(self.__read_message)\r\n\r\n\t\t# Connecting the socket when requested\r\n\t\tself._window.set_connection_infos.connect(lambda x: self.connect_to_server(*x))\r\n\r\n\t\t# Connecting the bar names request\r\n\t\tself._window.get_bar_names.connect(lambda: 
self.encode_message(action=\"LA\"))\r\n\r\n\t\t# Setting the bar name when requested\r\n\t\tself._window.set_bar_name.connect(self.__set_name)\r\n\r\n\t\t# Connecting the preferences\r\n\t\tself._window.ask_preferences.connect(\r\n\t\t\tlambda: self.fill_preferences.emit((self.__tcpSocket.peerAddress().toString(), self.__tcpSocket.peerPort()),\r\n\t\t\t\t\t\t\t\t\t\t\t self.__name))\r\n\r\n\t\t# Connecting the send message signal\r\n\t\tself._window.send_message.connect(lambda x: self.encode_message(action=\"ME\", message=x))\r\n\r\n\t\t# Connecting for the connection dialog\r\n\t\tself._window.request_connection_infos.connect(\r\n\t\t\tlambda: self.open_connection_dialog.emit([[self.__tcpSocket.peerAddress().toString(),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t self.__tcpSocket.state() == QAbstractSocket.ConnectedState]]))", "def notifyReady(self, function, **kwargs):\n self._sig_ready.subscribe(function, **kwargs)", "def ready(self, component):\n\n self.fire(connect(self.host, self.port), 'ircbot')", "def isReady(self):\n\t\twhile self.osc.trigger_state() != \"save\":\n\t\t\ttime.sleep(.1)\n\t\treturn True", "def connectionMade(self):", "def ready(self):\n for model in self.get_models():\n # register model-level signals\n pre_save.connect(receivers.presave, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_presave\")\n pre_delete.connect(receivers.predelete, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_predel\")\n post_save.connect(receivers.postsave, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_postsave\")\n post_delete.connect(receivers.postdelete, sender=model, weak=False, dispatch_uid=f\"{model.__name__}_postdel\")\n\n # register many to many fields of model - EXPERIMENTAL\n m2m_field_names = []\n for m2m in model._meta.many_to_many:\n m2m_field = getattr(model, m2m.name)\n m2m_changed.connect(receivers.m2mchanged, sender=m2m_field.through, weak=False,\n dispatch_uid=f\"{model.__name__}_{m2m.name}\")", "async def on_connect(self):\n pass", "def _on_engine_ready(self):\n logger.debug('_on_engine_ready')\n\n self._status = self.WindowStatus.READY\n\n # Call custom callback\n self.on_window_ready()", "def post_start(self):", "def on_start(self):", "def on_start(self):", "def after_connect(self):\n pass", "async def on_ready(self):\n await self.wait_until_ready()\n self._guild = self.get_guild(self._guild_ID)\n\n LOGGER.info('Ready!')", "def Network_ready(self, data):\n self.ready = data['state']\n self._server.checkReady()", "def on_start(self):\n self.init()", "def _on_connection_success(self):\n if self.connect_handler:\n self.connect_handler()", "def external_input_ready(self):\n return True", "def connectionInitialized(self):\n log.msg('Connection Initialized')\n self.send(AvailablePresence())\n self.xmlstream.addObserver(\"/iq[@type='result']\", self.handleRequest)\n self.xmlstream.addObserver(\"/message\", self._onMessage)", "def handle_connect(self):\n pass", "def registration_started(self):\n pass", "def connected(self):\n d = self.configure()\n d.addCallback(self.startDiscovery)\n d.addErrback(self.reportError)", "def wait_until_ready(self):\n while not self.is_ready():\n time.sleep(0.01)", "def ready(self, component):\n self.fire(connect(self.host, self.port))", "def window_ready(self):\n raise NotImplementedError", "def connectionMade (self) :\r\n self.state = 'wait_hello'\r\n self.handshake_timeout = reactor.callLater(HANDSHAKE_TIMEOUT, self.err, \"handshake timeout expired\")\r\n self.log(\"connected\")", "def plugin_loaded():\n 
events.broadcast(\"plugin_loaded\")", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def on_load(self):\n pass", "def on_load(self):\n pass", "async def startup(self):", "async def startup(self):", "def is_ready(cls):\n\n return SUB_NOTIFY_READY", "async def on_connect(self) -> None:", "def on_pre_enter(self):\n self.setup()\n self.start()", "async def on_start(self):", "async def on_ready(self):\n logger.info('Bot is now ready and connected to Discord.')\n guild_count = len(self.guilds)\n logger.info(f'Connected as {self.user.name}#{self.user.discriminator} to {guild_count} guild{\"s\" if guild_count > 1 else \"\"}.')\n\n with self.get_session() as session:\n for guild in self.guilds:\n _guild: Guild = session.query(Guild).get(guild.id)\n if _guild is None:\n logger.warning(\n f'Guild {guild.name} ({guild.id}) was not inside database on ready. Bot was disconnected or did not add it properly...')\n session.add(Guild(id=guild.id))\n\n # TODO: Scan all messages on start for current period and check for new periods/updated vote counts.", "def ready(self):\n #Restart the service by loading static resources,\n #such as user dictionary\n #jieba.load_userdict(jieba_words_path)\n #jieba.analyse.set_stop_words(jieba_stop_words_path)\n pass", "def ready(self):\n if self.proc.stdout.readline() != \"OK\\n\":\n raise ValueError(\"Le bot {bot} n'arrive pas à se préparer\".format(bot=self.name))", "def onConnect(self, request_or_response):", "def connected(self):\n manager = self.manager()\n self.log().debug(\"Register [%s] callbacks\", self.name())\n\n manager.subscribeServerCallbacks(self, self.cfg().chatimg.servers or manager.SERVERS_ALL)", "def onenterready(self, event):\n print('onenterready; event: %s, %s->%s' % (event.event, event.src, event.dst))", "def started(self):", "async def on_ready(self) -> None:\n # Printing username and id of the bot\n print('Logged in as')\n print(self.user.name)\n print(self.user.id)\n print('------')\n\n # Printing servers to which the bot was added\n if len(self.guilds) > 0:\n print('Servers:')\n for g in self.guilds:\n print('> ', g)\n else:\n print('Bot was not added to any servers yet.')\n print('------')\n\n self.get_command_functions()\n\n # Printing commands\n if len(self.commands) > 0:\n print('Commands')\n for c in self.commands:\n print('> ', c)\n print('------')" ]
[ "0.85961705", "0.8477948", "0.7967753", "0.795883", "0.77848405", "0.7611131", "0.7388606", "0.7360492", "0.7330842", "0.7245289", "0.72177434", "0.7132327", "0.705162", "0.69547033", "0.691362", "0.6873511", "0.68556416", "0.672121", "0.6704173", "0.66943926", "0.6646311", "0.650583", "0.649319", "0.6487282", "0.64784545", "0.64502585", "0.6423932", "0.64138997", "0.6402582", "0.6397386", "0.6300968", "0.62758374", "0.62758374", "0.62758374", "0.62758374", "0.62758374", "0.62758374", "0.62758374", "0.62758374", "0.6272125", "0.627204", "0.6231869", "0.62268496", "0.62048775", "0.62026656", "0.6193954", "0.617126", "0.61547625", "0.6138285", "0.61333007", "0.6118331", "0.6117645", "0.61089385", "0.6099654", "0.60717046", "0.60655487", "0.6058762", "0.60376674", "0.60137177", "0.60111403", "0.5995436", "0.5987685", "0.5982032", "0.59786236", "0.597815", "0.597815", "0.59709364", "0.5952175", "0.59447914", "0.59432775", "0.5934564", "0.5917641", "0.5917489", "0.58786625", "0.58723253", "0.5871553", "0.5858604", "0.5845294", "0.5843102", "0.58420956", "0.58397144", "0.583471", "0.583471", "0.583471", "0.5822934", "0.5822934", "0.5821238", "0.5821238", "0.5817894", "0.5816675", "0.58148783", "0.580895", "0.5808641", "0.5798697", "0.5798033", "0.5796156", "0.5793031", "0.5786999", "0.57766014", "0.5770265" ]
0.7746457
5
ssum([1,2,3])
6
ssum([2,3])
5
ssum([3])
3
ssum([])
0
def ssum(L: list) -> int:
    return 0 if not L else L[0] + ssum(L[1:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def sum_unique(l):\n pass", "def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result", "def zsum(s, *args, **kwargs):\n return 0 if s.empty else s.sum(*args, **kwargs)", "def sum(*nums): \n s=0\n for num in nums:\n s += num\n return s", "def lss(inlist):\r\n ss = 0\r\n for item in inlist:\r\n ss = ss + item*item\r\n return ss", "def sum3(nums):\n count = 0\n for num in nums:\n count += num\n return count", "def sum_multiples(num):\n pass", "def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s", "def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total", "def U(xs):\n ret = 0\n for x in xs:\n ret += log(x)\n return ret", "def test_running_sum_multi_zeros(self):\n argument = [0,0,0,0]\n expected = [0,0,0,0]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only zeros\")", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def lucas(n):\n lucval = sum_series(n, 2, 1)\n print(lucval)\n return lucval", "def add_list_numbers(incoming_list):\n # summation=0\n if incoming_list:\n summation = sum(incoming_list)\n else:\n summation = 0\n return summation", "def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)", "def multiplication_total_of(num_list):", "def cum_sum(seq):\n s = 0\n cumult = [0]\n for n in seq:\n s += n\n cumult.append(s)\n return cumult", "def n_suma(**elementy):\n return sum(elementy)/len(elementy)", "def lsummult (list1,list2):\r\n if len(list1) <> len(list2):\r\n raise ValueError, \"Lists not equal length in summult.\"\r\n s = 0\r\n for item1,item2 in pstats.abut(list1,list2):\r\n s = s + item1*item2\r\n return s", "def add_list_numbers(incoming_list):\n if incoming_list: #if incoming_list is not None and len(incoming_list) > 0\n return_value = sum(incoming_list)\n else:\n return_value = 0\n return return_value", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def magma_scasum(n, dx, incx, queue):\n\n return _libmagma.magma_scasum(n, int(dx), incx, queue)", "def sum_list(num_list):\n # return sum(num_list)\n sum_list = 0\n for number in num_list:\n sum_list += number\n print(sum_list)\n \n # code prints out the sum_list for each value, increasing by the value each time\n # final output is the sum of numbers\n # currently no output for '[]' as input ", "def sum(inputList):\n sum=0#the sum of the list starts from 0\n for num in inputList:\n sum=sum+num#add all number in the list\n print(\"the sum is\",sum)", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum_elements(arr):\n return sum(arr)", "def magma_sasum(n, dx, incx, queue):\n\n return _libmagma.magma_sasum(n, int(dx), incx, queue)", "def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n 
total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s", "def sum(*args):\n result = 0\n for i in args:\n result += i\n return result", "def add_list_numbers(incoming_list):\n if incoming_list:\n retval = sum(incoming_list)\n else:\n retval = 0\n return retval", "def ll_sum(some_list):\n #This function will return total value of all integers combinded.\n result = 0\n if type(some_list) == list: #Check the element is list or not?\n for i in range(len(some_list)):\n result += ll_sum(some_list[i]) # if it's a list call this function \n #so it will call over and over untill it found element that not a list.\n elif type(some_list) == float or type(some_list) == int: #if it's not list return it value.\n result += some_list\n return result", "def count(seq):\n\treturn sum(1 for x in seq)", "def lucas():\n return sum_series(a=2, b=1)", "def sum_items(numbers):\n total = 0\n for item in numbers:\n total += item\n return total", "def no_teen_sum(a,b,c):\n list = []\n list.append(a)\n list.append(b)\n list.append(c)\n\n myTeen = fix_teen(list)\n sum = 0\n x = 0\n\n for x in myTeen:\n sum = sum + x\n\n return sum", "def final_sum(data: Iterator[str]) -> SnailfishNumber:\n return reduce(add, parse_input(data))", "def _sum_sequence(seq):\n\n def _add(x, y): return x + y\n\n return reduce(_add, seq, 0)", "def total(num_list):\n num_sum = 0.0\n for item in num_list:\n num_sum += item\n return num_sum", "def sum_values(values):\n return (sum(values))", "def count(seq):\n\n if not seq:\n return 0\n elif isinstance(seq[0], list):\n return count(seq[0]) + count(seq[1:])\n else:\n return 1 + count(seq[1:])", "def __init__(self, nums):\n acc = 0\n for i in range(len(self.sumArr)):\n self.sumArr.pop()\n for i in nums:\n acc += i\n self.sumArr.append(acc)\n print self.sumArr", "def ll_sum(x):\n xlist = []\n for i in x:\n for num in i:\n xlist.append(num)\n return sum(xlist)", "def quadsum(data):\n return np.nansum(data, 0)", "def find_sum( *my_list):\n # a = len(my_list)- 2\n # i = 0\n # suma=0\n # for i in my_list :\n # suma += my_list[i]\n # i+=1\n # return suma\n return sum(my_list)", "def sum(lists) -> list:\r\n return list(np.sum(lists, 0))", "def sum_list(lst):\n\n if lst == []:\n return 0\n else:\n return lst[0] + sum_list(lst[1:])", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def _ss(data):\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def sumSquares(aList):\r\n if isinstance(aList, list):\r\n total=0\r\n for value in aList:\r\n if(isinstance(value, int) or isinstance(value, float)) and abs(value)%3==0:\r\n total+=value**2\r\n return total\r\n else:\r\n return 'error'", "def get_nested_sum():\n l_int = [1,2,[], 3,[4,[], 5,[6]],[7],[8,9], 10,[[],11]]\n print 'Sum:', nested_sum(l_int) \n return", "def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total", "def test_sum_of_empty_array():\n assert find_maximal_subarray_sum([], 3) == 0", "def sumDivisor(inputList):\n result = 0\n for i 
in inputList:\n result += i\n return result", "def fsum(items):\n return math.fsum(items)", "def lucas(n):\n\t'''\n\tlucL1=1\n\tlucL2=2\n\tif n<0:\n\t\tprint(\"please enter positive int value\")\n\n\telif n==0:\n\t\treturn 2\n\n\telif n==1:\n\t\treturn 1\n\n\telse:\n\t\tfor i in range(n-1):\n\t\t\tlucC=lucL1+lucL2\n\t\t\tlucL2=lucL1\n\t\t\tlucL1=lucC\n\t\treturn lucC\n\t'''\n\treturn sum_series(n,2,1)", "def rec_AbSum(p):\n if p == []:\n return 0\n return abs(p.pop()) + rec_AbSum(p)", "def missing_integer_simple(l):\n n = len(l)-1\n expected = n*(n+1)/2\n found = 0\n\n for num in l:\n if num is not None:\n found += num\n\n print(expected-found)", "def task4_add_repeatedly(num):\n while len(str(num)) != 1:\n num = sum([int(i) for i in str(num)])\n return num", "def Lucas(input) :\n\n # if input == 0 :\n\n # return 2\n # elif input == 1 :\n\n # return 1\n\n # else :\n\n # return Lucas(input-1) + Lucas(input-2)\n\n\n return sum_series(input,2,1)", "def sum1d_pos(summand):\n total = 0\n for i in range(summand.size):\n total += summand[i]\n return total", "def test_running_sum_multi_mix(self):\n argument = [4,0,2,-5,0]\n expected = [4,4,6,1,1]\n sums.running_sum(argument)\n self.assertEqual(expected,argument, \"the list contains a mixture of negative\"\n + \"and positive values. \")", "def sumsq(values):\n\n return sum(map(lambda x: x ** 2, values))", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result", "def sum_reduce_nb(col, a, *args):\n return np.nansum(a)", "def recursive_sum(lst):\n\n if lst == []:\n return 0\n\n else:\n\n return lst[0] + recursive_sum(lst[1:])", "def test_running_sum_empty(self):\n argument = []\n expected = []\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list is empty. 
\")", "def fsum(iterable):\n return 0.0", "def test_sum_list_int(self):\n\n list_of_int = [1, 2, 3]\n result = sum(list_of_int)\n\n self.assertEqual(result, 6)", "def rzeros(nums):\n total = len(nums)\n zeros = 0\n nozeros = []\n for x in nums:\n if x != 0:\n nozeros.append(x) \n else:\n zeros = zeros + 1\n \n return (nozeros, total - zeros, zeros)", "def divisors_sum(upper=10**5):\n nums = [0] * (upper + 1)\n for i in range(1, upper + 1):\n for j in range(i, upper + 1, i):\n nums[j] += i\n return nums", "def test_list_int(self):\n data = [1, 2, 3]\n result = sum(data)\n self.assertEqual(result, 6)", "def sum_accumulators(accs):\n valid = [acc for acc in accs if acc]\n if len(valid) == 0:\n return None\n\n ret = valid[0]\n for v in valid[1:]:\n ret += v\n return ret", "def make_accumulator():\n sum = []\n def accumulator(x):\n sum.append(x)\n total = 0\n for elem in sum:\n total += elem\n return total\n return accumulator", "def sum_numbers(numbers):\n sum = 0\n for number in numbers:\n sum += number\n\n return sum", "def cumsum(ls):\n\t\n\tacc = 0\n\tr = [0 for v in ls]\n\tfor i,v in enumerate(ls):\n\t\tacc += v\n\t\tr[i] = acc\n\treturn r", "def reduce_by_multiplication(data):\n total = 1\n for num in data:\n total *= num\n return total", "def sum_list_elements(input_list):\n print(f\"Sumatorio de los elementos dela lista: {sum(input_list)}\")", "def test_suite():\n test(sum_upto_first_even([1,3,2]),4)\n test(sum_upto_first_even([1,3,3]),7)\n test(sum_upto_first_even([2,3,3]),0)", "def column_sums(square):\n total = 0", "def question_26(list_num: int) -> int:\n return sum(list_num)", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def totlen(inputlist):\n tot = 0.0\n for x in inputlist:\n if isinstance(x, matrix):\n tot += totlen(x.getitems())\n else:\n try:\n test = len(x)\n except:\n tot += 1.0\n else:\n tot += test\n return tot", "def lsquare_of_sums(inlist):\r\n s = sum(inlist)\r\n return float(s)*s", "def uniform_non_negative_integers_with_sum(count, sum_):\n positive = uniform_positive_integers_with_sum(count, sum_ + count)\n return [i - 1 for i in positive]", "def lessthan_5(num_list):", "def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)", "def MySum( l ):\n\n #checking if arg is a list\n if isinstance(l, list):\n\n #adding all numbs\n result = 0\n\n for numb in l:\n result = result + int(numb)\n\n return result\n\n else:\n return \"Argument is not a list\"", "def one_fifty_sum():\n\n long_stringified = []\n for number in large_number:\n long_stringified.append(str(number))\n # print(long_stringified)\n # print('len of numbers', len( long_stringified ))\n\n stringified = []\n for number in long_stringified:\n stringified.append(number[:11])\n print('nums are strings', stringified)\n\n to_add = []\n\n length_of_num = len(stringified[0])\n zeroes = length_of_num-1\n\n for i in range(length_of_num):\n # print('column', i)\n sum = 0\n for j in range(len(stringified)):\n # print('sum', sum)\n # print('digit', stringified[j][i])\n sum += int(stringified[j][i])\n add_zero = '0' * zeroes\n sum = str(sum)\n # print( type(sum) )\n # print( type(add_zero) )\n num = sum + add_zero\n zeroes = zeroes - 1\n print('column', i, sum)\n new_num = int(num)\n # print(new_num, type(new_num) )\n to_add.append( new_num )\n\n total=0\n for element in to_add:\n total+=element\n 
print(total)", "def sum_sum(t, init):\n return sum(t, init)", "def sumDigit():", "def lcumsum (inlist):\r\n newlist = copy.deepcopy(inlist)\r\n for i in range(1,len(newlist)):\r\n newlist[i] = newlist[i] + newlist[i-1]\r\n return newlist" ]
[ "0.6345257", "0.62525344", "0.6204409", "0.61191237", "0.6101727", "0.6077421", "0.60648704", "0.60515577", "0.60255706", "0.5993455", "0.5983791", "0.5964002", "0.5958428", "0.594942", "0.5904664", "0.5882846", "0.5862508", "0.5862386", "0.5833747", "0.582293", "0.58177805", "0.58089757", "0.58056796", "0.5765478", "0.5727781", "0.5726941", "0.57147306", "0.56974435", "0.5696405", "0.5691822", "0.5690519", "0.5684608", "0.5671947", "0.5652665", "0.56514394", "0.5649867", "0.564485", "0.56337", "0.5612821", "0.56127757", "0.56118804", "0.5592035", "0.5591954", "0.55895954", "0.55373096", "0.5529069", "0.55255055", "0.5518196", "0.55141896", "0.5509153", "0.5497036", "0.5494149", "0.5494149", "0.5494149", "0.5494149", "0.5494149", "0.549411", "0.548295", "0.5482102", "0.54813254", "0.5481061", "0.5476706", "0.54745895", "0.54628414", "0.5461233", "0.54555565", "0.5451848", "0.5445674", "0.54411757", "0.5436179", "0.5435837", "0.5435837", "0.5430044", "0.5428836", "0.5425267", "0.5424448", "0.5424216", "0.54209334", "0.5419624", "0.5418645", "0.5413574", "0.5409353", "0.54062796", "0.54021966", "0.53973585", "0.5391288", "0.5388715", "0.5384685", "0.5381071", "0.5375974", "0.53698933", "0.53576744", "0.5350034", "0.5349919", "0.53496933", "0.5342619", "0.5319908", "0.52944285", "0.52741814", "0.52692777" ]
0.7876537
0
factorial(5) > 120 factorial(4) > 24 factorial(3) > 6 factorial(2) > 2 factorial(1) > 1 factorial(0) > 1
def factorial(N: int) -> int: return N*factorial(N-1) if N else 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def factorial(n):\n\n # the code for factorial", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def calculateFactorials():\n\n ni = []\n ni.append( 295232799039604140847618609643520000000) # 34!\n ITERATIONS = 34\n for n in range( 1, ITERATIONS,1 ) :\n ni.append(math.floor(ni[n - 1] / n))\n print( \"\\n \".join([\"xi = (xi * _x) >> PRECISION;\\n res += xi * %s;\" % hex(int(x)) for x in ni]))", "def factorial(n):\n ret = 1\n for i in range(2, n+1):\n ret *= i\n return ret", "def task17_factorial(num):\n result = 1\n for i in range(1, num + 1):\n result *= i\n return result", "def factorial(n):\n \n result = 1\n\n for i in range(1,n+1):\n result *= i\n\n return result", "def fact(n):\n\treturn int(factorial(n, exact=True))", "def Factorial(n):\n\tx = 1\n\tfor i in range(1, n + 1):\n\t\tx *= i\n\treturn x", "def factorial(x):\r\n output = 1\r\n for factor in range(2,x+1):\r\n output = output * factor\r\n return output", "def factorial(n):\n result = 1\n for i in range(1, n + 1):\n result *= i\n return result", "def factorial(n):\n return product(range(1, n + 1))", "def fact(n):\n return float(misc.factorial(n, True))", "def factorial(n):\r\n temp = 1\r\n for item in range(2, n+1):\r\n temp *= item\r\n return temp", "def factorial_loop(n):\n\n pass # @todo -fix this", "def factorial(n):\n\tf = 1\n\tfor i in range(1,n+1):\n\t\tf = f*i\n\n\treturn f", "def factorial(n: int) -> int:\n result = 1\n for i in range(1, n+1):\n result *= i\n return result", "def foo_6(x): ## calculate the factorial of x in a different way\n\tfacto=1\n\twhile x>=1:\n\t\tfacto=facto*x\n\t\tx=x-1\n\treturn facto", "def factorial(x):\r\n res = 1\r\n for i in range (1, x+1)\r\n res *= i\r\n return res", "def factorial(n):\n result = 1\n for x in range(2, n + 1):\n result = result * x\n\n return result", "def factorial(n):\n if n == 0:\n return 1\n else:\n return reduce((lambda x, y: x * y), range(1, n + 1))", "def factorial(n):\n if not n>=0:\n \traise ValueError('n must be >=0')\n if math.floor(n)!=n:\n \traise ValueError('n must be exact integer')\n if n+1==n:\n \traise OverflowError(\"n too large\")\n result=1\n factor=2\n while factor<=n:\n \tresult*=factor\n \tfactor+=1\n return result", "def factorial(n: int) -> int:\n _compute_factorial(n)\n return _factorial_sequence[n]", "def factorial(n: int) -> int:\n if n <= 1:\n return 1\n else:\n return n * factorial(n-1)", "def factorial (n):\n if n == 1:\n return 1\n \n \n else:\n return n * factorial(n - 1)", "def factorial(number):\n result = 1\n while number:\n result *= number\n number -= 1\n return result", "def factorial(n):\r\n if n == 1:\r\n return 1\r\n return n * factorial(n - 1)", "def obtain_factorial(x):\n product = 1\n for ii in list(range(x)):\n product = product * (ii + 1)\n\n return(product)", "def factorial(n):\n if isinstance(n, int) or isinstance(n, float):\n if n <= 1:\n return 1.0\n else:\n return n * factorial(n-1)\n else:\n res = 1.0\n for ni in n:\n res *= factorial(ni)\n return res", "def factorial(n):\n if n == 1:\n return n\n else:\n return n*factorial(n-1)", "def factorial(n: int):\n return 1 if n == 0 else factorial(n-1)*n", "def factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)", "def factorial(n):\n if n == 0:\n return 1\n return n * factorial(n - 1)", "def factorial_recursion(n):\n pass # @todo -fix this", "def factorial(n):\n if n == 0 or n == 1:\n return n\n return n * factorial(n-1)", "def factorial(x):\n value = 1\n for i in range(2, add(x, 1)):\n value = multiply(value, i)\n return value", "def 
factorial(n):\n if n < 0:\n raise ValueError(\"n cannot be negative\")\n result = 1\n for i in range(1, n + 1):\n result = multiply(result, i)\n return result", "def factorial(n: int):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def test_factorial():\n import math\n for number in range(500):\n assert factorial(number) == math.factorial(\n number), \"Could not calculate the factorial of {}\".format(number)", "def factorial(n):\n\n if n == 1:\n return 1\n else:\n return n * factorial(n-1)", "def factorial(n):\n if n == 1:\n return 1\n\n return n * factorial(n - 1)", "def factorial(n):\n if n < 0:\n raise Exception(f\"n! for n < 0: n = {n}\")\n prod = 1\n for i in range(1, n+1):\n prod *= i\n return prod", "def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)", "def factorial(x):\n factorial = x\n if x <= 1:\n return 1\n else:\n while (x > 1):\n x -= 1\n factorial = multiply(factorial, x)\n return factorial", "def calcularfactorial(n):\r\n fact = 1\r\n for i in range(1, n+1): ## El valor inicial 1 es para que no arranque desde 0 si no desde 1. El valor final es n+1 xq el valor final del range nunca esta incluido\r\n fact = fact * i ## Multiplicamos el fact por el i. I va a valer lo que devuelva el range: 1,2,3,4 etc. Vamos a multiplicar los valores fact partiendo de 1 por todos los valores a recorrer\r\n return fact", "def factorial(n):\n product = 1\n for i in range(n, 1, -1):\n product *= i\n return product", "def factorial(fac_1):\r\n\tcontador = 1\r\n\tfor i in range(1,fac_1 + 1):\r\n\t\tcontador = (contador)*(i)\r\n\treturn contador", "def get_factorial(number):\n if number == 1:\n return 1\n else:\n return number * get_factorial(number - 1)", "def Factorial(x):\n # 0) SECURITY CHECK\n if not isinstance(x, int):\n raise ValueError( \"'Factorial' function only accepts integers\" )\n\n # 1) COMPUTE THE FACTORIAL\n if x == 0 or x == 1:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(1, x + 1))", "def factorial(number):\n if number == 0:\n return 1\n return number * factorial(number - 1)", "def factorial(n):\n if isinstance(n, int) == False:\n raise TypeError('n is not integer; n value is {} and n type is {}'.format(n, type(n)))\n if n < 0:\n raise Exception('n must be greater than or equal to zero; n value is {}'.format(n))\n if n == 0:\n return 1\n result = 1\n for i in range(1, n+1):\n result = result * i\n return result", "def calculate_factorial(num: int, factorial_value: int):\n if num <= 1:\n return factorial_value\n\n return calculate_factorial(num - 1, num * factorial_value)", "def factorial(k):\n fact = 1\n for i in range(1, k + 1):\n fact *= i\n return fact", "def factorial(N):\n # Initialize the outpout variable to 1\n product = 1\n for n in range(2,N + 1):\n # Update the output variable\n product = product * n\n return product", "def factorial(x):\n ans = 1\n for i in range(x, 1, -1):\n ans *= i\n return ans", "def task_factorial():\n # set range of factorials here\n lo, hi = 0, 11\n user_digit = get_int(lo, hi) \n solution = n_factorial(user_digit) \n print(\"The factorial of %d is %d\" % (user_digit, solution))", "def factorial(number):\n\n if number == 1:\n return number\n\n return number * factorial(number-1)", "def factorial(n: int) -> int:\n assert n >= 0\n if n == 0:\n return 1\n if n == 1 or n == 2: # pylint: disable=consider-using-in\n return n\n return n * factorial(n - 1)", "def factorial(number):\n num_factorial = 
1\n for i in range(1, number+1):\n num_factorial*=i\n return num_factorial # returns a value back to the calling statement", "def factorial(num):\n if num <= 1:\n return 1\n\n if num > 1:\n return num * factorial(num-1)", "def factorial(n: int):\n # base case, reduce must have non-empty list\n if n <= 0:\n return 0\n # use reduce function to multiple elements\n return reduce(lambda x, y: x * y, range(1,n+1))", "def factorial(num):\n if num==1:\n return 1\n else:\n return(num*factorial(num-1))", "def factorial(n):\n if n < 1:\n raise ValueError(\"Not a valid value, must be greater than zero\")\n elif n == 1:\n return 1\n else:\n return n * factorial(n-1)", "def factorial(num):\n if num == 0:\n return 1\n else:\n return num * factorial(num - 1)", "def factorial(n: int) -> int:\n if n < 0:\n print(\"n must be a positive number. {} is an invalid response.\".format(n))\n exit(code=1)\n if n in (0, 1):\n return 1\n return factorial(n - 1) * n", "def test_4_factorial(self):\n self.assertEqual(factorial(4), 24)", "def fact(n):\n answer = 1\n while n > 1:\n answer *= n\n n -= 1\n return answer", "def fac(n:int) -> int :\n\n factorial = 1\n while n >= 1:\n factorial *= n\n n -= 1\n return factorial", "def factorial(n): \n def base(): \n return 1 \n def recursive(): \n return n * factorial(n-1)\n return ____________________________________________________________________", "def factorial(num):\n if num == 1:\n return num\n else:\n return num * factorial(num - 1)", "def test_10(self):\n\n input_ = 10\n output = math.factorial(input_)\n expected = 3628800\n\n self.assertEqual(expected, output,\n f'Result: {output}, expectd: {expected}')", "def factorial(n):\n if n < 0:\n return \"There is no factorial for such number\"\n elif n <= 1:\n return 1\n else:\n return n * factorial(n-1)", "def factorial(x):\n fact = gamma(x+1)\n return fact", "def calculate_factorial(\n self, value: \"int\", fail_if_bigger_than: \"int\" = None\n ) -> \"int\":\n pass", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def print_factorial():\n n = get_inp_factorial()\n print(n, \"! = \", factorial(n), sep='')", "def wrong_factorial(n):\n\n if n == 0:\n return 1\n else:\n return wrong_factorial(n + 1) / (n + 1)", "def factorial(x):\n if x == 1:\n return x\n else:\n return x * factorial(x - 1)", "def fact_i(n):\n \n result = 1\n while n > 1:\n result *= n\n n -= 1\n return result", "def fact(n):\n if n == 0: return 1\n else: return n*fact(n-1)", "def factorial(num):\n result = 1\n\n if num == 0:\n return 1\n\n for i in range(num, 1, -1):\n result *= i\n return result", "def _compute_factorial(n: int) -> None:\n\n fact_count = len(_factorial_sequence)\n\n # have the terms up to n! 
already been computed?\n if n < fact_count:\n return\n\n # compute numbers iteratively from existing sequence\n product = _factorial_sequence[-1]\n for i in range(fact_count, n + 1):\n product *= i\n _factorial_sequence.append(product)", "def iterative_factorial(n: int) -> int:\n assert n >= 0\n if n == 0 or n == 1: # pylint: disable=consider-using-in\n return 1\n f = 1\n for i in range(2, n + 1):\n f *= i\n return f", "def make_anonymous_factorial():\n return lambda n: 1 if n == 1 else mul(n, make_anonymous_factorial()(sub(n, 1)))", "def fact(n):\n if n < 1:\n return 1\n else:\n return n * fact(n - 1)", "def fact(n):\n if n < 1:\n return 1\n else:\n return n * fact(n - 1)", "def fact(n):\n if n < 1:\n return 1\n else:\n return n * fact(n - 1)", "def fact(n):\n if n < 1:\n return 1\n else:\n return n * fact(n - 1)", "def fact(n):\n if n <= 1:\n return 1\n else:\n return n * fact(n-1)", "def facti(n):\n if n == 0: return 1\n f= 1\n for i in range(2,n):\n f= f*i\n return f", "def fact(n: int) -> int:\n if n == 0:\n return 1\n else:\n return n*fact(n-1)", "def factorial(n, method='reduce'):\n if not isinstance(n, (int, float)):\n raise TypeError('factorial(n): n must be integer not %s' % type(n))\n n = int(n)\n\n if n == 0 or n == 1:\n return 1\n\n if method == 'plain iterative':\n f = 1\n for i in range(1, n+1):\n f *= i\n return f\n elif method == 'plain recursive':\n if n == 1:\n return 1\n else:\n return n*factorial(n-1, method)\n elif method == 'lambda recursive':\n fc = lambda n: n and fc(n-1)*int(n) or 1\n return fc(n)\n elif method == 'lambda functional':\n fc = lambda n: n<=0 or \\\n reduce(lambda a,b: int(a)*int(b), range(1,n+1))\n return fc(n)\n elif method == 'lambda list comprehension':\n fc = lambda n: [j for j in [1] for i in range(2,n+1) \\\n for j in [j*i]] [-1]\n return fc(n)\n elif method == 'reduce':\n return reduce(operator.mul, range(2, n+1))\n elif method == 'scipy':\n try:\n import scipy.misc.common as sc\n return sc.factorial(n)\n except ImportError:\n print('numpyutils.factorial: scipy is not available')\n print('default method=\"reduce\" is used instead')\n return reduce(operator.mul, range(2, n+1))\n # or return factorial(n)\n else:\n raise ValueError('factorial: method=\"%s\" is not supported' % method)", "def chosse(n,k):\n import math \n if (n>=k and k>=0):\n return math.factorial(n) / (math.factorial(k) * math.factorial(n-k))\n else:\n return \"No se puede calcular el numero factorial indicado\"", "def foo_5(x): ## a recursive function that calculates the factorial of x\n\tif x == 1:\n\t\treturn 1\n\treturn x * foo_5(x-1)", "def factorial(x):\n if x == -0.5:\n return math.pi**0.5\n elif x > 0 and x-0.5 == int(x-0.5):\n return x*factorial(x-1)\n else:\n return math.factorial(x)", "def factorial(x):\n\n # check if input value is negative or positive\n if x < 0:\n return print(\"Factorials do not exist for negative numbers.\")\n else:\n y = 1\n for i in range(1, x + 1):\n y = y * i\n return y", "def fatorial(n):\r\n if n <= 1: return 1\r\n return n * fatorial(n-1)", "def factorial(x):\n\n if x == 1:\n return 1\n else:\n fact = x * factorial(x-1)\n #print(x, fact, ' is fact')\n return fact", "def double_factorial(n):\n assert n >= -1, \"n >= -1\"\n l = np.arange(1,n+1)\n if n == 0 or n == -1:\n return 1\n elif n%2 == 1:\n return np.prod(l[::2])\n else:\n return np.prod(l[1::2])", "def facti(n: int) -> int:\n if n == 0:\n return 1\n f = 1\n for i in range(2, n):\n f = f*i\n return f" ]
[ "0.8139361", "0.79235435", "0.78876895", "0.7874468", "0.7866474", "0.784772", "0.7841788", "0.7832985", "0.7827345", "0.7794977", "0.7794691", "0.7788781", "0.7775511", "0.7756817", "0.77249163", "0.77127343", "0.76884425", "0.7687114", "0.7661024", "0.76508266", "0.7629169", "0.7625111", "0.7615252", "0.76086456", "0.76023555", "0.7600271", "0.7585326", "0.75740016", "0.7571671", "0.75661623", "0.75511676", "0.75511676", "0.7546132", "0.75446415", "0.75444317", "0.7538628", "0.7537337", "0.7526939", "0.7523636", "0.75196034", "0.75149834", "0.7497123", "0.7497123", "0.74842215", "0.74408287", "0.74354964", "0.74344045", "0.7422766", "0.7414969", "0.7412612", "0.74100363", "0.7402092", "0.7380961", "0.7360203", "0.73504907", "0.7343259", "0.7340552", "0.73361695", "0.7329075", "0.7328843", "0.7323929", "0.73197967", "0.7302669", "0.7269214", "0.7252901", "0.7207128", "0.7197766", "0.71749216", "0.7162398", "0.71576244", "0.7136767", "0.71025985", "0.71000725", "0.7095782", "0.70899224", "0.7069211", "0.7058847", "0.70518804", "0.70431536", "0.70279354", "0.70000696", "0.6991766", "0.69859666", "0.6985282", "0.6974604", "0.6974604", "0.6974604", "0.6974604", "0.6971999", "0.69713557", "0.69443053", "0.6920871", "0.6917822", "0.69096917", "0.6907051", "0.6903078", "0.68775666", "0.6876836", "0.6875573", "0.68513495" ]
0.74416447
44
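The row above pairs the factorial test values in its query with a one-line recursive candidate (document_score 0.74416447, document_rank 44). As an illustrative check, an editor's sketch rather than a stored field of the dataset, the candidate can be exercised against the values the query lists:

def factorial(N: int) -> int:
    # Row's candidate: multiply down to 1, treating N == 0 as the base case.
    return N * factorial(N - 1) if N else 1

# Expected values taken from the row's query: 5 -> 120, 4 -> 24, 3 -> 6, 2 -> 2, 1 -> 1, 0 -> 1.
for n, expected in [(5, 120), (4, 24), (3, 6), (2, 2), (1, 1), (0, 1)]:
    assert factorial(n) == expected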
hello hell hel he h
def pars_str(stroka: str) -> None:
    print(stroka)
    return pars_str(stroka[:-1]) if stroka else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shout(word):\n print(word+\"!\")", "def middle(word):\n return word[1:-1]", "def think(s):", "def middle(word):\n\treturn word[1:-1]", "def gibberish(*args):\n \n # Initialize an empty string: hodgepodge\n hodgepodge = ''\n\n # Concatenate the strings in args\n for word in args:\n hodgepodge += word\n\n # Return hodgepodge\n return(hodgepodge)", "def get_word(letters):\r\n\r\n word = \"\"\r\n for letter in letters:\r\n word += letter \r\n \r\n return word", "def inner(word):\n return word + '!!!'", "def love(text):\n a = text.split( )\n l = (len(a)-2)\n del a[l]\n a.insert(l,\"love\")\n return ' '.join(a)", "def completion(s):\n if len(s) >= 1 and s[0] == 'h':\n return ('hello', 'hello there')\n return None", "def create_word(char_list):", "def get_word(w):\n return ''.join(c for c in w if c.isalpha()).lower()", "def translate_leet(phrase):", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Replace print with return\n return shout_word", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Replace print with return\n return shout_word", "def PROPER(text):\n return text.title()", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word=word + '!!!'\n\n # Replace print with return\n return (shout_word)", "def stemming(self,sentence):", "def get_word():\n return ' '.join(sys.argv[1:])", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Print shout_word\n print(shout_word)", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Print shout_word\n print(shout_word)", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Print shout_word\n print(shout_word)", "def make_title(words):", "def hello():\n return 'Hello World hahaha !!!!!!'", "def look_and_say(s):\n pass\n lst = list(s)\n groups = []\n for char in lst:\n if groups and groups[-1] and groups[-1][0] == char:\n groups[-1].append(char)\n else:\n groups.append([char])\n ret = []\n for group in groups:\n ret.append(str(len(group)))\n ret.append(group[0])\n\n return ''.join(ret)", "def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))", "def first(word):\n return word[0]", "def see(s):\n print(\"---- %s -----\" % s)", "def test_wrap_word():\n line = \"n\" * 81\n assert wrap_line(line) == \"n\" * 80 + \"\\nn\"", "def verse_2():\n print(\"Old MacDonald had a farm\")\n print(\"E-I-E-I-O\")", "def first(word):\n\treturn word[0]", "def H(s):\n return 'H_' + ''.join(['%02x' % ord(x) for x in s])", "def hello():\n return 'Hello I like to make AI Apps'", "def stem(s):\n special = {'appall', 'kill', 'stroll', 'kiss', 'thrill', 'chugg', 'dress', 'err', 'express', 'fall', 'free', 'gall', 'add','cross', 'impress', 'inn', 'call', 'ball', 'bill', 'buzz'} \n ie_words = {'vying', 'lying', 'dying', 'tying'}\n short_ing = {'bring','sling','sping', 'bring', 'sing', 'ring', 'king', 'cling' ,'fling', 'wing', 'ding', 'ping', 'ting'}\n c_k_words = {'kick', 'muck', 'lock','pick', 'back', 'mock', 'peck', 'lock', 'nick'}\n\n if len(s) <= 3:\n return s\n if s[-3:] == 'ing' or s[-4:] == 'ings': \n if s in short_ing:\n return s\n elif s in special:\n return s[:-3]\n elif s[:-3] not in special and s[-4] == s[-5]:\n 
return s[:-4]\n elif s[:-3] not in c_k_words and s[-4] == 'k':\n return s[:-4]\n elif s == 'everything' or s == 'anything' or s == 'something':\n return s[:-5]\n elif s in ie_words:\n return s[0] + 'ie'\n else:\n return s[:-3]\n elif s[-3:] == 'ers':\n return s[:-3]\n elif s[-2:] == 'es':\n return s[:-2]\n elif s[-2:] == 'en':\n return s[:-2]\n elif s[-2:] == 'er':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2] \n elif s[-2:] == 'ed':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2]\n elif s[-3:] == 'ies':\n return s[:-2]\n elif s[-1:] == 's':\n return s[:-1]\n elif s[-1:] == 'e' and s not in ie_words:\n return s[:-1]\n elif s[-3:] == 'ful':\n return s[:-3]\n elif s[:2] == 'de':\n return s[2:]\n elif len(s) > 4 and s[-4:] == 'able' or s[-4] == 'ible':\n return s[:-4]\n elif s[:2] == 'in' or s[:2] == 'il' or s[:2] == 'ir':\n return s[2:]\n elif s[-1:] == 'y':\n return s[:-1] + 'i'\n else:\n return s", "def choose_word():\n pass", "def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}", "def hello_name(s):\n to_return = \"Hello {}!\".format(s)\n return to_return", "def daily1(word):\n if len(word) <= 2:\n return f'{word} to za krótkie słowo.'\n else:\n return word[::-1]", "def run_length(s):\n\n s = re.sub(\"[^a-zA-Z]\",\"\", s)\n s = s.lower()\n cnt = 1\n y = []\n z = []\n ch = ''\n for i in range(len(s)):\n if i + 1 < len(s) and s[i] == s[i + 1]:\n cnt += 1\n else:\n if cnt > 1:\n z.append(s[i-1])\n y.append(str(cnt))\n else:\n z.append(s[i])\n y.append(str(cnt))\n cnt = 1\n for i in range(len(y)):\n ch = ch + y[i] + z[i]\n i += 1\n print ch", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def repeat(s):\r\n\r\n return s", "def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]", "def main():\n word = input(\"Give me a word! 
\\n\\n\")\n vowels = ['a', 'e', 'i', 'o', 'u']\n if word[0].lower() in vowels:\n print(f\"\\n\\nPig latin: {word}way\")\n else:\n print(f\"\\n\\nPig latin: {word[1:]}{word[0]}ay\")", "def frequencyLetterDic(s):\n pass", "def shout():\n # Concatenate the strings: shout_word\n shout_word='congratulations'+'!!!'\n\n # Print shout_word\n print(shout_word)", "def break_words(stuff):\r\n # Above line is a short explanation for the function.\r\n\t# It will appear when help is typed in python\r\n words = stuff.split(' ') \r\n\t# .split is a built in function in python \r\n\t#It will sepearate words with what we will type in ()\r\n\t#In our case we are splitting with blank space.\r\n return words", "def test_single_letter_count(self):\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"h\"), 1)\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"z\"), 0)\n self.assertEqual(functions.single_letter_count(\"HelLo World\", \"l\"), 3)", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += alpha_map[s[i]]\n i += 1\n return res", "def atbash_cipher(s):\n try:\n new_s = \"\"\n for l in s:\n if string.ascii_lowercase.find(l, 0) != -1:\n pos = string.ascii_lowercase.find(l, 0)\n reverse = string.ascii_lowercase[::-1]\n new_s += reverse[pos]\n elif string.ascii_uppercase.find(l, 0) != -1:\n pos = string.ascii_uppercase.find(l, 0)\n reverse = string.ascii_uppercase[::-1]\n new_s += reverse[pos]\n else:\n new_s += l\n return new_s\n except (ValueError, IndexError) as ex:\n print(EXCEPTION_MESSAGE, ex)", "def shout(word1, word2):\n # Concatenate word1 with '!!!': shout1\n shout1=word1+'!!!'\n \n # Concatenate word2 with '!!!': shout2\n shout2=word2+'!!!'\n \n # Concatenate shout1 with shout2: new_shout\n new_shout=shout1 +shout2\n\n # Return new_shout\n return new_shout", "def _inject(word, phrase):\n words = phrase.split()\n words.append(word)\n random.shuffle(words)\n return \" \".join(words)", "def word(word_time):\n return word_time[0]", "def explode(self):\n return \"...it's a glove.\"", "def eat(string, s):\n #SHOULD RAISE HERE.\n return string[len(s):]", "def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")", "def hey(self, sentence=\"\"):\n if sentence == \"\" or sentence.replace(\" \", \"\") == \"\":\n return \"Fine. 
Be that way!\"\n if sentence.isupper():\n return \"Woah, chill out!\"\n if sentence[-1] == \"?\":\n return \"Sure.\"\n return \"Whatever.\"", "def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2", "def lower(self) -> str:", "def reverse_words(string):\n pass # TODO", "def too_long_words(word):\n\n # If work is longer than 10 letters, print the word according to these rules\n if len(word) > 10:\n print word[0] + str(len(word[1:-1])) + word[-1]\n\n else:\n print word", "def break_words(stuff):\r\n #parte la cadena cada vez que encuentra un espacio\r\n words = stuff.split(' ') \r\n return words", "def paste_strings(string_one,string_two):\n return string_one + \" \" + string_two", "def hello():\n return 'Hello HBNB!'", "def compile_word(word):\n # Your code here.\n if word.isalpha() and word.islower():\n return word\n if not word.isalpha():\n return word\n result = []\n mul = 1\n word = word[::-1]\n for w in word:\n if w.isalpha and w.isupper():\n result.append(str(mul) + '*' + w + \"+\")\n else:\n result.append(w)\n mul = mul*10\n ans = ''.join(result)\n return ans[:-1]", "def display_letters(word, guesses):\n pass", "def solution(s):", "def hey(what):\n\tif len(what) == 0 or what.isspace():\n\t\treturn \"Fine. Be that way!\"\n\t\"\"\"Checks if string is in upper case(Yelling)\"\"\"\n\tif what.isupper():\n\t\treturn \"Whoa, chill out!\"\n\t\"\"\"Iterates through string backwards looking for a ?, stopping if a non-\n\twhitespace character is found(Question)\"\"\"\n\tfor character in reversed(what):\n\t\tif character == '?':\n\t\t\treturn \"Sure.\"\n\t\tif character != \" \":\n\t\t\tbreak\n\t\"\"\"Catch all response for any other input\"\"\"\n\treturn \"Whatever.\"", "def sentence_printer(sentence):\r\n if len(sentence) == 0:\r\n return 2;\r\n for letter in sentence:\r\n sys.stdout.write(letter)", "def get_introduction(length=128, words=None):", "def test_strings_common_symbols():\n\n common_result = strings_ops.strings_common_symbols(\"hi\", \"hello\")\n assert common_result == \"h\"", "def front_back(string):\n pass", "def v(w,s):\n return w", "def print_new_word(self,word):\n new_word = ''\n for i in word:\n new_word+= i\n return new_word", "def shout():\n # Concatenate the strings: shout_word\n shout_word = 'congratulations'+'!!!'\n\n # Print shout_word\n print(shout_word)", "def supercombiner(bot, ev):\n # ported from jenni\n s = 'u'\n for i in iter(range(1, 3000)):\n if unicodedata.category(chr(i)) == \"Mn\":\n s += chr(i)\n if len(s) > 100:\n break\n bot.say(s)", "def ex5() :\r\n print(\" - Sentence Splitter - \")\r\n userText = input(\"Enter a line of text: \")\r\n splitText = userText.split() #split function to generate list of words\r\n for word in splitText : #for every word generate newline and print those words\r\n print(word)\r\n print(\"The longest word is \", max(len(word) for word in splitText), \" characters long.\") #max() function to check every word in list and show longest\r\n print(\"The shortest word is \", min(len(word) for word in splitText), \" characters long.\")#min() function to check every word in list and show shortest\r", "def shout(word1, word2):\n # Concatenate word1 with '!!!': shout1\n shout1 = word1 + '!!!'\n \n # Concatenate word2 with '!!!': shout2\n shout2 = word2 + '!!!'\n \n # Concatenate shout1 with shout2: new_shout\n new_shout = shout1 + shout2\n\n # Return new_shout\n return new_shout", "def hello_word():\n return {\"hello\": \"world\"}", "def fuzz(text):\r\n\r\n return ' 
'.join([fuzz_word(word) for word in text.split()])", "def spin_words(sentence):\n\n words = sentence.split()\n words = [word if len(word) < 5 else word[::-1] for word in words]\n return \" \".join(words)", "def count_hi(str):\n return str.count(\"hi\")", "def double_char(s):\n x = 0\n doubler = ''\n while(x < len(s)):\n doubler = doubler + (s[x] * 2)\n x = x + 1\n \n \n return doubler", "def two_passes(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n splitted = s.split('#')\n res = ''\n\n for i in range(len(splitted)):\n j = 0\n if i + 1 < len(splitted) and len(splitted[i]) > 2:\n while j < len(splitted[i]) - 2:\n res += alpha_map[splitted[i][j]]\n j += 1\n\n if i + 1 < len(splitted):\n res += alpha_map[splitted[i][j:]]\n else:\n while j < len(splitted[i]):\n res += alpha_map[splitted[i][j]]\n j += 1\n return res", "def getTitle(test:str) -> str:\n return test[5:].strip()", "def celex_diphthong_sub(word):\n word = re.sub(\"2\", \"#I\", word)\n word = re.sub(\"4\", \"QI\", word)\n word = re.sub(\"6\", \"#U\", word)\n word = re.sub(\"7\", \"I@\", word)\n word = re.sub(\"8\", \"E@\", word)\n word = re.sub(\"9\", \"U@\", word)\n return word", "def func0(s):\n\n return s+\"tsy\"", "def hello():\n return \"Hello\"", "def rotate_word(s,i):\n word=''\n if abs(i) > 26:\n i=i%26\n for char in s:\n old=ord(char)\n new=old+i\n if old < 65:\n fixed=old\n elif old > 122:\n fixed=old\n elif 90 < old < 97:\n fixed=old\n\telif 65 < old < 90:\n if new > 90:\n fixed=new-26\n elif new < 65:\n fixed=new+26\n else:\n fixed=new\n elif 97 < old < 122:\n if new > 122:\n fixed=new-26\n elif new < 97:\n fixed=new+26\n else:\n fixed=new\n rotated=chr(fixed)\n word=word+rotated\n return word", "def hello(first=\"Dave\", second=\"\"):\r\n return (\"Hello, \" + first + \" \" + second).strip() + \".\"", "def shout_echo(word1, echo=1):\n echo_words = \"\"\n shout_words = \"\"\n try: \n echo_words = word1 * echo\n\n shout_words = echo_words + \"!!!\"\n except:\n print(\"word must be a string and echo must be an integer\")\n\n return shout_words", "def help_enhancer(_h):\n return ''.join(reversed(_h))", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def brute_force_attack(string):\n try:\n for key in range(1, 26):\n string_to_return = \"\"\n for l in string:\n if not(l >= 'A'and l <= 'Z' or l >= 'a'and l <= 'z'):\n string_to_return += l\n elif key + ord(l.upper()) > ord('Z'):\n string_to_return += chr(ord('A') + ord('Z') - ord(l.upper()))\n else:\n string_to_return += chr(ord(l.upper())+key)\n print(string_to_return)\n return string_to_return\n except Exception as ex:\n print(EXCEPTION_MESSAGE, ex)", "def stem(s):\r\n if len(s) < 5 :\r\n return s\r\n \r\n if s[-3:] == 'ing':\r\n if s[-4] == s[-5]:\r\n if s[-4] == 'l' :\r\n s = s[:-3]\r\n s = s[:-4]\r\n else: \r\n s = s[:-3]\r\n elif s[-2:] == 'er':\r\n s = s[:-2]\r\n elif s[-1] == 's' :\r\n s = s[:-1]\r\n stem_rest = stem(s)\r\n return stem_rest\r\n if len(s) >= 9:\r\n if s[-3:] == 'ion':\r\n s = s[:-3]\r\n \r\n \r\n elif s[0:3] == 'mis':\r\n if s == 'misses' or s == 'missus':\r\n return s[0:4]\r\n else:\r\n s = s[3:]\r\n elif s[:2] == 'un':\r\n s = s[2:]\r\n \r\n if len(s) >= 7:\r\n if s[:4] == 'over':\r\n s = s[4:]\r\n return s", "def space_before_after(self):\n 
self.spawn(\"./initials\").stdin(\" hailey James \", prompt=False).stdout(match(\"HJ\"), \"HJ\\n\").exit(0)", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def _subconstituent_name(h):\n if h == 1:\n o = \"1st\"\n elif h == 2:\n o = \"2nd\"\n elif h == 3:\n o = \"3rd\"\n else:\n o = \"%dth\" % h\n return \"%s subconstituent\" % o", "def love(some_text):\n #This function will take a text and change the secound last word to \"love\".\n sentence = some_text.split()\n #print(sentence)\n sentence[-2] = \"love\"\n new_text = \" \".join(sentence)\n #print(new_text)\n return new_text", "def alphabet_war(fight):", "def extract_letters( idx1, idx2, text ):\r\n return text[idx1] + text[idx2]", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a", "def getWordKey(word):\n # BEGIN_YOUR_ANSWER (our solution is 1 lines of code, but don't worry if you deviate from this)\n return len(word), word\n # END_YOUR_ANSWER" ]
[ "0.58079946", "0.5710749", "0.56786484", "0.5669718", "0.56644154", "0.5633301", "0.56264555", "0.55352926", "0.55349237", "0.55043215", "0.5499545", "0.5480967", "0.54765", "0.54765", "0.54730135", "0.54675144", "0.5462265", "0.54491264", "0.5444947", "0.5444947", "0.5444947", "0.5442685", "0.54282546", "0.54245746", "0.5411303", "0.5398682", "0.536178", "0.5338914", "0.5337149", "0.5327178", "0.5326333", "0.53233075", "0.531002", "0.5294958", "0.52785045", "0.5273199", "0.5264201", "0.5247781", "0.5246454", "0.5241607", "0.5241263", "0.5217045", "0.52108806", "0.5195419", "0.5194826", "0.519134", "0.5187926", "0.5184738", "0.51739347", "0.517249", "0.51722544", "0.515874", "0.51451755", "0.51423496", "0.5136486", "0.51348656", "0.512856", "0.51259524", "0.512358", "0.51228404", "0.5120777", "0.51205283", "0.5116954", "0.51166135", "0.51081115", "0.51037896", "0.5099596", "0.5099031", "0.5095508", "0.5093637", "0.5091915", "0.5086539", "0.50715566", "0.506952", "0.5067089", "0.50558364", "0.5055489", "0.50517386", "0.5050316", "0.50468814", "0.50457007", "0.5043704", "0.5039158", "0.5038147", "0.50376123", "0.50356823", "0.50330645", "0.5028483", "0.5027475", "0.5020386", "0.50112164", "0.50067383", "0.49910957", "0.4989051", "0.49844602", "0.49810815", "0.4976196", "0.49748498", "0.49719846", "0.4971639", "0.49713314" ]
0.0
-1
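The row above stores document_score 0.0 and document_rank -1 for its candidate. As an illustrative sketch (an editor's addition, not dataset content), running the candidate on "hello" prints the successively shortened strings the query lists, plus one blank line for the final empty string:

def pars_str(stroka: str) -> None:
    # Row's candidate, kept verbatim: the -> None annotation is stored even though a value is returned.
    print(stroka)
    return pars_str(stroka[:-1]) if stroka else 0

pars_str("hello")  # prints hello, hell, hel, he, h, then a blank line; the empty string returns 0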
print_stars(5) \n\n\n\n\n print_stars(4) \n\n\n\n print_stars(3) \n\n\n print_stars(2) \n\n print_stars(1) \n print_stars(0) ''
def print_stars(N: int) -> str:
    # if N:
    #     return f'*\n{print_stars(N-1)}'
    # return ''
    return '' if not N else f'*\n{print_stars(N-1)}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "def star():\n print('*', end='')", "def starry_box(phrase):\n numStars = len(phrase) + 4\n print '*' * numStars\n print '*', phrase, '*'\n print '*' * numStars\n return", "def print_line(n):\n for i in range(1,n+1):\n str1 = ('*' * (i))\n print(str1)", "def draw_star(turtle, n):\n\n for i in range(n):\n turtle.forward(100)\n turtle.left(180 - 180/n)", "def sampleSquare():\n size = int(input('Enter the size: '))\n print('Sample Square of size', size)\n\n # display the first row of stars\n for i in range(size):\n star()\n newline()\n\n # display the \"middle\" rows. There are (size - 2) of them\n for i in range(size - 2):\n # for each row: star, spaces (size - 2 of them), star, newline\n star()\n for j in range(size - 2):\n space()\n star()\n newline()\n \n # display the last row of stars\n for i in range(size):\n star()\n newline()", "def newline():\n\n print('')", "def newline():\n print()", "def starbox(width, height):\n print(\"*\" * width) # print top edge of the box\n # print sides of the box\n for _ in range(height - 2):\n print(\"*\" + \" \" * (width - 2) + \"*\")\n print(\"*\" * width) # print bottom edge of the box", "def list(show=0):\n global stars_\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if show == 0:\n i=0\n for s in stars_:\n i=i+1\n print i,s[0],s[1],s[2],s[3]\n else:\n if show > 0 and show <= len(stars_):\n s = stars_[show-1]\n print show,s[0],s[1],s[2],s[3]\n else:\n print \"Bad star index\"", "def space():\n print(' ', end='')", "def drawFivePointStar(turtle):\n\n for i in range(5):\n turtle.forward(100)\n turtle.left(216)", "def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))", "def draw_1(n: int):\n \n for row in range(n):\n\n for col in range(n - row - 1):\n print(' ', end='')\n\n for col in range(2 * row + 1):\n print('*', end='')\n \n print()", "def while_X():\r\n i=0\r\n while i<6:\r\n j=0\r\n while j<6:\r\n if i-j==0 or i+j==5:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1 \r\n print()\r\n i+=1", "def show_stacked_body(start=0, stack=3, show_calc=0):\n stack_sort = random.randint(0, 3)\n\n for n in range(start, stack):\n n_stars = 1 + (2*n)\n\n if show_calc:\n calc_result = str(n+1) + '...n_stars= ' + str(n_stars)\n else:\n calc_result = ''\n\n chance = random.randint(0, 10)\n star_body = (\"-\" * n_stars)\n\n if chance in [0,1,2,3,4,5,6,7,8,9,10]:\n for n in range(n_stars):\n pos = random.randint(0, n_stars-1)\n\n # chance%3 or stack_sort%3 or ORNAMENT_SORT%3\n if stack_sort%3 == 0:\n ornaments = random.choice('_.^:;\\'') # _.^:;\\'\n elif stack_sort%3 == 1:\n ornaments = random.choice(\"abcdefghijklmnopqrstuvwxyz\") # _.^:;\\'\n elif stack_sort%3 == 2:\n ornaments = random.choice(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\") # _.^:;\\'\n\n star_body = star_body[:pos] + ornaments + star_body[pos+1:]\n\n line_print = star_body.center(GROUND_WIDTH) + calc_result\n print(line_print)", "def nl():\n\tprint(\"\")", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def single_line():\n print (\"-------------------------------------------------------------\")", "def 
newline(lines=1):\n\n # Print the new line iterated by the amount of new lines\n print('\\n' * lines)", "def createStar(npoints):\n # START CODE HERE\n\n pass\n # END CODE HERE # (remove the pass statement)", "def print_line():\n print('+ - - - - + - - - - +'),", "def draw_2(n: int):\n\n for row in range(n):\n for col in range(n - row):\n print('*', end='')\n print()", "def draw_5(n: int):\n\n # Top half + middle\n for row in range(n // 2 + (n % 2)):\n cols = (row) * 2 + (n % 2)\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()\n\n # Bottom half\n for row in range(n // 2):\n cols = n - (row + 1) * 2\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()", "def square(n):\n\n my_CRLF = '\\n'\n return_value = ''\n for _ in range(n):\n return_value += line(n) + my_CRLF\n return return_value", "def drawStar(duration):\n # START CODE HERE #\n\n\n pass\n # END CODE HERE # (remove the pass statement)", "def display_ratings(ratings):\n # only attempt to display the ratings if any were found\n if ratings:\n print('\\n[RATINGS]\\n')\n\n for rating in ratings:\n print(f' {rating}', end=' ')\n # needed to get printing back to normal\n print()", "def for_five():\r\n\r\n for row in range(7):\r\n for col in range(5):\r\n if col==0 and row<6 and row!=4 or col>0 and col<3 and row%3==0 or col==3 and (row==0 or row>3) and row<6:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def draw_5_alt(n: int):\n \n n = n - (n + 1) % 2 # cut even number down to nearest odd\n for row in range(n):\n dist_from_half = (abs(n // 2 - row))\n cols = n - dist_from_half * 2\n\n for col in range((n - cols) // 2):\n print(' ', end='')\n \n for col in range(cols):\n print('*', end='')\n \n print()", "def create_star(rk_settings, screen, stars, star_number, row_number):\r\n\tstar = Star(rk_settings, screen)\r\n\tstar_width = star.rect.width\r\n\tstar.x = star_width + 2 * star_width * star_number\r\n\tstar.rect.x = star.x\r\n\tstar.rect.y = star.rect.height + 2 * star.rect.height * row_number\r\n\tstars.add(star)", "def while_five():\r\n row = 0\r\n while row<7:\r\n col = 0\r\n while col<7:\r\n if col==0 and row<6 and row!=4 or col>0 and col<3 and row%3==0 or col==3 and (row==0 or row>3) and row<6:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def draw_x(n: int):\n \n for row in range(n): # loops through as many rows as the user asked for\n for column in range(row+1): # loops through as many columns as current row number\n print('*', end='') # prints a star for each column, avoiding newline\n print() # print the newline only when the whole row is done", "def draw_4(n: int):\n\n # Top half + middle\n for row in range(n // 2 + (n % 2)):\n cols = n - row * 2\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()\n\n # Bottom half\n for row in range(n // 2):\n cols = (row + 1) * 2 + (n % 2)\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()", "def for_f():\r\n\r\n for row in range(6):\r\n for col in range(4):\r\n if col==1 and row>0 or row==3 and col<3 or col==2 and row==0 or col==3 and row==1:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def while_f():\r\n\r\n row = 0\r\n while row<6:\r\n col = 0\r\n while col<4:\r\n if col==1 and row>0 or 
row==3 and col<3 or col==2 and row==0 or col==3 and row==1:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def draw_star(x=0,y=0,radius=10):\n cx = x\n cy = y+radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()\n cy = y-radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()", "def print_progression(self, n):\n print(\" \".join(str(round(next(self), 5)) for i in range(n)))", "def new_line():\n print()", "def main():\n\n print('# <-- This is where the edge is')\n print('space_line(3, 5) -->')\n print(space_line(3, 5))", "def nine_lines() -> str:\n print('now printing 9 lines')\n for _ in range(3):\n three_lines()", "def fill():\n print('#', end='')", "def print_giant():\n print()\n print(r\" ___I___ \")\n print(r\" /= | #\\ \")\n print(r\" /.__-| __ \\ \")\n print(r\" |/ _\\_/_ \\| \")\n print(r\" (( __ \\__)) \")\n print(r\" __ ((()))))()) __ \")\n print(r\" ,' |()))))(((()|# `. \")\n print(r\" / |^))()))))(^| =\\ \")\n print(r\" / /^v^(())()()v^;' .\\ \")\n print(r\" |__.'^v^v^))))))^v^v`.__| \")\n print(r\" /_ ' \\______(()_____( | \")\n print(r\" _..-' _//_____[xxx]_____\\.-| \")\n print(r\" /,_#\\.=-' /v^v^v^v^v^v^v^v^| _| \")\n print(r\" \\)|) v^v^v^v^v^v^v^v^v| _| \")\n print(r\" || :v^v^v^v^v^v`.-' |# \\, \")\n print(r\" || v^v^v^v`_/\\__,--.|\\_=_/ \")\n print(r\" >< :v^v____| \\_____|_ \")\n print(r\" , || v^ / \\ / \")\n print(r\" //\\_||_)\\ `/_..-._\\ )_...__\\ \")\n print(r\" || \\/ #| |_='_( | =_(_ \")\n print(r\" || _/\\_ | / =\\ / ' =\\ \")\n print(r\" \\\\\\/ \\/ )/ gnv |=____#| '=....#| \")\n print()", "def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)", "def print_f(msg, i, t):\n if i == t-1:\n sys.stdout.write('\\n')\n return\n t += 1\n i += 1\n msg += '\\t'\n sys.stdout.write('\\r')\n sys.stdout.write(\"%s%s%% |%s\" % (msg, int(i % t), int(i % t) * '#'))\n sys.stdout.flush()", "def fn_draw_square(n):\n turtle.shape('turtle')\n for i in range(4):\n turtle.forward(n)\n turtle.left(90)\n turtle.done()", "def draw_4_alt(n: int):\n \n n = n - (n + 1) % 2 # cut even number down to nearest odd\n for row in range(n):\n dist_from_half = (abs(n // 2 - row))\n cols = 1 + dist_from_half * 2\n\n for col in range((n - cols) // 2):\n print(' ', end='')\n \n for col in range(cols):\n print('*', end='')\n \n print()", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def for_o():\r\n for row in range(5):\r\n for col in range(5):\r\n if row%4 ==0 and col %4 !=0 or row%4 !=0 
and col %4 ==0:\r\n print('*',end=' ')\r\n else:\r\n print(' ',end=' ')\r\n print()", "def draw_map(stars):\n say(\" STAR MAP\")\n say(\" ************\")\n for y in range(15, -16, -1):\n line = list(\" | \")\n if y == 0:\n line = list(\n \"+----+----+----+----+----*SOL-+----+----+----+----+ \")\n elif y % 3 == 0:\n line[25] = \"+\"\n y_hi = y * 10 / 3\n y_lo = (y + 1) * 10 / 3\n for star_index in range(1, len(stars)):\n if y_lo > stars[star_index].y >= y_hi:\n x = round(25 + stars[star_index].x / 2)\n name = stars[star_index].name\n line[x:x + len(name) + 1] = \"*\" + name\n break\n\n say(\"%s\" % \"\".join(line))\n say(\"\\nTHE MAP IS 100 LIGHT-YEARS BY 100 LIGHT-YEARS,\")\n say(\"SO THE CROSS-LINES MARK 10 LIGHT-YEAR DISTANCES\")", "def while_L():\r\n\r\n for row in range(6):\r\n for col in range(4):\r\n if col==0 or row==5:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def drawstars(slist=[], best=None, outfile='/tmp/stars.jpg'):\n img = Image.new('RGB', (xmax,ymax), backcol) #blank 8-bit color image\n draw = ImageDraw.Draw(img)\n\n x,y,radius = 400, 300, hole_radius*Cscale\n draw.rectangle( (400+Xmin*Cscale, 300-Ymin*Cscale, 400+Xmax*Cscale, 300-Ymax*Cscale), outline=(0,128,0), fill=None)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,128,0), fill=None)\n\n for i in range(len(slist)):\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(0,0,0), fill=(0,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(0,0,0) )\n\n i = best #Redraw the 'best' star in red\n try:\n x,y,radius = 400+slist[i].x*Sscale, 300-slist[i].y*Sscale, rscale(slist[i].mag)\n draw.chord( (int(x-radius+0.5),int(y-radius+0.5),int(x+radius+0.5),int(y+radius+0.5)),\n 0, 360, outline=(192,0,0), fill=(192,0,0))\n draw.text( (400+slist[i].x*Sscale+3, 300-slist[i].y*Sscale+3), `i`, fill=(192,0,0) )\n except TypeError,IndexError:\n pass #There is no 'best' star\n\n img.save(outfile, quality=90)", "def _create_stars(self, stars_number, row_number):\n star = Star(self)\n stars_width, stars_height = star.rect.size\n star.x = stars_width + 2 * stars_width * stars_number\n star.rect.x = star.x\n star.rect.y = star.rect.height + 2 * star.rect.height * row_number\n self.stars.add(star)", "def example():\n print \"\"\"\n \"\"\"", "def example():\n print \"\"\"\n \"\"\"", "def html_space(n):\n return \" \" * n", "def my_print(self):\n if self.__size == 0:\n print()\n else:\n print(\"\\n\" * self.__position[1], end='')\n for x in range(self.__size):\n print(\" \" * self.__position[0], end='')\n print(\"#\" * self.__size)", "def one_line_print(length, player, pos):\n if pos == 0:\n print(player, 'in Home', end=' ')\n else:\n print('Home', end=' ')\n for i in range(1, length):\n if i == pos:\n print(player, end=' ')\n else:\n print('.', end=' ')\n if pos == length:\n print(player, 'in Finish')\n else:\n print('Finish')", "def while_o():\r\n row =0\r\n while row <5:\r\n col =0\r\n while col <5:\r\n if row%4 ==0 and col %4 !=0 or row%4 !=0 and col %4 ==0:\r\n print('*',end=' ')\r\n else:\r\n print(' ',end=' ')\r\n col+=1\r\n print()\r\n row +=1", "def double_line():\n print (\"=============================================================\")", "def for_X():\r\n for row in range(7):\r\n for col in range(7):\r\n if row-col==0 or row+col==6:\r\n print(\"*\",end=\" \")\r\n else:\r\n 
print(\" \",end=\" \")\r\n print()", "def _sig_stars(val):\n star = \"\"\n if 0 <= val < 0.001:\n star = \"***\"\n elif 0.001 <= val < 0.01:\n star = \"**\"\n elif 0.01 <= val < 0.05:\n star = \"*\"\n elif 0.05 <= val < 0.1:\n star = \".\"\n return star", "def draw():\r\n\r\n print('\\n+---+---+---+')\r\n for i in range(9):\r\n print('| ' + board[i] + ' ', end='')\r\n if (i + 1) % 3 == 0:\r\n print('|\\n+---+---+---+')", "def print(self,n):\r\n c = 0\r\n for i in n:\r\n for j in i:\r\n if c == 9:\r\n print()\r\n c = 0\r\n c = c+1\r\n print(j, end=\" \")", "def _spacer(self, msg):\n msg = str(msg)\n msg_len = len(msg)\n if msg_len == 1:\n print(\" \", end=\"\")\n elif msg_len == 2:\n print(\" \", end=\"\")", "def part_1():\n print(\"You finally get out of the forest\")\n time.sleep(1)\n print(\"You see a giant frost spider in the distance\")\n print(r\"\"\"\n (\n )\n (\n /\\ .-\" \"-. /\\\n //\\\\/ ,,, \\//\\\\\n |/\\| ,;;;;;, |/\\|\n //\\\\\\;-\" \"-;///\\\\\n // \\/ . \\/ \\\\\n (| ,-_| \\ | / |_-, |)\n //`__\\.-.-./__`\\\\\n // /.-(() ())-.\\ \\\\\n (\\ |) '---' (| /)\n ` (| |) `\n \\) (/)\"\"\")", "def for_L():\r\n\r\n for row in range(6):\r\n for col in range(4):\r\n if col==0 or row==5:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def display_hline():\n for i in range(12):\n print(\"-\", end=\"\")\n print()", "def space(n):\n rstr = \"&nbsp;\" * 4 * n\n return rstr", "def for_P():\r\n for row in range(7):\r\n for col in range(4):\r\n if col==0 or row in (0,3) and col!=3 or col==3 and row in(1,2):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n print()", "def alert(n):\n for i in range(n):\n print(''.join([start, ' ', end, '!']))", "def prow(x, y=2):\n for i in range(y):\n print(\"+\", \"- \" * x, end=\"\")\n print(\"+\")", "def printstringtp2(xs): #Printing function\n for x in range(xs+1): #Outer loop for line iteration\n print(\"\\n\")\n for y in range(x):\n print(y,end=' ')", "def draw_3_alt(n: int): \n\n n = n - (n + 1) % 2 # cut even number down to nearest odd\n for row in range(n):\n dist_from_half = (abs(n // 2 - row))\n cols = n - n // 2 - dist_from_half\n for col in range(cols):\n print('*', end='')\n print()", "def print_progression(self, n):\n print(\" \".join(str(next(self)) for i in range(n)))", "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def print_square(num):\n print(\"Square: {}\".format(num * num))", "def print_square(size):\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n if size < 0:\n raise ValueError(\"size must be >= 0\")\n row = \"#\" * size\n for i in range(size):\n print(\"{}\".format(row))", "def sequential_print_statements():\n pass", "def print_maze(maze: list):\n n = len(maze)\n for i in range(n):\n print(maze[i])", "def draw_3(n: int):\n\n # Top half + middle\n for row in range(n // 2):\n for col in range(row + 1):\n print('*', end='')\n print()\n\n # Bottom half\n for row in range(n // 2, n):\n for col in range(n - row):\n print('*', end='')\n print()", "def spiral(stairs):\n ws = ''\n \n print(' '.join(stairs))\n for _ in range(1, len(stairs)):\n stairs.append(stairs[0])\n ws += ' ' * (1 + len(stairs[0]))\n del stairs[0]\n print(ws + ' '.join(stairs))", "def my_print(self):\n if self.size == 0:\n print(\"\")\n return\n for j in 
range(self.__position[1]):\n print(\"\")\n for i in range(self.size):\n if self.__position[0] > 0:\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.size)", "def print_square(size):\n if not isinstance(size, int):\n raise TypeError(\"size must be an integer\")\n if (size < 0):\n raise ValueError(\"size must be >= 0\")\n if size > 0:\n print(\"\\n\".join([\"#\" * size for j in range(size)]))", "def for_b():\r\n\r\n for row in range(7):\r\n for col in range(4):\r\n if col==0 or row!=0 and row%3==0 and col<3 or col==3 and row in (4,5):\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def display_board(x):\n\n # For b in i in x\n c = ''\n for i in x:\n\n a = 0\n p = ''\n for b in i:\n ilen = len(b)\n a += 1\n if b == ' ':\n p = p + \"_\"\n elif b == '*':\n p = p + \"*\"\n elif a == ilen:\n p = p + '\\n'\n else:\n raise False\n c = c + p\n print(p)", "def printSeparator(count: int):\n if count == 0 or count == 3 or count == 6:\n print(\"|\", end='')\n return\n\n if count == 1 or count == 4 or count == 7:\n print(\"|\", end='')\n return\n\n if count == 2 or count == 5:\n print('')\n print(\"-+-+-\")\n return\n\n if count == 8:\n print('')\n return", "def pretty_print(linenum, todo):\n\n global COUNTER\n comm_endings = ['\"\"\"', \"'''\", '*/', '-->', '#}', '--}}', '}}', '%>']\n for i in comm_endings:\n if todo.endswith(i):\n todo = todo[:-len(i)]\n print(' line', linenum.rjust(4), '>>\\t', todo )\n COUNTER += 1", "def while_b():\r\n\r\n row = 0\r\n while row<7:\r\n col = 0\r\n while col<4:\r\n if col==0 or row!=0 and row%3==0 and col<3 or col==3 and row in (4,5):\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1", "def print_border_maze(maze, spacer=False):\n sp = ''\n if spacer:\n sp = ' '\n\n for row in range(-1, Y + 1):\n for col in range(-1, X + 1):\n #print(maze[(col, row)] + sp, end=\"\")\n print(maze[(row, col)] + sp, end=\"\")\n print()\n print()", "def compute_pattern(n):\n for x in range(1,n):\n for y in range(x, x*2):\n print(y, end= \" \")\n print()", "def write(string,indent=0,end=\"\"):\n sys.stdout.write(\" \"*indent+string+end)", "def draw_pentagram(size):\n count = 1\n while count <= 5:\n turtle.forward(size)\n turtle.right(144)\n count += 1", "def welcome():\n print(\"\"\"\n\n-----------------------------------\n Welcome to the Tip Calculator \n-----------------------------------\n\"\"\")", "def find_rating():\n print(\"***** Finding Star/Rating *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(\"This business is rated \" + str(\n business_object['stars']) + \" stars with \" + str(\n business_object['review_count']) + \" reviews.\\n\")\n\n print_business(business_object)", "def print_blank_lines():\n for _ in range(parameters[\"Trailing lines\"]):\n print()", "def indent(text, first_line=True, n=1, width=4):\n lines = text.split(\"\\n\")\n if not first_line:\n first = lines[0]\n lines = lines[1:]\n\n spaces = \" \" * (width * n)\n lines2 = [spaces + x for x in lines]\n\n if not first_line:\n lines2.insert(0, first)\n\n indented = \"\\n\".join(lines2)\n\n return indented", "def while_P():\r\n i=0\r\n while i<7:\r\n j=0\r\n while j<4:\r\n if j==0 or i in(0,3) and j%3!=0 or j==3 and i in(1,2):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1 
\r\n print()\r\n i+=1", "def netflix_print(writer, rating):\n if isinstance(rating, float):\n writer.write(('%.1f' % rating) + \"\\n\")\n elif isinstance(rating, int):\n writer.write(str(rating) + \":\\n\")\n else:\n writer.write(rating + \"\\n\")" ]
[ "0.78595215", "0.7326586", "0.6608579", "0.66084236", "0.64868563", "0.6461798", "0.62855875", "0.6244473", "0.6224193", "0.621459", "0.61932653", "0.61023444", "0.6071811", "0.59977406", "0.5921401", "0.5901315", "0.58597124", "0.5856981", "0.58483934", "0.5816308", "0.57875204", "0.5784535", "0.57806206", "0.574841", "0.5748191", "0.5731355", "0.5713836", "0.5659546", "0.56586206", "0.5626202", "0.5624615", "0.5593337", "0.55843973", "0.55826354", "0.5544504", "0.5538949", "0.5536504", "0.5533279", "0.5531501", "0.5529114", "0.5525491", "0.55219233", "0.5468092", "0.5450552", "0.54426223", "0.544228", "0.543125", "0.54232615", "0.5423099", "0.53939366", "0.5386472", "0.53792465", "0.5378336", "0.5378336", "0.5374596", "0.53570896", "0.53564054", "0.5355147", "0.53549737", "0.5341761", "0.53365284", "0.5323656", "0.5323578", "0.5322013", "0.53173083", "0.53114104", "0.52697325", "0.524747", "0.5246388", "0.5231619", "0.52241755", "0.5223885", "0.5216547", "0.5209319", "0.52042085", "0.52005595", "0.52005595", "0.51981133", "0.5195945", "0.51946384", "0.5192039", "0.5190634", "0.5183459", "0.51766336", "0.5173051", "0.5167828", "0.51589316", "0.5153617", "0.5147481", "0.5144571", "0.5144124", "0.51363176", "0.513438", "0.5132663", "0.5125111", "0.51232594", "0.5119811", "0.51185197", "0.51139075", "0.5109902" ]
0.8280417
0
Mutual Information by Entropy norm between specific items.
def mmi_norm(self, x, y, tuples):
    P_ = {x: self.P(x, tuples), y: self.P(y, tuples)}
    P_xy = self.condP(x, y, tuples)
    return - P_[x] * log2(P_[x]) - P_[y] * (-P_xy * log2(P_xy))
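A minimal, self-contained sketch of the score above, assuming a corpus given as a list of item tuples; the `prob` and `cond_prob` helpers are hypothetical stand-ins for the `self.P` and `self.condP` methods referenced but not shown here.

```python
# Standalone sketch of the entropy-normed pairwise score.
# prob / cond_prob are assumed count-based substitutes for self.P / self.condP.
from math import log2

def prob(item, tuples):
    """P(item): fraction of tuples that contain the item."""
    return sum(item in t for t in tuples) / len(tuples)

def cond_prob(x, y, tuples):
    """P(x|y): among tuples containing y, the fraction that also contain x."""
    with_y = [t for t in tuples if y in t]
    return sum(x in t for t in with_y) / len(with_y)

def mmi_norm(x, y, tuples):
    p_x, p_y = prob(x, tuples), prob(y, tuples)
    p_xy = cond_prob(x, y, tuples)
    return -p_x * log2(p_x) - p_y * (-p_xy * log2(p_xy))

corpus = [("cat", "pet"), ("dog", "pet"), ("cat", "animal"), ("fish", "pet")]
print(mmi_norm("cat", "pet", corpus))  # ≈ 0.10 on this toy corpus
```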
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized_mutual_information(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n return mutual_info_score(org, cl) / (abs(entropy(cl) + entropy(org)) / 2)", "def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info", "def mutual_information(x, y, bins, normalize=False):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins\n bins = get_2D_bins(x, y, bins)\n\n # calculate entropy(x) and conditional_entropy(x,y)\n hx = entropy(x, bins[0])\n hcon = conditional_entropy(x, y, bins)\n\n if normalize:\n normalizer = np.min([entropy(x, bins[0]), entropy(y, bins[1])])\n mutual_info = hx - hcon\n\n # check if mutual info and normalizer are very small\n if mutual_info < 1e-4 and normalizer < 1e-4:\n # return zero to prevent very high values of normalized mutual information\n # e.g. mutual information = -1.3e-12, normalizer = -1.6e-12 \n # -> normalized conditional entropy = 812.5\n return 0\n else:\n return mutual_info / normalizer\n else:\n return hx - hcon", "def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))", "def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * (co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0", "def norm(self):", "def mutual_information_2d(x, y, sigma=1, normalized=False):\n \n bins = (256, 256)\n \n jh = np.histogram2d(x, y, bins=bins)[0]\n \n # smooth the jh with a gaussian filter of given sigma\n ndimage.gaussian_filter(jh, sigma=sigma, mode='constant',\n output=jh)\n \n # compute marginal histograms\n jh = jh + EPS\n sh = np.sum(jh)\n jh = jh / sh\n s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))\n s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))\n \n # Normalised Mutual Information of:\n # Studholme, jhill & jhawkes (1998).\n # \"A normalized entropy measure of 3-D medical image alignment\".\n # in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 132-143.\n if normalized:\n mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2)))\n / np.sum(jh * np.log(jh))) - 1\n else:\n mi = ( np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1))\n - np.sum(s2 * np.log(s2)))\n \n return mi", "def adjusted_mutual_info(self):\n # Prepare row totals and check for special cases\n row_totals = np.fromiter(self.iter_row_totals(), dtype=np.int64)\n col_totals = np.fromiter(self.iter_col_totals(), dtype=np.int64)\n R = len(row_totals)\n C = len(col_totals)\n if R == C == 1 or R == C == 0:\n # No clustering since the data is not split. 
This is a perfect match\n # hence return 1.0.\n return 1.0\n\n # In one step, calculate entropy for each labeling and mutual\n # information\n h_true, h_pred, mi = self._entropies()\n mi_max = max(h_true, h_pred)\n\n # Calculate the expected value for the MI\n emi = emi_from_margins(row_totals, col_totals)\n\n # Calculate the adjusted MI score\n ami = (mi - emi) / (mi_max - emi)\n return ami", "def get_mutual_information(c_wic, c_wioc, c_owic, c_owioc):\n # total word count\n c_total = c_wic + c_wioc + c_owic + c_owioc\n\n mi_1 = (c_wic / float(c_total)) * log10((c_total * c_wic) /\n float((c_wic + c_wioc) * (c_wic + c_owic)))\n mi_2 = (c_owic / float(c_total)) * log10((c_total * c_owic) /\n float((c_owic + c_owioc) * (c_wic + c_owic)))\n mi_3 = (c_wioc / float(c_total)) * log10((c_total * c_wioc) /\n float((c_wic + c_wioc) * (c_wioc + c_owioc)))\n mi_4 = (c_owioc / float(c_total)) * log10((c_total * c_owioc) /\n float((c_owic + c_owioc) * (c_wioc + c_owioc)))\n\n return mi_1 + mi_2 + mi_3 + mi_4", "def cmi_norm(self, query, tuples):\r\n P_, vocab, H_D = self.entropy_norm(tuples)\r\n I_D = [] # I(D; q) = H(D) - H(D|q)\r\n I_as = H_D # I(D; q1, q2,...,qn) = H(D) - H(D|q1) - ... - H(D|qn)\r\n\r\n for q in query:\r\n P_wq = odict({w + '|' + q: self.condP(w, q, tuples)\r\n for w in vocab})\r\n H_Dq = -P_[q] * sum(P_wq[pwq] * log2(P_wq[pwq])\r\n for pwq in P_wq.keys())\r\n I_D.append((q, H_D - H_Dq))\r\n I_as -= H_Dq\r\n # TODO: define a,b,c for giving negative reward\r\n # if not (mmi_norm(a,b, tuples) > mmi_norm(b,c, tuples) > mmi_norm(a,c, tuples)):\r\n # I_as = self.zeta\r\n return I_D, I_as", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def norm(self):\n raise NotImplementedError", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def norm(self):\n # TODO: implement\n return", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = 
joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi", "def mutual_information(transposed, transposed_2 = False):\n\tmi = []\n\tlength = range(len(transposed))\n\tfor i in length:\n\t\tentropy_i = entropy(transposed[i])\n\t\tmi_list = []\n\t\tif transposed_2 == False:\n\t\t\tfor j in length:\n\t\t\t\tentropy_j = entropy(transposed[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\n\t\telse:\n\t\t\tlength_2 = range(len(transposed_2))\n\t\t\tfor j in length_2:\n\t\t\t\tentropy_j = entropy(transposed_2[j])\n\t\t\t\tjoint = joint_entropy(transposed[i], transposed_2[j])\n\t\t\t\tmi_calc = entropy_i + entropy_j - joint\n\t\t\t\tmi_list.append(mi_calc)\n\t\t\tmi.append(mi_list)\n\treturn mi", "def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))", "def pairwiseMutualInformation(align, nperms=1e4):\n L=len(align[align.index[0]])\n columns = [align.map(lambda s: s[i]) for i in np.arange(L)]\n M = np.nan*np.zeros((L, L))\n p = np.nan*np.zeros((L, L))\n Mstar = np.nan*np.zeros((L, L))\n for xi, yi in itertools.combinations(np.arange(L), 2):\n freqx = objhist(columns[xi])\n freqy = objhist(columns[yi])\n\n tmpM, tmpMstar, tmpp, Hx, Hy, Hxy= mutual_information(columns[xi],\n columns[yi],\n logfunc=np.log2,\n nperms=nperms)\n \n \"\"\"We wouldn't need to test invariant sites or a site with itself\"\"\"\n if len(freqx) == 1 or len(freqy) == 1:\n tmpp = np.nan\n elif xi == yi:\n tmpp = np.np.nan\n\n M[xi, yi] = tmpM\n p[xi, yi] = tmpp\n Mstar[xi, yi] = tmpMstar\n q = adjustnonnan(p)\n\n return M, Mstar, p, q", "def mutual_information(x, y, logfunc=np.log2, nperms=1e4):\n def entropy(freqDict):\n return -np.array([p*logFunc(p) for p in freqDict.values()]).sum()\n freqx = objhist(x)\n freqy = objhist(y)\n \n Hx = freqx.entropy()\n Hy = freqy.entropy()\n Hxy = objhist(zip(x,y)).entropy()\n M = Hx + Hy - Hxy\n Mstar = 2*M / (Hx+Hy)\n\n if len(freqx)==1 or len(freqy)==1:\n p = 1\n elif np.all([xi==yi for xi,yi in zip(x,y)]):\n p = 0\n else:\n Mperms = np.array([Hx + Hy - objhist(zip(permutation(x),y)).entropy() for i in np.arange(nperms)])\n p = (Mperms >= M).sum() / nperms\n\n return M, Mstar, p, Hx, Hy, Hxy", "def MutualInformation(x, y, bins):\n hist_xy, x_edges, y_edges = np.histogram2d(x, y, bins)\n return sklearn.metrics.mutual_info_score(None, None, hist_xy)", "def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))", "def metallicity(method, emsystem):\n if method == 'PG16':\n # Requires Hbeta, [OII], [OIII], [NII], [SII]\n R2 = (emsystem.get_emline('[OII] 3726').attrib['flux'] +\n emsystem.get_emline('[OII] 3729').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n R3 = (emsystem.get_emline('[OIII] 4959').attrib['flux'] +\n emsystem.get_emline('[OIII] 5007').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n N2 = (emsystem.get_emline('[NII] 6548').attrib['flux'] +\n emsystem.get_emline('[NII] 6584').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n S2 = (emsystem.get_emline('[SII] 6716').attrib['flux'] +\n emsystem.get_emline('[SII] 6731').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n # Proceed\n if np.log10(N2) < -0.6:\n r_val = 7.932 + 0.944*np.log10(R3/R2) + 0.695*np.log10(N2) + 
\\\n ((0.97 - 0.291*np.log10(R3/R2)) - 0.019*np.log10(N2))*np.log10(R2)\n\n s_val = 8.072 + 0.789*np.log10(R3/S2) + 0.726*np.log10(N2) + \\\n (1.069 - 0.170*np.log10(R3/S2) +0.022*np.log10(N2))*np.log10(S2)\n else:\n r_val = 8.589 + 0.022*np.log10(R3/R2) + 0.399*np.log10(N2) + \\\n (-0.137 + 0.164*np.log10(R3/R2) + 0.589*np.log10(N2))*np.log10(R2)\n\n s_val = 8.424 + 0.030*np.log10(R3/S2) + 0.751*np.log10(N2) + \\\n (-0.349 + 0.182*np.log10(R3/S2) +0.508*np.log10(N2))*np.log10(S2)\n return r_val.decompose().value, s_val.decompose().value", "def calc_mutual_information(probability_mat):\n\n marginals = sp.outer(\n sp.sum(probability_mat, axis=1), sp.sum(probability_mat, axis=0))\n p = probability_mat[probability_mat != 0.0]\n m = marginals[probability_mat != 0.0]\n return sp.sum(p * sp.log(p / m))", "def molar_mass_amu():\n return Equivalency([(si.g / si.mol, misc.u)], \"molar_mass_amu\")", "def compute_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # loop over all the complexes in the database\n first = True\n for comp in tqdm(self.index_complexes):\n fname, molname = comp[0], comp[1]\n\n # get the feature/target\n if self.mapfly:\n feature, target = self.map_one_molecule(\n fname, mol=molname)\n else:\n feature, target = self.load_one_molecule(\n fname, mol=molname)\n\n # create the norm isntances at the first passage\n if first:\n self.param_norm = {'features': [], 'targets': None}\n for ifeat in range(feature.shape[0]):\n self.param_norm['features'].append(NormParam())\n self.param_norm['targets'] = MinMaxParam()\n first = False\n\n # update the norm instances\n for ifeat, mat in enumerate(feature):\n self.param_norm['features'][ifeat].add(\n np.mean(mat), np.var(mat))\n self.param_norm['targets'].update(target)\n\n # process the std of the features and make array for fast access\n nfeat, ncomplex = len(\n self.param_norm['features']), len(self.index_complexes)\n self.feature_mean, self.feature_std = [], []\n for ifeat in range(nfeat):\n\n # process the std and check\n self.param_norm['features'][ifeat].process(ncomplex)\n if self.param_norm['features'][ifeat].std == 0:\n logger.info(' Final STD Null. 
Changed it to 1')\n self.param_norm['features'][ifeat].std = 1\n\n # store as array for fast access\n self.feature_mean.append(\n self.param_norm['features'][ifeat].mean)\n self.feature_std.append(\n self.param_norm['features'][ifeat].std)\n\n self.target_min = self.param_norm['targets'].min[0]\n self.target_max = self.param_norm['targets'].max[0]\n\n logger.info(f'{self.target_min}, {self.target_max}')", "def get_center_of_mass_allies(self,obs):", "def norm(self):\n self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))\n self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))", "def tonality(self, mdct_norm):\n return self.psychoacoustic.tonality(mdct_norm)", "def mu(self):\n return self.mass * G", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def mass(self):\n\t\traise NotImplementedError", "def get_norms(self):\n l1_sum = 0\n l2_sum = 0\n actives = 0\n for lbl in self.labels:\n for fid in self.w[lbl]:\n # apply and remaing L1 penalities at the end of training.\n alpha = self.s - self.lastW[lbl].get(fid,0)\n self.w[lbl][fid] = self.w[lbl].get(fid, 0) - alpha\n weight = self.w[lbl][fid]\n l1_sum += weight if weight > 0 else -weight\n l2_sum += weight * weight\n if weight != 0:\n actives += 1\n l2_sum = math.sqrt(l2_sum)\n return (l1_sum,l2_sum,actives)", "def mu(self):\n return self.generic_getter(get_chemical_potential, \"mu\", \"convert_energy\")", "def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated output ...\n a = np.dot(self.sens_mat, c).astype(np.bool)\n # ... and represent it as a single integer\n a = np.dot(base, a)\n\n prob_a[a] += prob_c\n \n # normalize the output to make it a probability distribution\n prob_a /= prob_a.sum()\n \n # calculate the mutual information\n MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0)\n \n if ret_prob_activity:\n return MI, prob_a\n else:\n return MI", "def get_mutual_information_table(self, dims_to_use=None, ignore_negative_values=True, use_correlation=False):\n from mlabwrap import mlab\n bad_dims = self.get_markers('surface_ignore')\n bad_dims.append('Cell Length')\n bad_dims.append('Time')\n bad_dims.append('191-DNA')\n bad_dims.append('193-DNA')\n bad_dims.append('103-Viability')\n bad_dims.append('cluster_name')\n bad_dims.append('stim')\n bad_dims.append('cluster_num')\n if not dims_to_use:\n dims_to_use = self.dims[:]\n dims_to_use = [d for d in dims_to_use if not d in bad_dims] \n num_dims = len(dims_to_use)\n res = np.zeros((num_dims, num_dims))\n logging.info(\n 'Calculating mutual information for %d pairs...' 
% ((num_dims ** 2 - num_dims) / 2))\n timer = MultiTimer((num_dims ** 2 - num_dims) / 2)\n for i in xrange(num_dims):\n for j in xrange(i):\n arr = self.get_points(dims_to_use[i], dims_to_use[j])\n if ignore_negative_values:\n arr = arr[np.all(arr > 0, axis=1)]\n if arr.shape[0] < 100:\n logging.warning('Less than 100 cells in MI calculation for (%s, %s)' % (dims_to_use[i], dims_to_use[j]))\n res[j,i] = 0\n res[i,j] = 0\n continue\n if use_correlation:\n res[i,j] = np.corrcoef(arr.T[0], arr.T[1])[0,1]\n else:\n res[i,j] = mlab.mutualinfo_ap(arr, nout=1)\n res[j,i] = res[i,j]\n timer.complete_task('%s, %s' % (dims_to_use[i], dims_to_use[j]))\n return DataTable(res, dims_to_use)", "def computeMuSigma(self):\n totedge = self.countEdges()\n for i in range(self.totbs):\n prob = 1.0 * self.totedge[i+1] / totedge\n self.mu += prob\n self.sigma += prob * (1 - prob)\n if self.sigma > 0:\n self.sigma = math.sqrt(self.sigma)\n print \"Mu = {}, Sigma = {}\".format(self.mu, self.sigma)", "def mutual_information_from_table(P):\n P_nan = P.copy()\n P_nan[P_nan == 0] = np.nan\n\n marginals_p1 = np.nansum(P_nan, axis=1)\n marginals_p2 = np.nansum(P_nan, axis=0)\n\n return np.nansum(np.multiply(P_nan, np.log2(P_nan / (np.tensordot(marginals_p1, marginals_p2, axes=0)))))", "def exo1():\n randu = randn(N/ 2, N/ 2, 2); % a random vector field\n b = 2\n for i in 1: 4:\n LLs_u = Li{i}(LiS{i}(randu))\n % relative error should be very small\n norm(abs(LLs_u(: ) - b*randu(: )))/ norm(randu(: ))", "def GetNormal(self):\n ...", "def fit_normal(distogram):\n L = distogram.shape[1]\n params = torch.empty((3, L, L))\n \n for i in range(L):\n for j in range(L):\n m, s = calc_moments(distogram[:, i, j])\n scalar = torch.max(distogram[:, i, j]) / normal_distr(m, m, s)\n params[0, i, j], params[1, i, j], params[2, i, j] = m, s, scalar\n \n return params", "def normalize_features(block, norm=1):\n for k in block:\n for b in block[k]:\n nrm = np.sqrt((block[k][b].reshape((block[k][b].shape[0],-1))**2).sum(axis=1).mean(axis=0))\n if nrm > 0.0:\n block[k][b] *= norm/nrm", "def compute_empirical_mutual_info_nats(var1_values, var2_values):\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n\n empirical_mutual_info_nats = 0.0\n \n var1_distribution = compute_empirical_distribution(var1_values)\n var2_distribution = compute_empirical_distribution(var2_values)\n joint_distribution = compute_empirical_distribution(list(zip(var1_values,var2_values)))\n \n empirical_mutual_info_nats = 0\n for var1 in var1_distribution:\n for var2 in var2_distribution:\n empirical_mutual_info_nats += joint_distribution[(var1, var2)] \\\n * np.log(joint_distribution[(var1,var2)]/(var1_distribution[var1]*var2_distribution[var2]))\n \n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return empirical_mutual_info_nats", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def calc_measure(self, measure='Statistical Complexity', coordmom=0, probmom=0, rllen=0, clusmom=0, samelev=True):\n\n self.measure = measure\n\n # Allow for changed values of the following class variables\n # to be passed to calc measure\n if coordmom != 0:\n self.coordmom = coordmom\n if probmom != 0:\n self.probmom = probmom\n if rllen != 0:\n self.rllen = rllen\n if clusmom != 0:\n self.clusmom = clusmom\n if samelev == False:\n self.samelev = False\n\n if 
self.measure == \"CM Entropy\":\n if np.isnan(self.cme):\n self.cme = np.sum(\n -np.where(self.comat > 0.0, self.comat, 1.0) * np.where(self.comat > 0.0, np.log2(self.comat), 0.0))\n\n self.val = self.cme\n self.currval = \"CM Entropy\"\n\n elif self.measure == \"EM Entropy\":\n if np.isnan(self.eme):\n import scipy.linalg as L\n\n if not self.emest:\n self.est_em()\n # get left eigenvector associated with lambda = 1\n # (largest eignevalue)\n [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)\n # Node probabilities are elements of normalized left eigenvector\n # associated with eigenvale 1 (assumes Scipy convention of\n # returning sorted eignevalues so eignevalue 1 in this case is\n # the first element of the returned eigenvalue array)\n # nodep = v[:,0]/sum(v[:,0])\n # ---- no longer make the above assumption\n # found it was wrong - now specifically ask for eigenvector\n # associated with eigenvalue 1 (greatest real part)\n maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]\n nodep = v[:, maxind] / sum(v[:, maxind])\n self.eme = -np.sum(\n np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))\n\n self.val = self.eme\n self.currval = \"EM Entropy\"\n\n elif self.measure == \"Statistical Complexity\":\n if np.isnan(self.stc):\n import scipy.linalg as L\n # estimate epsilon machine if it hasn't been made\n if not self.emest:\n self.est_em()\n # get left eigenvector associated with lambda = 1\n # (largest eignevalue)\n [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)\n # Node probabilities are elements of normalized left eigenvector # associated with eigenvale 1 (assumes Scipy convention of\n # returning sorted eignevalues so eignevalue 1 in this case is\n # the first element of the returned eigenvalue array)\n # nodep = v[:,0]/sum(v[:,0])\n # ---- no longer make the above assumption\n # found it was wrong - now specifically ask for eigenvector\n # associated with eigenvalue 1 (greatest real part)\n maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]\n nodep = v[:, maxind] / sum(v[:, maxind])\n self.stc = -np.sum(nodep * np.log2(nodep))\n\n self.val = self.stc\n self.currval = \"Statistical Complexity\"\n\n elif self.measure == \"Energy Uniformity\":\n if np.isnan(self.enu):\n self.enu = np.sum(np.where(self.comat > 0.0, self.comat * self.comat, 0.0))\n self.val = self.enu\n self.currval = \"Energy Uniformity\"\n\n elif self.measure == \"Maximum Probability\":\n if self.map is np.nan:\n self.map = np.max(self.comat)\n\n self.val = self.map\n self.currval = \"Maximum Probability\"\n\n elif self.measure == \"Contrast\":\n if np.isnan(self.con):\n if self.coordmom == 0 or self.probmom == 0:\n if self.coordmom == 0:\n print(\"Nonzero coordinate moment is required for calculating Contrast\")\n if self.probmom == 0:\n print(\"Nonzero probability moment is required for calculating Contrast\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n\n self.con = np.sum((np.abs(crows - ccols) ** self.coordmom) * (self.comat ** self.probmom))\n\n self.val = self.con\n self.currval = \"Contrast\"\n\n elif self.measure == \"Inverse Difference Moment\":\n if np.isnan(self.idm):\n if self.coordmom == 0 or self.probmom == 0:\n if self.coordmom == 0:\n print(\"Nonzero coordinate moment is required for calculating Inverse Difference Moment\")\n if self.probmom == 0:\n print(\"Nonzero probability moment is required 
for calculating Inverse Difference Moment\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n codiffs = np.abs(crows - ccols) ** self.coordmom\n # Set minimum coordinate difference for which you allow\n # probability to be calculated\n codiff_eps = 0.0000001\n # Do following so test divides don't blow up and\n # generte a warning\n codiffs_ok = np.where(codiffs > codiff_eps, codiffs, 1.0)\n self.idm = np.sum(np.where(codiffs > codiff_eps, (self.comat ** self.probmom) / codiffs_ok, 0.0))\n\n self.val = self.idm\n self.currval = \"Inverse Difference Moment\"\n\n elif self.measure == \"Correlation\":\n if np.isnan(self.cor):\n import scipy.stats as ss\n\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.\n ccols[:, i] = i + 1\n rowmom = np.sum(crows * self.comat)\n colmom = np.sum(ccols * self.comat)\n comatvar = np.var(np.ravel(self.comat * crows))\n self.cor = np.sum((crows - rowmom) * (ccols - colmom) * self.comat) / comatvar\n self.val = self.cor\n self.currval = \"Correlation\"\n\n elif self.measure == \"Probability of Run Length\":\n if np.isnan(self.prl):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Probability of Run Length\")\n else:\n colprobs = np.zeros(self.comat.shape[0])\n for i in range(self.comat.shape[0]):\n colprobs[i] = np.sum(self.comat[i, :])\n self.prl = 0.0\n for i in range(self.comat.shape[0]):\n if colprobs[i] != 0.0:\n self.prl += ((colprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)\n self.val = self.prl\n self.currval = \"Probability of Run Length\"\n\n elif self.measure == \"Epsilon Machine Run Length\":\n if np.isnan(self.erl):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Epsilon Machine Run Length\")\n else:\n if not self.emest:\n self.est_em()\n self.erl = 0.0\n colprobs = np.zeros(self.emmat.shape[0])\n for i in range(self.emmat.shape[0]):\n colprobs[i] = np.sum(self.emmat[i, :])\n for i in range(self.emmat.shape[0]):\n self.erl += ((colprobs[i] - self.emmat[i, i]) ** 2 * (self.emmat[i, i] ** (self.rllen - 1))) / (\n colprobs[i] ** self.rllen)\n self.val = self.erl\n self.currval = \"Epsilon Machine Run Length\"\n\n elif self.measure == \"Run Length Asymmetry\":\n if np.isnan(self.rla):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Run Length Asymmetry\")\n else:\n colprobs = np.zeros(self.comat.shape[0])\n rowprobs = np.zeros(self.comat.shape[0])\n for i in range(self.comat.shape[0]):\n colprobs[i] = np.sum(self.comat[i, :])\n rowprobs[i] = np.sum(self.comat[:, i])\n colval = 0.0\n rowval = 0.0\n for i in range(self.comat.shape[0]):\n if colprobs[i] != 0.0:\n colval += ((colprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)\n if rowprobs[i] != 0.0:\n rowval += ((rowprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (rowprobs[i] ** self.rllen)\n self.rla = np.abs(colval - rowval)\n self.val = self.rla\n self.currval = \"Run Length Asymmetry\"\n\n elif self.measure == \"Homogeneity\":\n if np.isnan(self.hom):\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n self.hom = 
np.sum((self.comat) / (1 + np.abs(crows - ccols)))\n self.val = self.hom\n self.currval = \"Homogeneity\"\n\n elif self.measure == \"Cluster Tendency\":\n if np.isnan(self.clt):\n if self.clusmom == 0:\n print(\"Nonzero cluster moment is required for calculating Cluster Tendency\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.\n ccols[:, i] = i + 1\n rowmom = np.sum(crows * self.comat)\n colmom = np.sum(ccols * self.comat)\n self.clt = np.sum(((crows + ccols - rowmom - colmom) ** self.clusmom) * self.comat)\n self.val = self.clt\n self.currval = \"Cluster Tendency\"\n\n elif self.measure == \"Multifractal Spectrum Energy Range\":\n if not self.emest: # estimate epsilon machine\n self.est_em()\n if not self.mfsest: # estimate multifractal spectrum\n self.est_multi_frac_spec()\n if self.mfsspec.size != 0:\n self.mfu = np.max(self.mfsspec[:, 0]) - np.min(self.mfsspec[:, 0])\n else:\n self.mfu = 0.0\n self.val = self.mfu\n self.currval = \"Multifractal Spectrum Energy Range\"\n\n elif self.measure == \"Multifractal Spectrum Entropy Range\":\n if not self.emest: # estimate epsilon machine\n self.est_em()\n if not self.mfsest: # estimate multifractal spectrum\n self.est_multi_frac_spec()\n if self.mfsspec.size != 0:\n self.mfs = np.max(self.mfsspec[:, 1]) - np.min(self.mfsspec[:, 1])\n else:\n self.mfs = 0.0\n self.val = self.mfs\n self.currval = \"Multifractal Spectrum Entropy Range\"\n\n else:\n \"Sorry don't know about texture measure \", self.measure", "def normalize(probabilities):\n for person in probabilities:\n\n # normalize the \"gene\"\n geneSum = probabilities[person][\"gene\"][0] + probabilities[person][\"gene\"][1] + probabilities[person][\"gene\"][2]\n for i in range(3):\n probabilities[person][\"gene\"][i] /= geneSum\n\n # normalize the \"trait\"\n traitSum = probabilities[person][\"trait\"][True] + probabilities[person][\"trait\"][False]\n probabilities[person][\"trait\"][True] /= traitSum\n probabilities[person][\"trait\"][False] /= traitSum", "def get_norma(self):\n return self.norma", "def mutual_information_from_data(X, Y, num_bins):\n N = X.size\n delta = 10e-10\n\n x_min, x_max = (X.min() - delta, X.max() + delta)\n y_min, y_max = (Y.min() - delta, Y.max() + delta)\n\n X_hist, X_bin = np.histogram(X, bins=num_bins, range=(x_min, x_max))\n Y_hist, Y_bin = np.histogram(Y, bins=num_bins, range=(y_min, y_max))\n\n X_states = np.digitize(X, X_bin)\n Y_states = np.digitize(Y, Y_bin)\n coords = Counter(zip(X_states, Y_states))\n\n joint_linear = np.zeros((config.NUM_STATES, config.NUM_STATES))\n for x, y in coords.keys():\n joint_linear[x - 1, y - 1] = coords[(x, y)] / N\n\n p_X = X_hist / N\n p_Y = Y_hist / N\n prod_XY = np.tensordot(p_X.T, p_Y, axes=0)\n\n div_XY = joint_linear / prod_XY\n div_XY[div_XY == 0] = np.nan\n\n return np.nansum(np.multiply(joint_linear, np.log2(div_XY)))", "def norm(self) -> \"Vector\":\n self.values = tuple(self/self.mag())\n return self", "def norm( self):\n return self._norm", "def mutual_information_estimate(self, approx_prob=False):\n \n # this might be not the right approach\n q_n = self.receptor_activity_estimate(approx_prob=approx_prob)\n q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob)\n \n # calculate the approximate mutual information\n return self._estimate_MI_from_q_values(q_n, q_nm)", "def semantic_vector(self,words, joint_words, info_content_norm):\n\t sent_set = set(words)\n\t 
semvec = np.zeros(len(joint_words))\n\t i = 0\n\t for joint_word in joint_words:\n\t if joint_word in sent_set:\n\t # if word in union exists in the sentence, s(i) = 1 (unnormalized)\n\t semvec[i] = 1.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)\n\t else:\n\t # find the most similar word in the joint set and set the sim value\n\t sim_word, max_sim = self.most_similar_word(joint_word, sent_set)\n\t semvec[i] = self.PHI if max_sim > self.PHI else 0.0\n\t if info_content_norm:\n\t semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)\n\t i = i + 1\n\t return semvec", "def eml_use_pseudowords_and_mle(xi, yi, deml):\n if xi not in deml[yi]:\n xi = pw(xi) # use pseudo-word instead\n\n return (deml[yi][xi]) / (sum(deml[yi].values()))", "def get_perfect_information(self):\n raise NotImplementedError", "def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)", "def kernel_mus(self, n_kernels: int):\n l_mu = [1.0]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n return l_mu", "def mass(self):\n\t\treturn self.volume*self.density", "def get_relevant_info(claim, articles, vectorizer, max_seq_len, spellcheck, use_ngrams):\n\n # Note: expects claim to be already cleaned\n vec_claim = vectorizer.transform_txt(claim, max_seq_len,\n use_ngrams=use_ngrams) # Claim vector - we'll use this to compare using cosine similarity\n similarities_and_sents = [] # Stores tuples of (cos sim, sentence)\n\n # Loop through all articles to construct supporting information\n for article in articles:\n sentences = tokenize_by_sentence(article)\n\n '''\n For each sentence, we clean and vectorize, then retrieve the cosine similarity of the claim vs the sentence\n '''\n for sentence in sentences:\n # Basic cleaning on sentence\n sentence = clean_txt(sentence, spellcheck)\n # Don't process for sentences less than 40 characters long - this usually means improper sentences/words\n if len(sentence) < 40:\n continue\n # Get vector of sentence and find cosine similarity\n vec_sent = vectorizer.transform_txt(sentence, max_seq_len,\n use_ngrams=use_ngrams)\n similarity = cos_sim(vec_claim, vec_sent)\n # Add to results\n similarities_and_sents.append((similarity, sentence))\n\n # Sort the similarities (in desc order) using their similarity\n sorted_sents = sorted(similarities_and_sents, key=lambda elem: elem[0], reverse=True)\n\n article_info = ''\n num_words = 0\n '''\n Construct relevant info - keep looping through sentences, adding sentences until we hit max_seq_len\n We'll surpass max_seq_len, but that's okay\n '''\n for similarity, sentence in sorted_sents:\n if num_words >= max_seq_len:\n break\n article_info += ' || ' + sentence # Add a separator\n num_words += len(sentence.split())\n return article_info", "def occupation_distribution(data):", "def make_male_3D_model\\\n 
(TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs):\n \"\"\"\n 1 Get t_m from TABLE_info\n \"\"\"\n width = TABLE_info[0]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get crvs from list.\n \"\"\"\n # m1\n m1_male_upper_crv = m1_male_crvs[0]\n m1_male_middle_crv = m1_male_crvs[1]\n m1_male_lower_crv = m1_male_crvs[2]\n\n # m2\n m2_male_left_upper_crv = m2_male_left_crvs[0]\n m2_male_left_middle_crv = m2_male_left_crvs[1]\n m2_male_left_lower_crv = m2_male_left_crvs[2]\n\n m2_male_right_upper_crv = m2_male_right_crvs[0]\n m2_male_right_middle_crv = m2_male_right_crvs[1]\n m2_male_right_lower_crv = m2_male_right_crvs[2]\n\n # m3\n m3_male_left_upper_crv = m3_male_left_crvs[0]\n m3_male_left_middle_crv = m3_male_left_crvs[1]\n m3_male_left_lower_crv = m3_male_left_crvs[2]\n\n m3_male_right_upper_crv = m3_male_right_crvs[0]\n m3_male_right_middle_crv = m3_male_right_crvs[1]\n m3_male_right_lower_crv = m3_male_right_crvs[2]\n\n # m4\n m4_male_upper_crv = m4_male_crvs[0]\n m4_male_middle_crv = m4_male_crvs[1]\n m4_male_lower_crv = m4_male_crvs[2]\n\n \"\"\"\n 3 Make 3D.\n \"\"\"\n # path\n start = (0, 0, 0)\n end = (0, 0, t_m)\n path = rs.AddLine(start, end)\n\n # m1\n m1_male_upper_model = rs.ExtrudeCurve(m1_male_upper_crv, path)\n m1_male_middle_model = rs.ExtrudeCurve(m1_male_middle_crv, path)\n m1_male_lower_model = rs.ExtrudeCurve(m1_male_lower_crv, path)\n\n rs.CapPlanarHoles(m1_male_upper_model)\n rs.CapPlanarHoles(m1_male_middle_model)\n rs.CapPlanarHoles(m1_male_lower_model)\n\n # m2 left\n m2_male_left_upper_model = rs.ExtrudeCurve(m2_male_left_upper_crv, path)\n m2_male_left_middle_model = rs.ExtrudeCurve(m2_male_left_middle_crv, path)\n m2_male_left_lower_model = rs.ExtrudeCurve(m2_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_left_upper_model)\n rs.CapPlanarHoles(m2_male_left_middle_model)\n rs.CapPlanarHoles(m2_male_left_lower_model)\n\n # m2 right\n m2_male_right_upper_model = rs.ExtrudeCurve(m2_male_right_upper_crv, path)\n m2_male_right_middle_model = rs.ExtrudeCurve(m2_male_right_middle_crv, path)\n m2_male_right_lower_model = rs.ExtrudeCurve(m2_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_right_upper_model)\n rs.CapPlanarHoles(m2_male_right_middle_model)\n rs.CapPlanarHoles(m2_male_right_lower_model)\n\n # m3 left\n m3_male_left_upper_model = rs.ExtrudeCurve(m3_male_left_upper_crv, path)\n m3_male_left_middle_model = rs.ExtrudeCurve(m3_male_left_middle_crv, path)\n m3_male_left_lower_model = rs.ExtrudeCurve(m3_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_left_upper_model)\n rs.CapPlanarHoles(m3_male_left_middle_model)\n rs.CapPlanarHoles(m3_male_left_lower_model)\n\n # m3 right\n m3_male_right_upper_model = rs.ExtrudeCurve(m3_male_right_upper_crv, path)\n m3_male_right_middle_model = rs.ExtrudeCurve(m3_male_right_middle_crv, path)\n m3_male_right_lower_model = rs.ExtrudeCurve(m3_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_right_upper_model)\n rs.CapPlanarHoles(m3_male_right_middle_model)\n rs.CapPlanarHoles(m3_male_right_lower_model)\n\n # m4\n m4_male_upper_model = rs.ExtrudeCurve(m4_male_upper_crv, path)\n m4_male_middle_model = rs.ExtrudeCurve(m4_male_middle_crv, path)\n m4_male_lower_model = rs.ExtrudeCurve(m4_male_lower_crv, path)\n\n rs.CapPlanarHoles(m4_male_upper_model)\n rs.CapPlanarHoles(m4_male_middle_model)\n rs.CapPlanarHoles(m4_male_lower_model)\n\n male_upper_models =\\\n [m1_male_upper_model, m2_male_left_upper_model, m2_male_right_upper_model,\\\n 
m3_male_left_upper_model, m3_male_right_upper_model, m4_male_upper_model]\n\n male_middle_models =\\\n [m1_male_middle_model, m2_male_left_middle_model, m2_male_right_middle_model,\\\n m3_male_left_middle_model, m3_male_right_middle_model, m4_male_middle_model]\n\n male_lower_models =\\\n [m1_male_lower_model, m2_male_left_lower_model, m2_male_right_lower_model,\\\n m3_male_left_lower_model, m3_male_right_lower_model, m4_male_lower_model]\n\n # move objects\n trans_upper = (0, 0, 2 * t_m)\n trans_middle = (0, 0, t_m)\n rs.MoveObjects(male_upper_models, trans_upper)\n rs.MoveObjects(male_middle_models, trans_middle)\n\n\n # deploy models\n O = (0, 0, 0)\n angle = 90\n rs.RotateObjects(male_upper_models, O, angle, None, False)\n rs.RotateObjects(male_middle_models, O, angle, None, False)\n rs.RotateObjects(male_lower_models, O, angle, None, False)\n\n axis = (1, 0, 0)\n rs.RotateObjects(male_upper_models, O, angle, axis, False)\n rs.RotateObjects(male_middle_models, O, angle, axis, False)\n rs.RotateObjects(male_lower_models, O, angle, axis, False)\n\n trans = (-1.5 * width, 0, 0)\n rs.MoveObjects(male_upper_models, trans)\n rs.MoveObjects(male_middle_models, trans)\n rs.MoveObjects(male_lower_models, trans)\n\n rs.DeleteObject(path)\n\n male_models = [male_upper_models, male_middle_models, male_lower_models]", "def get_comp_vec(self, sim, field, start, end):\n # Compare all other sims to our best estimate, which is sim with highest number of\n # basis terms (last in list cuz sorting)\n\n # Get the proper file extension depending on the field.\n norm = 'norm'+field\n # Get the comparison vector\n vecs = [sim.data[field+comp][start:end] for comp in ('x', 'y', 'z')]\n normvec = sim.get_scalar_quantity('normE')\n normvec = normvec[start:end]**2\n return vecs, normvec", "def mutual_info_matrix(time_series, num_of_bins):\n num_of_rafts, interval_width = time_series.shape\n mi_matrix = np.zeros((num_of_rafts, num_of_rafts))\n\n for i in range(num_of_rafts):\n for j in range(i + 1):\n i0 = time_series[i, :].copy()\n j0 = time_series[j, :].copy()\n c_xy = np.histogram2d(i0, j0, num_of_bins)[0]\n mi = mutual_info_score(None, None, contingency=c_xy) * np.log2(np.e)\n # in unit of bits, * np.log2(np.e) to convert nats to bits\n mi_matrix[i, j] = mi\n mi_matrix[j, i] = mi\n\n return mi_matrix", "def CDFMeasureNorm(self):\n norm = 1.0/2.0\n return norm", "def example_A():\n d = dit.example_dists.Xor()\n\n # Calculate marginal maximum entropy distributions up to order 3.\n maxent_dists = dit.algorithms.marginal_maxent_dists(d, 3)\n\n print_output(d, maxent_dists)\n\n return maxent_dists", "def wm(mu,se):\n\tnomsum =[]\n\tw = []\n\tfor i in range(len(mu)):\n\t\tnom = mu[i] / (se[i])**2\n\t\tnomsum.append(nom)\n\t\tw.append(1/(se[i])**2)\n\twmu = sum(nomsum) / sum(w)\n\treturn (wmu)", "def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200", "def energy_snorm(s_mat):\n \n itr = int(np.shape(s_mat)[0])\n \n fnorm = np.linalg.norm(s_mat)\n \n norm_2 = s_mat[0]/fnorm\n norm_c = []\n norm_i = []\n \n for i in range(itr):\n \n norm_c.append(np.linalg.norm(s_mat[0:i+1])/fnorm)\n norm_i.append(np.linalg.norm(s_mat[i])/fnorm)\n return(norm_2, norm_c, norm_i)", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n norms = {}\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", 
np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self, \"_normalization\", np.pi ** 2 * self.N * norms[self.nstep]\n )\n\n raise NotImplementedError(\"Need to implement dispersion Lüscher counter terms.\")", "def test_normalize(self):\n normal_vec = np.array([3, 4]) # |normal_vec| = 5\n support_factor = 1\n scaling_factor = 1.\n normed_normal_vec = np.array([3 / 5, 4 / 5])\n normed_support_factor = 5\n normed_scaling_factor = 5\n # noinspection PyTypeChecker\n emb = ConceptEmbedding(normal_vec=normal_vec,\n support_factor=support_factor)\n assert emb.scaling_factor == scaling_factor, \\\n \"Scaling factor wrongly initialized.\"\n normed_emb = emb.normalize()\n\n # Normalization yields new instance\n assert normed_emb is not emb, \"Normalization did not yield new instance\"\n\n # Format checks\n assert normed_emb.normal_vec.shape == emb.normal_vec.shape\n assert np.array(normed_emb.support_factor).shape == np.array(\n emb.support_factor).shape\n assert np.array(normed_emb.scaling_factor).shape == np.array(\n emb.scaling_factor).shape\n\n # Value checks\n for key, (expected, obtained) in \\\n {\"normal_vec\": (normed_normal_vec, normed_emb.normal_vec),\n \"support_factor\": (normed_support_factor,\n normed_emb.support_factor),\n \"scaling_factor\": (normed_scaling_factor,\n normed_emb.scaling_factor)\n }.items():\n assert np.allclose(obtained, expected), \\\n (\"Wrong normalized {}: expected {}, but was {}\"\n .format(key, expected, obtained))", "def test_get_unnormed_E(self):\n # Test that error is raised if spw_Ndlys is not set\n uvd = copy.deepcopy(self.uvd)\n ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'])\n ds.spw_Ndlys = None\n pytest.raises(ValueError, ds.get_unnormed_E, 'placeholder', 'placeholder')\n\n # Test that if R1 = R2, then the result is Hermitian\n ds.spw_Ndlys = 7\n random_R = generate_pos_def_all_pos(ds.spw_Nfreqs)\n wgt_matrix_dict = {} # The keys here have no significance except they are formatted right\n wgt_matrix_dict[('red', (24, 25))] = random_R\n wgt_matrix_dict[('blue', (24, 25))] = random_R\n ds.set_R(wgt_matrix_dict)\n E_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))\n multiplicative_tolerance = 0.0000001\n for matrix in E_matrices:\n diff_norm = np.linalg.norm(matrix.T.conj() - matrix)\n self.assertLessEqual(diff_norm, multiplicative_tolerance)\n\n #Test for the correct shape when exact_norm is True\n ds_c = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'], beam=self.bm)\n ds_c.spw_Ndlys = 10\n random_R = generate_pos_def_all_pos(ds_c.spw_Nfreqs)\n wgt_matrix_dict = {}\n wgt_matrix_dict[('red', (24, 25))] = random_R\n wgt_matrix_dict[('blue', (24, 25))] = random_R\n ds_c.set_R(wgt_matrix_dict)\n E_matrices = ds_c.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)), exact_norm=True, pol='xx')\n self.assertEqual(E_matrices.shape, (ds_c.spw_Ndlys, ds_c.spw_Nfreqs, ds_c.spw_Nfreqs))\n\n # Test that if R1 != R2, then i) E^{12,dagger} = E^{21}\n random_R2 = generate_pos_def_all_pos(ds.spw_Nfreqs)\n wgt_matrix_dict = {}\n wgt_matrix_dict[('red', (24, 25))] = random_R\n wgt_matrix_dict[('blue', (24, 25))] = random_R2\n ds.set_R(wgt_matrix_dict)\n E12_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))\n E21_matrices = ds.get_unnormed_E(('blue', (24, 25)), ('red', (24, 25)))\n multiplicative_tolerance = 0.0000001\n for mat12,mat21 in zip(E12_matrices,E21_matrices):\n diff_norm = np.linalg.norm(mat12.T.conj() - mat21)\n 
self.assertLessEqual(diff_norm, multiplicative_tolerance)\n\n # Test that if there is only one delay bin and R1 = R2 = I, then\n # the E matrices are all 0.5s exept in flagged channels.\n ds.spw_Ndlys = 1\n wgt_matrix_dict = {}\n wgt_matrix_dict[('red', (24, 25))] = np.eye(ds.spw_Nfreqs)\n wgt_matrix_dict[('blue', (24, 25))] = np.eye(ds.spw_Nfreqs)\n flags1 = np.diag(ds.Y(('red', (24, 25))))\n flags2 = np.diag(ds.Y(('blue', (24, 25))))\n ds.set_R(wgt_matrix_dict)\n E_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))\n multiplicative_tolerance = 0.0000001\n for matrix in E_matrices:\n for i in range(ds.spw_Nfreqs):\n for j in range(ds.spw_Nfreqs):\n if flags1[i] * flags2[j] == 0: # either channel flagged\n self.assertAlmostEqual(matrix[i,j], 0.)\n else:\n self.assertAlmostEqual(matrix[i,j], 0.5)", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def __normalize_mode(M, mode, normtype):\n colNorm = np.apply_along_axis(np.linalg.norm, 0, M.U[mode], normtype)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n llmbda = M.lmbda * colNorm\n tempB = M.U[mode] / colNorm[np.newaxis, :]\n return llmbda,tempB", "def info(probs):\n e = probs.reshape(-1, 3).dot(_alleles)\n f = probs.reshape(-1, 3).dot(_sq_alleles)\n theta_hat = e.sum() / (2 * len(e))\n info = 1\n if theta_hat > 0 and theta_hat < 1:\n info -= (f - numpy.square(e)).sum() / (2 * len(e) * theta_hat * (1 - theta_hat))\n return e, info", "def get_LDAU(self, U_Fe_N = 7., U_Fe_C = 5.):\n\n self.check_structure_is_read()\n\n self._modify_structure()\n\n\n # Initialize various strings\n LDAUJ = ''\n LDAUL = ''\n LDAUU = ''\n MAGMOM = ''\n\n # count the number of each species. 
\n # NOTE: pymatgen.composition does not understand decorations;\n # we have to hack this way.\n\n self.species_dict = OrderedDict()\n\n for s in self.structure.types_of_specie:\n self.species_dict[s] = 0.\n\n for s in self.structure.sites:\n self.species_dict[s.specie] += 1.\n\n # Generate MAGMOM, which must distinguish every magnetization state\n for s in self.structure.types_of_specie:\n\n if s == self.Fe_HS_2plus:\n MAGMOM += ' %i*4'%self.species_dict[s] # low spin\n elif s == self.Fe_HS_3plus:\n MAGMOM += ' %i*5'%self.species_dict[s] # high spin\n elif s == self.Fe_LS_2plus:\n MAGMOM += ' %i*0'%self.species_dict[s] # low spin\n elif s == self.Fe_LS_3plus:\n MAGMOM += ' %i*1'%self.species_dict[s] # high spin\n else:\n MAGMOM += ' %i*0.6'%self.species_dict[s] \n\n\n for s in self.structure.types_of_specie:\n LDAUJ += ' 0'\n\n if s == self.Fe_LS_2plus or s == self.Fe_LS_3plus: \n LDAUL += ' 2'\n LDAUU += ' %2.1f'%U_Fe_C\n elif s == self.Fe_HS_2plus or s == self.Fe_HS_3plus: \n LDAUL += ' 2'\n LDAUU += ' %2.1f'%U_Fe_N\n else:\n LDAUL += ' 0'\n LDAUU += ' 0'\n\n\n LDAU_dict = { 'LDAU':True, # use LDA+U (GGA+U in fact)\n 'LDAUTYPE':2, # simplified Dudarev Formalism\n 'LDAUPRINT':1, # talk to me\n 'MAGMOM':MAGMOM, # magnetic moments\n 'LDAUL':LDAUL, \n 'LDAUJ':LDAUJ, \n 'LDAUU':LDAUU,\n 'LMAXMIX':4} # this is essential for d-element GGA+U, but not the VASP default\n\n poscar_need_hack = True\n potcar_need_hack = True\n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def _compute_nmig(mus_train, ys_train, active):\n print(\"start nmig\")\n score_dict = {}\n discretized_mus = utils.make_discretizer(mus_train)\n m = utils.discrete_mutual_info(discretized_mus, ys_train)\n # m shape: (10, nr_ground_truth)\n print(\"finished discretizing\")\n assert m.shape[0] == mus_train.shape[0]\n assert m.shape[1] == ys_train.shape[0]\n entropy = utils.discrete_entropy(ys_train)\n if active is not None:\n assert len(active) <= ys_train.shape[0]\n m = m[:, active]\n entropy = entropy[active]\n nr_lt = m.shape[0]\n nr_gt = m.shape[1]\n # m is [num_latents, num_factors]\n\n sorted_m = np.sort(m, axis=0)[::-1]\n individual_mig = np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:])\n print(\"ind mig\", individual_mig)\n mig = np.mean(individual_mig)\n\n if nr_gt == 1:\n nmig = np.max(np.divide(m, entropy[:]))\n else:\n m = np.divide(m, entropy[:])\n partials = np.zeros((nr_gt))\n best_ids = np.argmax(m, axis=0)\n for i in range(nr_gt):\n mask = np.ones((nr_gt), dtype=np.bool)\n mask[i] = 0\n best_id = best_ids[i]\n partials[i] = m[best_id, i] - np.max(m[best_id, mask])\n nmig = np.mean(partials)\n print(\"ind nmig\", partials)\n score_dict[\"discrete_mig\"] = mig\n score_dict[\"discrete_nmig\"] = nmig\n\n return score_dict", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def norm_bound(self, input_mags):\n return input_mags[0]", "def mu(self):\n return self._mu", "def _compute_moments(self, u):\n\n # Get the moments from the parent Gaussian Markov Chain\n #u = self.parents[0].get_moments() #message_to_child()\n\n # Send only moments <X(n)> and <X(n)X(n)> but not <X(n-1)X(n)>\n return u[:2]", "def mutations(self, mu):\n # make a copy of the data, and make it an integer\n new_alleles = np.copy(self.geno)\n\n # for an array of the same shape as newAlleles, draw mutations at each\n # position with probability mu.\n vals = 
np.random.binomial(1, mu, self.size * self.nloci * 2)\n mutate = np.reshape(vals, [ self.size, self.nloci, 2])\n mutate = (mutate == 1)\n # swap zeroes and ones.\n new_alleles[mutate] = 1 - new_alleles[mutate] \n\n # Apply to geno_probs\n new_geno_probs = calculate_geno_probs(new_alleles, mu=mu)\n\n output = genotypeArray(\n geno = new_alleles,\n geno_probs = new_geno_probs,\n names = self.names,\n mothers= self.mothers,\n fathers = self.fathers\n )\n\n return output", "def alignment_uncertainty(w, I, d=0):\n return sqrt(w**2/I + d**2/12.)", "def getNorm(self, norm=lambda l: (sum(map(lambda x: x ** 2, l))) ** (1 / 2)):\n return norm(self.components)", "def calc_metrics(self, data, output):\n\n L1NormITAE = self.calcL1NormITAE(data)\n L1NormAbs = self.calcL1NormAbs(data)\n #\n # print 'ITAE score: ', errorIntegral\n print 'L1NormITAE: ', L1NormITAE\n print 'L1NormAbs: ', L1NormAbs\n print '\\n'\n output.update({'L1NormITAE': L1NormITAE, 'L1NormAbs': L1NormAbs})", "def magni(vector):\n return(np.linalg.norm(vector))", "def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))", "def alon_matias_szegedy(self, item):\n for i, seed in enumerate(self.random_seeds):\n self.ams_estimates[i] += _one_sign(item, seed)", "def stdProbabilityNorm(self):\n return 0.5", "def norm2(self):\n return getattr(self, self.norm2_name)", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def mutual_information(self, excitation_method='auto', **kwargs):\n if excitation_method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n excitation_method = 'brute_force'\n else:\n excitation_method = 'monte_carlo'\n \n if excitation_method == 'brute_force' or excitation_method == 'brute-force':\n 
return self.mutual_information_brute_force(**kwargs)\n elif excitation_method == 'monte_carlo' or excitation_method == 'monte-carlo':\n return self.mutual_information_monte_carlo(**kwargs)\n elif excitation_method == 'estimate':\n return self.mutual_information_estimate(**kwargs)\n else:\n raise ValueError('Unknown excitation_method `%s`.' % excitation_method)", "def mortality(self):\n pass", "def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 1))\n self.c = np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))", "def normal(self) -> 'MultiVector':\n\n return self / np.sqrt(abs(self.mag2()))", "def update_entity_embedding(self, entity, ims, mu):\n self.source_entity = self.ent_embs.ent_embs.weight.data[entity]\n self.ent_embs.ent_embs.weight.data[entity] = mu * self.source_entity + (1 - mu) * torch.mean(ims, dim=0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def kernal_mus(n_kernels):\n l_mu = [1]\n if n_kernels == 1:\n return l_mu\n\n bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]\n l_mu.append(1 - bin_size / 2) # mu: middle of the bin\n for i in range(1, n_kernels - 1):\n l_mu.append(l_mu[i] - bin_size)\n print(l_mu)\n return l_mu", "def UniformDigest(self, mol_, at_, mxstep, num):\n ncase = num*num*num\n samps=MakeUniform(mol_.coords[at_],mxstep,num)\n if (self.name==\"SymFunc\"):\n inputs = self.Emb(self, mol_, at_, samps, None, False) #(self.EmbF())(mol_.coords, samps, mol_.atoms, self.eles , self.SensRadius, self.ngrid, at_, 0.0)\n inputs = np.asarray(inputs)\n else:\n inputs = self.Emb(self, mol_, at_, samps, None, False)\n inputs = np.assrray(inputs[0])\n return samps, inputs", "def iteration( M, sign_num):\n M_bootstrap = bootstrap(M)\n model = NMF(n_components = sign_num, solver = 'mu', max_iter = 10000000, init = 'random')\n #P = np.random.rand(len(M_bootstrap), sign_num)\n #E = np.random.rand(sign_num, len(M_bootstrap[0]))\n P = model.fit_transform(M_bootstrap)\n E = model.components_\n error = model.reconstruction_err_\n P , E = normalize(P, E)\n return P, error", "def mutual_information_spatial(self,max_lag,percent_calc=.5,digitize=True):\n if digitize:\n M = utilities.mi_digitize(self.X)\n else:\n M = self.X\n\n rs, cs = np.shape(M)\n\n rs_iters = int(rs*percent_calc)\n cs_iters = int(cs*percent_calc)\n\n r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)\n c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)\n\n\n # The r_picks are used to calculate the MI in the columns\n # and the c_picks are used to calculate the MI in the rows\n\n c_mi = np.zeros((rs_iters,max_lag))\n r_mi = np.zeros((cs_iters,max_lag))\n\n for i in range(rs_iters):\n for j in range(max_lag):\n\n ind = j+1\n unshift = M[r_picks[i],ind:]\n shift = M[r_picks[i],:-ind]\n c_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n for i in range(cs_iters):\n for j in range(max_lag):\n\n ind=j+1\n unshift = M[ind:, c_picks[i]]\n shift = M[:-ind, c_picks[i]]\n r_mi[i,j] = 
skmetrics.mutual_info_score(unshift,shift)\n\n r_mut = np.mean(r_mi,axis=0)\n c_mut = np.mean(c_mi,axis=0)\n\n return r_mut, c_mut, r_mi, c_mi" ]
[ "0.6674753", "0.61827624", "0.60942334", "0.6003122", "0.5959863", "0.5958628", "0.5811477", "0.57843643", "0.569512", "0.5683817", "0.56066775", "0.55940205", "0.5578646", "0.557442", "0.5545874", "0.55332446", "0.55060184", "0.55060184", "0.5491687", "0.5481766", "0.54527575", "0.5422514", "0.5364004", "0.5353071", "0.53511477", "0.531772", "0.52617013", "0.5259322", "0.5242674", "0.524014", "0.5223366", "0.5206835", "0.5191282", "0.51895845", "0.51831937", "0.5164161", "0.5136283", "0.51253736", "0.5113209", "0.51040375", "0.5080616", "0.50646406", "0.5050378", "0.50489116", "0.5040769", "0.5030197", "0.5025283", "0.50185853", "0.501359", "0.500485", "0.49803153", "0.49801862", "0.49727085", "0.4949949", "0.4937977", "0.49332651", "0.49304178", "0.49231175", "0.4917045", "0.4901601", "0.48995227", "0.48990625", "0.48990414", "0.48917538", "0.48908246", "0.488264", "0.48760206", "0.4874157", "0.48722038", "0.48705995", "0.48686978", "0.48675376", "0.48672593", "0.48668882", "0.48640266", "0.4855906", "0.48552653", "0.48551592", "0.48471847", "0.4847093", "0.4844418", "0.4843666", "0.48396307", "0.4838193", "0.48376304", "0.48359132", "0.483562", "0.48352602", "0.48307392", "0.482448", "0.48217243", "0.48186138", "0.4816964", "0.4816235", "0.481368", "0.4811366", "0.48100293", "0.48100254", "0.4806269", "0.4802796" ]
0.5009674
49
Mutual Information by Entropy norm between a specific item and the D_k text sample.
def cmi_norm(self, query, tuples): P_, vocab, H_D = self.entropy_norm(tuples) I_D = [] # I(D; q) = H(D) - H(D|q) I_as = H_D # I(D; q1, q2,...,qn) = H(D) - H(D|q1) - ... - H(D|qn) for q in query: P_wq = odict({w + '|' + q: self.condP(w, q, tuples) for w in vocab}) H_Dq = -P_[q] * sum(P_wq[pwq] * log2(P_wq[pwq]) for pwq in P_wq.keys()) I_D.append((q, H_D - H_Dq)) I_as -= H_Dq # TODO: define a,b,c for giving negative reward # if not (mmi_norm(a,b, tuples) > mmi_norm(b,c, tuples) > mmi_norm(a,c, tuples)): # I_as = self.zeta return I_D, I_as
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalized_mutual_information(cl: np.ndarray, org: np.ndarray):\n assert cl.shape == org.shape\n\n return mutual_info_score(org, cl) / (abs(entropy(cl) + entropy(org)) / 2)", "def mutual_information(mc_preds):\n mutual_info = entropy(np.mean(mc_preds, axis=0)) - np.mean(entropy(mc_preds),\n axis=0)\n return mutual_info", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD", "def alignment_uncertainty(w, I, d=0):\n return sqrt(w**2/I + d**2/12.)", "def mutual_information(co_freq, s_freq, t_freq, total_instances, mitype=None):\n if co_freq > 0:\n if mitype is not None:\n if mitype == \"expected\":\n mi = math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)\n ) * (co_freq / total_instances)\n elif mitype == \"normalized\":\n alpha = - math.log2(co_freq / total_instances)\n mi = (\n (math.log2(\n (total_instances * co_freq) / (s_freq * t_freq)) / alpha)\n if alpha != 0 else 0\n )\n elif mitype == \"pmi2\":\n mi = math.log2((co_freq ** 2) / (s_freq * t_freq))\n elif mitype == \"pmi3\":\n mi = math.log2(\n (co_freq ** 3) / (s_freq * t_freq * total_instances))\n else:\n raise ValueError(\n \"Provided Mutual information score type (mitype) is not \"\n \"supported. 
Provide one value from the following list \"\n \"['expected', 'normalized','pmi2', 'pmi3'] \")\n else:\n mi = math.log2((total_instances * co_freq) / (s_freq * t_freq))\n else:\n mi = 0\n return mi if mi > 0 else 0", "def mutual_information(x, y, w):\r\n \r\n\r\n total_entropy = entropy(y,w)\r\n\r\n partitioned_x = partition(x)\r\n weighted_entropy = 0\r\n # calculate the weighted entropy over the partition of x\r\n vals,counts= np.unique(x,return_counts=True)\r\n for key in partitioned_x:\r\n weighted_entropy += np.sum([(np.sum(w[partitioned_x[key]])/np.sum(w)) * entropy(y[partitioned_x[key]],w[partitioned_x[key]])])\r\n\r\n information_gain = total_entropy - weighted_entropy\r\n return information_gain", "def adjusted_mutual_info(self):\n # Prepare row totals and check for special cases\n row_totals = np.fromiter(self.iter_row_totals(), dtype=np.int64)\n col_totals = np.fromiter(self.iter_col_totals(), dtype=np.int64)\n R = len(row_totals)\n C = len(col_totals)\n if R == C == 1 or R == C == 0:\n # No clustering since the data is not split. This is a perfect match\n # hence return 1.0.\n return 1.0\n\n # In one step, calculate entropy for each labeling and mutual\n # information\n h_true, h_pred, mi = self._entropies()\n mi_max = max(h_true, h_pred)\n\n # Calculate the expected value for the MI\n emi = emi_from_margins(row_totals, col_totals)\n\n # Calculate the adjusted MI score\n ami = (mi - emi) / (mi_max - emi)\n return ami", "def get_mutual_information(c_wic, c_wioc, c_owic, c_owioc):\n # total word count\n c_total = c_wic + c_wioc + c_owic + c_owioc\n\n mi_1 = (c_wic / float(c_total)) * log10((c_total * c_wic) /\n float((c_wic + c_wioc) * (c_wic + c_owic)))\n mi_2 = (c_owic / float(c_total)) * log10((c_total * c_owic) /\n float((c_owic + c_owioc) * (c_wic + c_owic)))\n mi_3 = (c_wioc / float(c_total)) * log10((c_total * c_wioc) /\n float((c_wic + c_wioc) * (c_wioc + c_owioc)))\n mi_4 = (c_owioc / float(c_total)) * log10((c_total * c_owioc) /\n float((c_owic + c_owioc) * (c_wioc + c_owioc)))\n\n return mi_1 + mi_2 + mi_3 + mi_4", "def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))", "def eml_use_pseudowords_and_mle(xi, yi, deml):\n if xi not in deml[yi]:\n xi = pw(xi) # use pseudo-word instead\n\n return (deml[yi][xi]) / (sum(deml[yi].values()))", "def normQnA(dfin):\n\n update_log(er='norming Q',upload=False)\n dfin['normQ']= dfin.query_text_raw.apply(normalize_text, args=(True,True))\n dfin = dfin[dfin.normQ != 'dud_drop_me']\n\n #update_log(er='norming A',upload=True)\n #dfin['normA']= dfin.kcc_answer_raw.apply(normalize_text , args=(False,False))\n #dfin = dfin[dfin.normA != 'dud_drop_me']\n return(dfin)", "def get_m1_m4_SEN_info(tx, ty, m1_info, y_k):\n \"\"\"\n 1 Get information from m1_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n t_m = z_m / 3\n\n t_sen = m1_info[4]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n b_p = (tx, ty)\n\n u_distance = rs.Distance(m1_p3, b_p) - y_k / 2\n l_distance = rs.Distance(m1_p0, b_p) - y_k / 2\n\n \"\"\"\n 2 Get SEN information\n \"\"\"\n # Automatically fixed---------------------------------------------------\n # t_sen = rs.GetReal(\"Put Int(mm): Thickness of material to cut SEN.\", t_m / 2, None, None)\n w_sen = t_sen\n n_w_sen = w_sen / 2\n h_sen = z_m\n\n u_max_n = u_distance / (2 * w_sen - n_w_sen) # NOTE: divide max_n by 2 to controll \"n\"\n u_max_n = int(u_max_n)\n\n 
u_n = u_max_n / 4\n u_n = int(u_n)\n\n l_max_n = l_distance / (2 * w_sen - n_w_sen) # NOTE: divide max_n by 2 to controll \"n\"\n l_max_n = int(l_max_n)\n\n l_n = l_max_n / 4\n l_n = int(l_n)\n\n\n set = 20\n u_offset = (u_distance - 2 * set) / (u_n - 1)\n l_offset = (l_distance - 2 * set) / (l_n - 1)\n\n SEN_info = [w_sen, n_w_sen, h_sen, t_sen, u_n, l_n, set, u_offset, l_offset]\n\n return SEN_info", "def _compute_nmig(mus_train, ys_train, active):\n print(\"start nmig\")\n score_dict = {}\n discretized_mus = utils.make_discretizer(mus_train)\n m = utils.discrete_mutual_info(discretized_mus, ys_train)\n # m shape: (10, nr_ground_truth)\n print(\"finished discretizing\")\n assert m.shape[0] == mus_train.shape[0]\n assert m.shape[1] == ys_train.shape[0]\n entropy = utils.discrete_entropy(ys_train)\n if active is not None:\n assert len(active) <= ys_train.shape[0]\n m = m[:, active]\n entropy = entropy[active]\n nr_lt = m.shape[0]\n nr_gt = m.shape[1]\n # m is [num_latents, num_factors]\n\n sorted_m = np.sort(m, axis=0)[::-1]\n individual_mig = np.divide(sorted_m[0, :] - sorted_m[1, :], entropy[:])\n print(\"ind mig\", individual_mig)\n mig = np.mean(individual_mig)\n\n if nr_gt == 1:\n nmig = np.max(np.divide(m, entropy[:]))\n else:\n m = np.divide(m, entropy[:])\n partials = np.zeros((nr_gt))\n best_ids = np.argmax(m, axis=0)\n for i in range(nr_gt):\n mask = np.ones((nr_gt), dtype=np.bool)\n mask[i] = 0\n best_id = best_ids[i]\n partials[i] = m[best_id, i] - np.max(m[best_id, mask])\n nmig = np.mean(partials)\n print(\"ind nmig\", partials)\n score_dict[\"discrete_mig\"] = mig\n score_dict[\"discrete_nmig\"] = nmig\n\n return score_dict", "def mutual_info(l1, l2):\n return entropy(l1) + entropy(l2) - entropy(joint_dataset(l1, l2))", "def norm(self):", "def get_relevant_info(claim, articles, vectorizer, max_seq_len, spellcheck, use_ngrams):\n\n # Note: expects claim to be already cleaned\n vec_claim = vectorizer.transform_txt(claim, max_seq_len,\n use_ngrams=use_ngrams) # Claim vector - we'll use this to compare using cosine similarity\n similarities_and_sents = [] # Stores tuples of (cos sim, sentence)\n\n # Loop through all articles to construct supporting information\n for article in articles:\n sentences = tokenize_by_sentence(article)\n\n '''\n For each sentence, we clean and vectorize, then retrieve the cosine similarity of the claim vs the sentence\n '''\n for sentence in sentences:\n # Basic cleaning on sentence\n sentence = clean_txt(sentence, spellcheck)\n # Don't process for sentences less than 40 characters long - this usually means improper sentences/words\n if len(sentence) < 40:\n continue\n # Get vector of sentence and find cosine similarity\n vec_sent = vectorizer.transform_txt(sentence, max_seq_len,\n use_ngrams=use_ngrams)\n similarity = cos_sim(vec_claim, vec_sent)\n # Add to results\n similarities_and_sents.append((similarity, sentence))\n\n # Sort the similarities (in desc order) using their similarity\n sorted_sents = sorted(similarities_and_sents, key=lambda elem: elem[0], reverse=True)\n\n article_info = ''\n num_words = 0\n '''\n Construct relevant info - keep looping through sentences, adding sentences until we hit max_seq_len\n We'll surpass max_seq_len, but that's okay\n '''\n for similarity, sentence in sorted_sents:\n if num_words >= max_seq_len:\n break\n article_info += ' || ' + sentence # Add a separator\n num_words += len(sentence.split())\n return article_info", "def word_analogy(self):\n data = 
open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight", "def CDFMeasureNorm(self):\n norm = 1.0/2.0\n return norm", "def tonality(self, mdct_norm):\n return self.psychoacoustic.tonality(mdct_norm)", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def mutual_information_2d(x, y, sigma=1, normalized=False):\n \n bins = (256, 256)\n \n jh = np.histogram2d(x, y, bins=bins)[0]\n \n # smooth the jh with a gaussian filter of given sigma\n ndimage.gaussian_filter(jh, sigma=sigma, mode='constant',\n output=jh)\n \n # compute marginal histograms\n jh = jh + EPS\n sh = np.sum(jh)\n jh = jh / sh\n s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))\n s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))\n \n # Normalised Mutual Information of:\n # Studholme, jhill & jhawkes (1998).\n # \"A normalized entropy measure of 3-D medical image alignment\".\n # in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 132-143.\n if normalized:\n mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2)))\n / np.sum(jh * np.log(jh))) - 1\n else:\n mi = ( np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1))\n - np.sum(s2 * np.log(s2)))\n \n return mi", "def get_LDAU(self, U_Fe_N = 7., U_Fe_C = 5.):\n\n self.check_structure_is_read()\n\n self._modify_structure()\n\n\n # Initialize various strings\n LDAUJ = ''\n LDAUL = ''\n LDAUU = ''\n MAGMOM = ''\n\n # count the number of each species. 
\n # NOTE: pymatgen.composition does not understand decorations;\n # we have to hack this way.\n\n self.species_dict = OrderedDict()\n\n for s in self.structure.types_of_specie:\n self.species_dict[s] = 0.\n\n for s in self.structure.sites:\n self.species_dict[s.specie] += 1.\n\n # Generate MAGMOM, which must distinguish every magnetization state\n for s in self.structure.types_of_specie:\n\n if s == self.Fe_HS_2plus:\n MAGMOM += ' %i*4'%self.species_dict[s] # low spin\n elif s == self.Fe_HS_3plus:\n MAGMOM += ' %i*5'%self.species_dict[s] # high spin\n elif s == self.Fe_LS_2plus:\n MAGMOM += ' %i*0'%self.species_dict[s] # low spin\n elif s == self.Fe_LS_3plus:\n MAGMOM += ' %i*1'%self.species_dict[s] # high spin\n else:\n MAGMOM += ' %i*0.6'%self.species_dict[s] \n\n\n for s in self.structure.types_of_specie:\n LDAUJ += ' 0'\n\n if s == self.Fe_LS_2plus or s == self.Fe_LS_3plus: \n LDAUL += ' 2'\n LDAUU += ' %2.1f'%U_Fe_C\n elif s == self.Fe_HS_2plus or s == self.Fe_HS_3plus: \n LDAUL += ' 2'\n LDAUU += ' %2.1f'%U_Fe_N\n else:\n LDAUL += ' 0'\n LDAUU += ' 0'\n\n\n LDAU_dict = { 'LDAU':True, # use LDA+U (GGA+U in fact)\n 'LDAUTYPE':2, # simplified Dudarev Formalism\n 'LDAUPRINT':1, # talk to me\n 'MAGMOM':MAGMOM, # magnetic moments\n 'LDAUL':LDAUL, \n 'LDAUJ':LDAUJ, \n 'LDAUU':LDAUU,\n 'LMAXMIX':4} # this is essential for d-element GGA+U, but not the VASP default\n\n poscar_need_hack = True\n potcar_need_hack = True\n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def informationGain2(data, attribute):\n \n split_data = splitBy(data, attribute) \n weighted_entropies = 0\n \n for set in split_data:\n weighted_entropies += len(set) / len(data) * entropy2(set) \n \n columnIG = entropy2(data) - weighted_entropies\n \n return columnIG", "def test_derive_euclidean_dm(self):\r\n cat_mat = [('6.66', '46.8'),\r\n ('7.27', '36.05'),\r\n ('4.6', '44.86666667'),\r\n ('5.68', '40.58333333'),\r\n ('4.23', '68.63333333'),\r\n ('5.74', '36.45')]\r\n\r\n dm_lbls = ['MT2.141698', 'CA1.141704', 'BB2.141659',\r\n 'CO2.141657', 'TL3.141709', 'SN3.141650']\r\n\r\n mtx = [\r\n [0.0, 10.7672930674, 2.82513322958, 6.29343661968,\r\n 21.9681438519, 10.3908084382],\r\n [10.7672930674, 0.0, 9.21208506093, 4.80408275125,\r\n 32.7248408842, 1.58142340946],\r\n [2.82513322958, 9.21208506093, 0.0, 4.41739114202,\r\n 23.7695465697, 8.49351975531],\r\n [6.29343661968, 4.80408275125, 4.41739114202, 0.0,\r\n 28.0874527147, 4.13376879093],\r\n [21.9681438519, 32.7248408842, 23.7695465697,\r\n 28.0874527147, 0.0, 32.2187374711],\r\n [10.3908084382, 1.58142340946, 8.49351975531,\r\n 4.13376879093, 32.2187374711, 0.0]]\r\n\r\n exp = DistanceMatrix(asarray(mtx), dm_lbls)\r\n obs = self.best._derive_euclidean_dm(cat_mat,\r\n self.bv_dm_88soils.shape[0])\r\n self.assertEqual(obs.ids, exp.ids)\r\n assert_almost_equal(obs.data, exp.data)", "def measureNorm(self,qtype):\n return self.measureNormDict[qtype]()", "def mutual_info_fast(l1, l2, l1_entropy, l2_entropy):\n return l1_entropy + l2_entropy - entropy(joint_dataset(l1, l2))", "def get_m2_m3_SEN_info(dx, dy, m_info, x_k):\n \"\"\"\n 1 Get information from m_info\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n t_m = z_m / 3\n\n t_sen = m_info[4]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n b_p = (dx, dy)\n\n l_distance = rs.Distance(m_p0, b_p)\n r_distance = rs.Distance(m_p3, b_p) - x_k\n\n \"\"\"\n 2 Get SEN information\n \"\"\"\n # Automatically 
fixed---------------------------------------------------\n w_sen = t_sen\n n_w_sen = w_sen / 2\n h_sen = z_m\n\n l_max_n = l_distance / (2 * w_sen - n_w_sen) # NOTE: divide max_n by 2 to controll \"n\"\n l_max_n = int(l_max_n)\n\n l_n = l_max_n / 6\n l_n = int(l_n)\n\n r_max_n = r_distance / (2 * w_sen - n_w_sen) # NOTE: divide max_n by 2 to controll \"n\"\n r_max_n = int(r_max_n)\n\n r_n = r_max_n / 6\n r_n = int(r_n)\n\n set = 20\n l_offset = (l_distance - 2 * set) / (l_n - 1)\n r_offset = (r_distance - 2 * set) / (r_n - 1)\n\n SEN_info = [w_sen, n_w_sen, h_sen, t_sen, l_n, r_n, set, l_offset, r_offset]\n\n return SEN_info", "def update_entity_embedding(self, entity, ims, mu):\n self.source_entity = self.ent_embs.ent_embs.weight.data[entity]\n self.ent_embs.ent_embs.weight.data[entity] = mu * self.source_entity + (1 - mu) * torch.mean(ims, dim=0)", "def evaluate():\n global dictionary, wv\n count = 0\n # To save the scores by distance and similarity\n scores = np.zeros(6)\n similar = np.zeros(6)\n itr = len(dictionary)\n logging.info('running evaluation for {0} samples'.format(itr))\n for key in dictionary:\n progress = (count / itr) * 100\n d = dictionary[key].split('resource/')\n d = [idx.split()[0].translate(table).lower() for idx in d[1:]]\n try:\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n except KeyError:\n itr -= 1\n continue\n if np.any(np.isnan(r)):\n itr -= 1\n continue\n else:\n if r.ndim == 2:\n try:\n # Mean of vector containing all word vectors\n # obtained from abstract.\n r = r.mean(axis=0).reshape(1, -1)\n \n # Obtain the vectors for the entity\n mean_vec = mean_encoder(dictionary[key])\n mean_vec = mean_vec.reshape(1, -1) / norm(mean_vec)\n mean_dist_vec = distance_encoder(dictionary[key])\n mean_dist_vec = mean_dist_vec.reshape(1, -1)\n mean_dist_vec = mean_dist_vec / norm(mean_dist_vec)\n title_vec = title_mean(key)\n title_vec = title_vec.reshape(1, -1) / norm(title_vec)\n abstract_vec = abstract_encoder(key)\n abstract_vec = abstract_vec.reshape(1, -1)\n abstract_vec = abstract_vec / norm(abstract_vec)\n random_vec = np.random.randn(100).reshape(1, -1)\n zero_vec = np.zeros(100).reshape(1, -1)\n \n # Score the entity vectors\n scores[0] += norm(r - mean_vec)\n scores[1] += norm(r - mean_dist_vec)\n scores[2] += norm(r - title_vec)\n scores[3] += norm(r - abstract_vec)\n scores[4] += norm(r - random_vec)\n scores[5] += norm(r - zero_vec)\n similar[0] += cosine_similarity(r, mean_vec)\n similar[1] += cosine_similarity(r, mean_dist_vec)\n similar[2] += cosine_similarity(r, title_vec)\n similar[3] += cosine_similarity(r, abstract_vec)\n similar[4] += cosine_similarity(r, random_vec)\n similar[5] += cosine_similarity(r, zero_vec)\n count += 1\n print(count, end='\\r')\n except (ValueError, KeyError) as _:\n itr -= 1\n continue\n else:\n itr -= 1\n continue\n # Normalize the scores to get a better\n # comparison against the baselines.\n scores = scores / norm(scores)\n similar = similar / norm(similar)\n print_summary(scores, similar)", "def __init__(self, sents, n, corpus='', D=None):\n\n self.n = n\n self.D = D\n self.corpus = corpus\n self.smoothingtechnique = 'Kneser Ney Smoothing'\n # N1+(·w_<i+1>)\n self._N_dot_tokens_dict = N_dot_tokens = defaultdict(set)\n # N1+(w^<n-1> ·)\n self._N_tokens_dot_dict = N_tokens_dot = defaultdict(set)\n # N1+(· w^<i-1>_<i-n+1> ·)\n self._N_dot_tokens_dot_dict = N_dot_tokens_dot = defaultdict(set)\n self.counts = counts = defaultdict(int)\n vocabulary = []\n\n if D is None:\n total_sents = 
len(sents)\n k = int(total_sents*9/10)\n training_sents = sents[:k]\n held_out_sents = sents[k:]\n training_sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], training_sents))\n for sent in training_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n - 1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n D_candidates = [i*0.12 for i in range(1, 9)]\n xs = []\n for D in D_candidates:\n self.D = D\n aux_perplexity = self.perplexity(held_out_sents)\n xs.append((D, aux_perplexity))\n xs.sort(key=lambda x: x[1])\n self.D = xs[0][0]\n with open('old-stuff/kneserney_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('D: {}\\n'.format(self.D))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n # discount value D provided\n else:\n sents = list(map(lambda x: ['<s>']*(n-1) + x + ['</s>'], sents))\n for sent in sents:\n for j in range(n+1):\n # all k-grams for 0 <= k <= n\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n if ngram:\n if len(ngram) == 1:\n vocabulary.append(ngram[0])\n else:\n # e.g., ngram = (1,2,3,4,5,6,7,8)\n # right_token = (8,)\n # left_token = (1,)\n # right_kgram = (2,3,4,5,6,7,8)\n # left_kgram = (1,2,3,4,5,6,7)\n # middle_kgram = (2,3,4,5,6,7)\n right_token, left_token, right_kgram, left_kgram, middle_kgram =\\\n ngram[-1:], ngram[:1], ngram[1:], ngram[:-1], ngram[1:-1]\n N_dot_tokens[right_kgram].add(left_token)\n N_tokens_dot[left_kgram].add(right_token)\n if middle_kgram:\n N_dot_tokens_dot[middle_kgram].add(right_token)\n N_dot_tokens_dot[middle_kgram].add(left_token)\n if n-1:\n counts[('<s>',)*(n-1)] = len(sents)\n self.vocab = set(vocabulary)\n\n aux = 0\n for w in self.vocab:\n aux += len(self._N_dot_tokens_dict[(w,)])\n self._N_dot_dot_attr = aux\n\n xs = [k for k, v in counts.items() if v == 1 and n == len(k)]\n ys = [k for k, v in counts.items() if v == 2 and n == len(k)]\n n1 = len(xs)\n n2 = len(ys)\n self.D = n1 / (n1 + 2 * n2)", "def mutual_information(x, y, bins, normalize=False):\n # assert array length\n assert len(x) == len(y)\n\n # get the bins\n bins = get_2D_bins(x, y, bins)\n\n # calculate entropy(x) and conditional_entropy(x,y)\n hx = entropy(x, bins[0])\n hcon = conditional_entropy(x, y, bins)\n\n if normalize:\n normalizer = np.min([entropy(x, bins[0]), entropy(y, bins[1])])\n mutual_info = hx - hcon\n\n # check if mutual info and normalizer are very small\n if mutual_info < 1e-4 and normalizer < 1e-4:\n # return zero to prevent very high values of normalized mutual information\n # e.g. 
mutual information = -1.3e-12, normalizer = -1.6e-12 \n # -> normalized conditional entropy = 812.5\n return 0\n else:\n return mutual_info / normalizer\n else:\n return hx - hcon", "def mixture_vMF_density(x, mu_list, k_list):\n return_value = 0\n \n nr_mixtures = len(mu_list)\n \n for mu, k in zip(mu_list,k_list):\n \n Z = 2 * np.pi * ( np.exp(k) - np.exp(- k) ) / k\n \n return_value += 1 / Z * np.exp( k * np.dot(x, mu) )\n \n return return_value / nr_mixtures", "def calc_measure(self, measure='Statistical Complexity', coordmom=0, probmom=0, rllen=0, clusmom=0, samelev=True):\n\n self.measure = measure\n\n # Allow for changed values of the following class variables\n # to be passed to calc measure\n if coordmom != 0:\n self.coordmom = coordmom\n if probmom != 0:\n self.probmom = probmom\n if rllen != 0:\n self.rllen = rllen\n if clusmom != 0:\n self.clusmom = clusmom\n if samelev == False:\n self.samelev = False\n\n if self.measure == \"CM Entropy\":\n if np.isnan(self.cme):\n self.cme = np.sum(\n -np.where(self.comat > 0.0, self.comat, 1.0) * np.where(self.comat > 0.0, np.log2(self.comat), 0.0))\n\n self.val = self.cme\n self.currval = \"CM Entropy\"\n\n elif self.measure == \"EM Entropy\":\n if np.isnan(self.eme):\n import scipy.linalg as L\n\n if not self.emest:\n self.est_em()\n # get left eigenvector associated with lambda = 1\n # (largest eignevalue)\n [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)\n # Node probabilities are elements of normalized left eigenvector\n # associated with eigenvale 1 (assumes Scipy convention of\n # returning sorted eignevalues so eignevalue 1 in this case is\n # the first element of the returned eigenvalue array)\n # nodep = v[:,0]/sum(v[:,0])\n # ---- no longer make the above assumption\n # found it was wrong - now specifically ask for eigenvector\n # associated with eigenvalue 1 (greatest real part)\n maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]\n nodep = v[:, maxind] / sum(v[:, maxind])\n self.eme = -np.sum(\n np.transpose(nodep * np.ones(self.emmat.shape)) * (self.emmat * np.nan_to_num(np.log2(self.emmat))))\n\n self.val = self.eme\n self.currval = \"EM Entropy\"\n\n elif self.measure == \"Statistical Complexity\":\n if np.isnan(self.stc):\n import scipy.linalg as L\n # estimate epsilon machine if it hasn't been made\n if not self.emest:\n self.est_em()\n # get left eigenvector associated with lambda = 1\n # (largest eignevalue)\n [e, v] = L.eig(np.nan_to_num(self.emmat), left=True, right=False)\n # Node probabilities are elements of normalized left eigenvector # associated with eigenvale 1 (assumes Scipy convention of\n # returning sorted eignevalues so eignevalue 1 in this case is\n # the first element of the returned eigenvalue array)\n # nodep = v[:,0]/sum(v[:,0])\n # ---- no longer make the above assumption\n # found it was wrong - now specifically ask for eigenvector\n # associated with eigenvalue 1 (greatest real part)\n maxind = np.where(np.real(e) == np.max(np.real(e)))[0][0]\n nodep = v[:, maxind] / sum(v[:, maxind])\n self.stc = -np.sum(nodep * np.log2(nodep))\n\n self.val = self.stc\n self.currval = \"Statistical Complexity\"\n\n elif self.measure == \"Energy Uniformity\":\n if np.isnan(self.enu):\n self.enu = np.sum(np.where(self.comat > 0.0, self.comat * self.comat, 0.0))\n self.val = self.enu\n self.currval = \"Energy Uniformity\"\n\n elif self.measure == \"Maximum Probability\":\n if self.map is np.nan:\n self.map = np.max(self.comat)\n\n self.val = self.map\n self.currval = \"Maximum 
Probability\"\n\n elif self.measure == \"Contrast\":\n if np.isnan(self.con):\n if self.coordmom == 0 or self.probmom == 0:\n if self.coordmom == 0:\n print(\"Nonzero coordinate moment is required for calculating Contrast\")\n if self.probmom == 0:\n print(\"Nonzero probability moment is required for calculating Contrast\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n\n self.con = np.sum((np.abs(crows - ccols) ** self.coordmom) * (self.comat ** self.probmom))\n\n self.val = self.con\n self.currval = \"Contrast\"\n\n elif self.measure == \"Inverse Difference Moment\":\n if np.isnan(self.idm):\n if self.coordmom == 0 or self.probmom == 0:\n if self.coordmom == 0:\n print(\"Nonzero coordinate moment is required for calculating Inverse Difference Moment\")\n if self.probmom == 0:\n print(\"Nonzero probability moment is required for calculating Inverse Difference Moment\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n codiffs = np.abs(crows - ccols) ** self.coordmom\n # Set minimum coordinate difference for which you allow\n # probability to be calculated\n codiff_eps = 0.0000001\n # Do following so test divides don't blow up and\n # generte a warning\n codiffs_ok = np.where(codiffs > codiff_eps, codiffs, 1.0)\n self.idm = np.sum(np.where(codiffs > codiff_eps, (self.comat ** self.probmom) / codiffs_ok, 0.0))\n\n self.val = self.idm\n self.currval = \"Inverse Difference Moment\"\n\n elif self.measure == \"Correlation\":\n if np.isnan(self.cor):\n import scipy.stats as ss\n\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.\n ccols[:, i] = i + 1\n rowmom = np.sum(crows * self.comat)\n colmom = np.sum(ccols * self.comat)\n comatvar = np.var(np.ravel(self.comat * crows))\n self.cor = np.sum((crows - rowmom) * (ccols - colmom) * self.comat) / comatvar\n self.val = self.cor\n self.currval = \"Correlation\"\n\n elif self.measure == \"Probability of Run Length\":\n if np.isnan(self.prl):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Probability of Run Length\")\n else:\n colprobs = np.zeros(self.comat.shape[0])\n for i in range(self.comat.shape[0]):\n colprobs[i] = np.sum(self.comat[i, :])\n self.prl = 0.0\n for i in range(self.comat.shape[0]):\n if colprobs[i] != 0.0:\n self.prl += ((colprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)\n self.val = self.prl\n self.currval = \"Probability of Run Length\"\n\n elif self.measure == \"Epsilon Machine Run Length\":\n if np.isnan(self.erl):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Epsilon Machine Run Length\")\n else:\n if not self.emest:\n self.est_em()\n self.erl = 0.0\n colprobs = np.zeros(self.emmat.shape[0])\n for i in range(self.emmat.shape[0]):\n colprobs[i] = np.sum(self.emmat[i, :])\n for i in range(self.emmat.shape[0]):\n self.erl += ((colprobs[i] - self.emmat[i, i]) ** 2 * (self.emmat[i, i] ** (self.rllen - 1))) / (\n colprobs[i] ** self.rllen)\n self.val = self.erl\n self.currval = \"Epsilon Machine Run Length\"\n\n elif self.measure == \"Run Length Asymmetry\":\n if np.isnan(self.rla):\n if self.rllen == 0:\n print(\"Nonzero run length is required for calculating Run 
Length Asymmetry\")\n else:\n colprobs = np.zeros(self.comat.shape[0])\n rowprobs = np.zeros(self.comat.shape[0])\n for i in range(self.comat.shape[0]):\n colprobs[i] = np.sum(self.comat[i, :])\n rowprobs[i] = np.sum(self.comat[:, i])\n colval = 0.0\n rowval = 0.0\n for i in range(self.comat.shape[0]):\n if colprobs[i] != 0.0:\n colval += ((colprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (colprobs[i] ** self.rllen)\n if rowprobs[i] != 0.0:\n rowval += ((rowprobs[i] - self.comat[i, i]) ** 2 * (\n self.comat[i, i] ** (self.rllen - 1))) / (rowprobs[i] ** self.rllen)\n self.rla = np.abs(colval - rowval)\n self.val = self.rla\n self.currval = \"Run Length Asymmetry\"\n\n elif self.measure == \"Homogeneity\":\n if np.isnan(self.hom):\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i\n ccols[:, i] = i\n self.hom = np.sum((self.comat) / (1 + np.abs(crows - ccols)))\n self.val = self.hom\n self.currval = \"Homogeneity\"\n\n elif self.measure == \"Cluster Tendency\":\n if np.isnan(self.clt):\n if self.clusmom == 0:\n print(\"Nonzero cluster moment is required for calculating Cluster Tendency\")\n else:\n crows = np.zeros(self.comat.shape)\n ccols = np.zeros(self.comat.shape)\n for i in range(self.comat.shape[0]):\n crows[i, :] = i + 1 # need to start at 1 for Correlation calcs.\n ccols[:, i] = i + 1\n rowmom = np.sum(crows * self.comat)\n colmom = np.sum(ccols * self.comat)\n self.clt = np.sum(((crows + ccols - rowmom - colmom) ** self.clusmom) * self.comat)\n self.val = self.clt\n self.currval = \"Cluster Tendency\"\n\n elif self.measure == \"Multifractal Spectrum Energy Range\":\n if not self.emest: # estimate epsilon machine\n self.est_em()\n if not self.mfsest: # estimate multifractal spectrum\n self.est_multi_frac_spec()\n if self.mfsspec.size != 0:\n self.mfu = np.max(self.mfsspec[:, 0]) - np.min(self.mfsspec[:, 0])\n else:\n self.mfu = 0.0\n self.val = self.mfu\n self.currval = \"Multifractal Spectrum Energy Range\"\n\n elif self.measure == \"Multifractal Spectrum Entropy Range\":\n if not self.emest: # estimate epsilon machine\n self.est_em()\n if not self.mfsest: # estimate multifractal spectrum\n self.est_multi_frac_spec()\n if self.mfsspec.size != 0:\n self.mfs = np.max(self.mfsspec[:, 1]) - np.min(self.mfsspec[:, 1])\n else:\n self.mfs = 0.0\n self.val = self.mfs\n self.currval = \"Multifractal Spectrum Entropy Range\"\n\n else:\n \"Sorry don't know about texture measure \", self.measure", "def stats(self, s_sample, d_sample, x_sample, wav_len):\n\t\tpass\n\t\t# s_STMS_sample, d_STMS_sample, x_STMS_sample = self.transfrom_stats(s_sample,\n\t\t# \td_sample, x_sample, wav_len)\n\t\t# smm_sample = tf.math.truediv(s_STMS_sample, x_STMS_sample)\n\t\t# self.smm_map.stats(smm_sample)", "def wordSimilarityRatio(sent_1,sent_2):", "def map2mw_D(d,k1,entry,mwverbs,cformsd):\n if k1 in map2mw_special_D:\n return map2mw_special_D[k1]\n ans = map2mw_D_1(d,k1,cformsd)\n if ans:\n return ans\n return '?'\n ans = map2mw_D_2(d,k1,entry,mwverbs)\n if ans:\n return ans\n \n k = re.sub(r'Ami$','',k1) \n if k in d:\n mwrec = d[k]\n if mwrec.cat == 'preverb':\n return k\n\n return '?'", "def __init__(self, data, m=100, eta=0.1, seq_length=25, sigma= 0.01):\n\n self.m, self.eta, self.seq_length = m, eta, seq_length\n self.vocab_len = data['vocab_len']\n self.ind_to_char = data['ind_to_char']\n self.char_to_ind = data['char_to_ind']\n self.book_data = data['book_data']\n\n self.b = np.zeros((m, 
1))\n self.c = np.zeros((self.vocab_len, 1))\n\n self.U = np.random.normal(0, sigma, size=(m, self.vocab_len))\n self.W = np.random.normal(0, sigma, size=(m, m))\n self.V = np.random.normal(0, sigma, size=(self.vocab_len, m))", "def _DocSim(self,df,a):\r\n #Obtain the descriptions of the two input courses.\r\n textA = df['description'][a]\r\n #Obtain the document embedding vector for each description.\r\n vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)\r\n return vectorA", "def measureAll(authors_texts,sectorialized_agents):\n authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()", "def get_mlt_phys(sed_name):\n\n new_name = sed_name.replace('+','-').replace('a','-').split('-')\n\n logg_sgn_dex = len(new_name[0])\n\n if sed_name[logg_sgn_dex] == '-':\n logg_sgn = 1.0\n elif sed_name[logg_sgn_dex] == '+':\n logg_sgn = -1.0\n else:\n raise RuntimeError('Cannot get logg_sgn for %s' % sed_name)\n\n metallicity_sgn_dex = len(new_name[0]) + 
len(new_name[1]) + 1\n\n if sed_name[metallicity_sgn_dex] == '-':\n metallicity_sgn = -1.0\n elif sed_name[metallicity_sgn_dex] == '+':\n metallicity_sgn = 1.0\n else:\n raise RuntimeError('Cannot get metallicity_sgn for %s' % sed_name)\n\n teff = 100.0*float(new_name[0][3:])\n metallicity = metallicity_sgn*float(new_name[2])\n logg = logg_sgn*float(new_name[1])\n\n return teff, metallicity, logg", "def normalize_sen_scores_corpus_smoothing():\n param_len = 10*11# for alpha*beta =110 \n k_len = 1\n# sen_res_files_path = linux_base_path+r\"/claimLM_senLM_sen_ret_output_corpus_smoothing_corpus_beta_\"+str(corpus_beta)\n sen_res_files_path = base_path+r\"\\claimLM_senLM_sen_ret_output_corpus_smoothing_corpus_beta_\"+str(corpus_beta)+\"\\\\\"\n norm_sen_res_path = base_path+r\"\\sen_norm_scores_dicts_corpus_smoothing_corpus_beta_\"+str(corpus_beta)+\"\\\\\"\n \n claims_file_counters_dict = {} #for each claim numas key, have the val a counter - if not alpha_beta_len*k_lem per claim -> problem!\n claim_list = [4]#,41,42,45,46,47,50,51,53,54,55,57,58,59,60,61,62,66,69,70,79,80]\n top_k_docs_vals = [50]# ,100,500] \n for claim_num in claim_list:\n for k_val in top_k_docs_vals:\n for alpha in range(4,5,1):\n for beta in range(8,9,1):\n (alpha_f,beta_f) = turn_to_float([alpha,beta]) \n filename = \"sen_res_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_clm_\"+str(claim_num)\n sen_file = open(sen_res_files_path+filename,'r')\n print \"in filename: \"+filename\n sen_score_dict = {} # key is docno, val is the exp(score)\n curr_dict_name = \"sen_scores_norm_clm_\"+str(claim_num)+\"_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_dict\"\n sen = sen_file.read().strip() # score\n scores_sum = 0.0\n if claim_num in claims_file_counters_dict.keys():\n claims_file_counters_dict[claim_num] += 1 \n else:\n claims_file_counters_dict[claim_num] = 1\n for i, line in enumerate(sen.split('\\n')):\n if i%2==0: # a data line\n try:\n data = line.split(' ')\n query_Id = data[0]\n doc_id = data[2]\n if (data[4])!= \"nan\":\n norm_score = math.exp(float(data[4]))\n scores_sum += norm_score\n if os.path.exists(curr_dict_name) == True:\n sen_score_dict = read_pickle(curr_dict_name)\n if doc_id in sen_score_dict:\n raise Exception(\"DOC ID %s already in dict\" % doc_id)\n except Exception as err:\n sys.stderr.write('problem in filename:'+ filename +' line: '+line) \n print err.args \n print err\n else:\n if len(line) >0: \n sen_score_dict[query_Id,line] = (doc_id,norm_score)\n # divide by scores_sum\n for ((query_Id,sen),(doc_id,score)) in sen_score_dict.items():\n new_score = float(float(score)/float(scores_sum))\n sen_score_dict[query_Id,sen] = (doc_id,new_score)\n #rank according to score\n sen_score_dict_sorted = collections.OrderedDict(sorted(sen_score_dict.items(), key= lambda x: (-int(x[0][0]),x[1][1]),reverse=True))\n # save_pickle(base_path+r\"sen_norm_scores_dicts\"+\"\\\\\"+curr_dict_name+\"_sorted\",doc_score_dict_sorted)\n save_pickle(norm_sen_res_path+curr_dict_name+\"_sorted\",sen_score_dict_sorted) \n for (claim_num,counter) in claims_file_counters_dict.items():\n if counter!=(param_len*k_len):\n print str(claim_num)+\" not \"+str(param_len*k_len)+\" files , but \" +str(counter) +\" files\"\n print \"finished\"", "def calc_metrics(self, data, output):\n\n L1NormITAE = self.calcL1NormITAE(data)\n L1NormAbs = self.calcL1NormAbs(data)\n #\n # print 'ITAE score: ', errorIntegral\n print 'L1NormITAE: ', L1NormITAE\n print 'L1NormAbs: ', L1NormAbs\n print 
'\\n'\n output.update({'L1NormITAE': L1NormITAE, 'L1NormAbs': L1NormAbs})", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def recalculate_emission(self, i, k, corpus):\n num = sum(sum(self.gamma(i, t, O) for t in xrange(len(O)) if O[t] == k) for O in corpus)\n denom = sum(sum(self.gamma(i,t, O) for t in xrange(len(O))) for O in corpus)\n\n return num / denom", "def normal_subjects_out(sub):\n baseDIR = 'the root directory'\n ## read data (contains training data (all subjects except one) and test data (the rest subject) and corresponding labels) \n matContent = sio.loadmat(baseDIR +'/Data/' +'normal_subject_out'+str(sub+1) +'.mat')\n normal_x_train = matContent['trainingFeatures']\n y_train = np.squeeze(matContent['trainingLabels'])\n x_test = matContent['testFeatures']\n y_test = matContent['testLabels'] \n ## permute data\n rand_idx = np.random.permutation(normal_x_train.shape[0])\n normal_x_train = normal_x_train[rand_idx,:,:]\n \n ## normalize data based on StandardScaler function in Sklearn\n normal_x_train, x_test, scaler = standardization(normal_x_train, x_test)\n\n ## pick 80% of normal training data to train DAE architecture and the rest 20% is used for computing NPM and fitting GEVD\n train_perc = np.int(normal_x_train.shape[0] - np.round(0.2 * normal_x_train.shape[0]))\n normal_x_train1 = normal_x_train[0:train_perc,:]\n normal_x_train2 = normal_x_train[train_perc:,:]\n \n ## training labels are always 0s \n training_labels = y_train[0:train_perc,] \n training_labels1 = y_train[train_perc:,]\n \n ## make the inverse of standardization on both training parts \n normal_x_gevd = scaler.inverse_transform(np.reshape(normal_x_train2,[normal_x_train2.shape[0],\n normal_x_train2.shape[1]*normal_x_train2.shape[2]*normal_x_train2.shape[3]]))\n normal_x_train_inverseScale = scaler.inverse_transform(np.reshape(normal_x_train1,[normal_x_train1.shape[0],\n normal_x_train1.shape[1]*normal_x_train1.shape[2]*normal_x_train1.shape[3]]))\n \n #save data both for before and after standardization\n sio.savemat(baseDIR + 'Dropout/'+ 'normal_train_for_NPM_sub_out' + str(sub+1)+ '.mat', {'normal_train_x':normal_x_train_inverseScale,'normal_train_npm':normal_x_gevd})\n sio.savemat(baseDIR + 'Dropout/' + 'data_after_normalization' + str(sub+1) + '.mat',{'normal_x_train1':normal_x_train1,'normal_x_train2':normal_x_train2,'scaler':scaler,'x_test':x_test})\n return normal_x_train1, normal_x_train2, x_test, training_labels, training_labels1, y_test, scaler", "def mass(self):\n\t\treturn self.volume*self.density", "def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + 
skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean =np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = ((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'", "def visualize_attention(test_seq, i,\n model,\n id2wrd, # word_index.\n n):\n \n id2wrd = dict((v, k) for k, v in id2wrd.items()) # Exchange key with values.\n\n get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[4].output])\n out = get_layer_output([test_seq, ])[0] # test mode\n\n att_w = model.layers[5].get_weights() # The attention layer is the sixth layer.\n\n eij = np.tanh(np.dot(out[i], att_w[0]))\n# print(\"1 eij is ............\")\n# print(eij)\n ListToCSV(eij, 'eij.csv') # added by Shigang\n ai = np.exp(eij)\n# print(\"1 ai is ............\")\n# print(ai)\n ListToCSV(ai, 'ai.csv') # added by Shigang\n weights = ai/np.sum(ai)\n# print(\"1 weights is ............\")\n# print(weights)\n ListToCSV(weights, '1weights.csv') # added by Shigang\n weights = np.sum(weights,axis=1)\n# print(\"2 weights is ............\")\n# print(weights)\n ListToCSV(weights, '2weights.csv') # added by Shigang\n topKeys = np.argpartition(weights,-n)[-n:]\n\n print (' '.join([id2wrd[wrd_id] for wrd_id in test_seq[i] if wrd_id != 0.]))\n print ('--------------------------Attentive Words start: --------------------------------------')\n \n for k in test_seq[i][topKeys]:\n if k != 0.:\n print (id2wrd[k])\n print ('--------------------------Attentive Words end: --------------------------------------')\n \n return", "def h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):\r\n Kxxy = torch.cat((Kx,Kxy),1)\r\n Kyxy = torch.cat((Kxy.transpose(0,1),Ky),1)\r\n Kxyxy = torch.cat((Kxxy,Kyxy),0)\r\n nx = Kx.shape[0]\r\n ny = Ky.shape[0]\r\n is_unbiased = True\r\n if is_unbiased:\r\n xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))\r\n yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n else:\r\n xx = torch.div((torch.sum(Kx)), (nx * nx))\r\n yy = torch.div((torch.sum(Ky)), (ny * ny))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy)), (nx * ny))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n if not is_var_computed:\r\n return mmd2, None, Kxyxy\r\n hh = Kx+Ky-Kxy-Kxy.transpose(0,1)\r\n V1 = torch.dot(hh.sum(1)/ny,hh.sum(1)/ny) / ny\r\n V2 = (hh).sum() / (nx) / nx\r\n varEst = 4*(V1 - V2**2)\r\n if varEst == 0.0:\r\n print('error_var!!'+str(V1))\r\n return mmd2, varEst, Kxyxy", "def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = 
self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')", "def mutual_info_score(self):\n _, _, I_CK = self._entropies()\n return I_CK / self.grand_total", "def melting_temp_gc(seq):\n\n Na = 0.050 # log10([Na+] adjusts for the salt adjustment at 50 mM Na+\n total_nucleotide = sum([(seq.count('A') + seq.count('T') + seq.count('G')+ seq.count('C'))])\n\n tm_marmurdoty = 64.9 + 41.0 * (((seq.count('G')+ seq.count('C')) -16.4)/ total_nucleotide)\n\n tm_howley = 100.5 + 41.0 * (((seq.count('G')+ seq.count('C')) -16.4 )/ total_nucleotide)\n\n - (820.0/total_nucleotide) + 16.6 * math.log10(Na)\n \n # result_tm = \"Melting Temp by:: Marmur:{},Howley:{}\".format(tm_marmurdoty,tm_howley)\n result_tm = {\"mt_marmur\":tm_marmurdoty,\"mt_howley\":tm_howley}\n return result_tm", "def information_gain(features, attribute_index, targets):\r\n\r\n possible_feature_values = [0,1]\r\n \r\n possible_classifications = [0,1]\r\n \r\n feature = features[:,attribute_index]\r\n \r\n \r\n number_of_samples = len(feature)\r\n \r\n import math\r\n \r\n \r\n #current_entropy = np.sum([-(len(targets[targets==possible_classification])/number_of_samples)*math.log(len(targets[targets==possible_classification])/number_of_samples, 2) for possible_classification in possible_classifications])\r\n \r\n terms_to_be_summed_for_current_entropy = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_elements_with_this_classification = len(targets[targets==classification])\r\n \r\n p_for_this_classification = number_of_elements_with_this_classification/len(targets)\r\n \r\n if p_for_this_classification != 0:\r\n terms_to_be_summed_for_current_entropy.append(-p_for_this_classification*math.log(p_for_this_classification,2))\r\n else:\r\n terms_to_be_summed_for_current_entropy.append(0)\r\n \r\n current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n \r\n \r\n terms_to_be_summed_for_weighted_entropy = []\r\n \r\n for possible_value in possible_feature_values:\r\n \r\n targets_split_by_feature_value = targets[feature.flatten() == possible_value]\r\n \r\n if len(targets_split_by_feature_value) != 0:\r\n \r\n \r\n weight_of_feature_value = len(targets_split_by_feature_value)/len(targets)\r\n \r\n terms_for_entropy_within_subset = []\r\n \r\n for classification in possible_classifications:\r\n \r\n number_of_subset_elements_with_this_classification = len(targets_split_by_feature_value[targets_split_by_feature_value==classification])\r\n \r\n p_in_subset_for_this_classification = number_of_subset_elements_with_this_classification/len(targets_split_by_feature_value)\r\n \r\n if p_in_subset_for_this_classification != 0:\r\n terms_for_entropy_within_subset.append(-p_in_subset_for_this_classification*math.log(p_in_subset_for_this_classification,2))\r\n else:\r\n terms_for_entropy_within_subset.append(0)\r\n \r\n entropy_within_subset = np.sum(terms_for_entropy_within_subset)\r\n \r\n terms_to_be_summed_for_weighted_entropy.append(weight_of_feature_value*entropy_within_subset)\r\n \r\n weighted_entropy = np.sum(terms_to_be_summed_for_weighted_entropy)\r\n \r\n \r\n #current_entropy = np.sum(terms_to_be_summed_for_current_entropy)\r\n \r\n #weighted_entropy = 
np.sum([(len(feature[feature==possible_value])/number_of_samples)*(len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value]))*math.log((len(targets[feature==possible_value][targets[feature==possible_value]==possible_classification])/len(targets[feature==possible_value])), 2) for possible_classification in possible_classifications for possible_value in possible_feature_values])\r\n\r\n information_gain = current_entropy - weighted_entropy \r\n \r\n return information_gain", "def __init__(self,sentences):\n self.data_set = sentences\n self.sum1=0\n for sentence in self.data_set:\n sentence.weight = 1/len(self.data_set)\n self.sum1 += sentence.weight\n\n self.sum2=1", "def findAtypicalTerms(self):\n self.atypicalTermsDict = collections.OrderedDict()\n distanceList = list()\n distance = 0\n for key in self.summaryFilteredDict:\n partitionName = str(key).split(\" :\")[0]\n partition = voc.getPartition(partitionName)\n modNames = partition.getModNames()\n currentModality = str(key).split(\": \")[1]\n indexCurrentModality = modNames.index(currentModality)\n coverCurrentModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + currentModality) #cover(v',R)\n if coverCurrentModality > 0:\n for modality in partition.getModalities():\n coverModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + modality.getName()) # cover(v,R)\n if modality.isTrapeziumModality():\n indexModality = modNames.index(modality.getName())\n distance = abs(indexCurrentModality - indexModality) / (partition.getNbModalities() - 1) #d(v,v')\n elif modality.isEnumModality():\n if (modality.getName() == currentModality):\n distance = 0\n else:\n distance = 1\n distanceList.append(min(distance, 1 - coverCurrentModality, coverModality)) # min(d(v,v'),cover(v,R),1-cover(v',R))\n self.atypicalTermsDict[partitionName + \" : \" + currentModality] = max(distanceList) # D(v',R)\n distanceList = list()", "def gen_Modal_Question(keyword_dic_sents):\n try:\n # txt = TextBlob(string)\n # for line in txt.sentences:\n for key in keyword_dic_sents.keys():\n \"\"\"\n outputs question from the given text\n \"\"\"\n # print(keyword_dic_sents[line])\n # print(entity.text, entity.label_)\n\n answers.append(key)\n # print(key)\n for sentence in keyword_dic_sents[key]:\n # print(sentence)\n if type(sentence) is str: # If the passed variable is of type string.\n line = TextBlob(sentence) # Create object of type textblob.blob.TextBlob\n # print(line)\n bucket = {} # Create an empty dictionary\n\n for i, j in enumerate(line.tags): # line.tags are the parts-of-speach in English\n if j[1] not in bucket:\n bucket[j[1]] = i # Add all tags to the dictionary or bucket variable\n\n question = '' # Create an empty string\n\n # With the use of conditional statements the dictionary is compared with the list created above\n # print(line.tags)\n #####################################################################gen modal ##########################################################################################\n ######################################## VBN ##################################################\n if all(key in bucket for key in VBNDT1): # 'NNP', 'VBZ' ,'VBN' , 'IN , 'DT' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n 
line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT1'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN1): # 'NNP', 'VBZ' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN1'\n questions.append(question)\n\n\n elif all(key in bucket for key in VBN1): # 'NNP', 'VBZ' ,'VBN' in sentence.\n question = 'Has' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT2'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN2'\n questions.append(question)\n\n elif all(key in bucket for key in VBN2): # 'PRP', 'VBZ' ,'VBN' in sentence.\n question = 'Has' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT3): # 'NNP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT3'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN3): # 'NNP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN3'\n questions.append(question)\n\n elif all(key in bucket for key in VBN3): # 'NNP', 'VBP' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN3'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT4): # 'PRP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT4'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN4): # 'PRP', 'VBP' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n 
question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN4'\n questions.append(question)\n\n elif all(key in bucket for key in VBN4): # 'PRP', 'VBP' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN4'\n questions.append(question)\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT5): # 'NNP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT5'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN5): # 'NNP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN5'\n questions.append(question)\n\n elif all(key in bucket for key in VBN5): # 'NNP', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN5'\n questions.append(question)\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT6): # 'PRP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT6'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN6): # 'PRP', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN6'\n questions.append(question)\n\n elif all(key in bucket for key in VBN6): # 'PRP', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN6'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT7'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN7'\n questions.append(question)\n\n 
elif all(key in bucket for key in VBN7): # 'NNPS', 'VBD' ,'VBN' in sentence.\n question = 'Had' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN7'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT8'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'VBNIN8'\n questions.append(question)\n\n elif all(key in bucket for key in VBN8): # 'NNPS', 'VBD' ,'VBN' in sentence.\n question = 'Have' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBN']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'VBN8'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n elif all(key in bucket for key in VBNDT9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'VBNDT9'\n questions.append(question)\n\n elif all(key in bucket for key in VBNIN9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'VBNIN9'\n questions.append(question)\n\n elif all(key in bucket for key in VBN9): # 'NNPS', 'VBD' ,'VBN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Has' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBN']] + ' ' + j[0] + ' ' + '?'\n pattern_name = 'VBN9'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------#\n\n ########################################### End VBN ##################################?????????????????????????!!!!!!!!!!!!!!!!!!'''\n\n ########################################### present continouse #############################\n elif all(key in bucket for key in PRCDT1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[\n bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRCDT1'\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRCIN1'\n 
questions.append(question)\n\n\n elif all(key in bucket for key in PRC1): # 'NNP', 'VBG', 'VBZ', 'IN' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRC1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT2): # 'NNP', 'VBG', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT2\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN2): # 'NNP', 'VBG', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['VBG']] + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PRCIN2\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC2): # 'NNP', 'VBG', 'VBZ' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC2\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT3\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN3\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC3): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC3\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT4): # 'NNPS', 'VBG', 'VBP' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT4\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN4): # 'NNPS', 'VBG', 'VBP' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN4\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC4): # 'NNPS', 'VBG', 'VBP' in sentence.\n question = 'Are' + ' ' + 
line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC4\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT5\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PRCIN5\"\n questions.append(question)\n\n elif all(key in bucket for key in PRC5): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + j[0] + ' ' + '?'\n pattern_name = \"PRC5\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRCDT6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PRCDT6\"\n questions.append(question)\n\n elif all(key in bucket for key in PRCIN6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PRCIN6\"\n questions.append(question)\n\n\n elif all(key in bucket for key in PRC6): # 'NNPS', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PRC6\"\n questions.append(question)\n\n ########################## Past Cont. 
###################################\n elif all(key in bucket for key in PACDT1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT1\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN1\"\n questions.append(question)\n\n\n elif all(key in bucket for key in PAC1): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Was' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC1\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT2\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN2\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC2): # 'NNP', 'VBG', 'VBD', 'IN' in sentence.\n question = 'Were' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC2\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT3\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n j[0] + '?'\n pattern_name = \"PACIN3\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC3): # 'NNP', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Was' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC3\"\n questions.append(question)\n\n # 
-----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT4\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n j[0] + '?'\n pattern_name = \"PACIN4\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC4): # 'NNPS', 'VBG', 'VBD' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Were' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = \"PAC4\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + \"anything\" + ' ' + line.words[bucket['IN']] + ' ' + line.words[\n bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT5\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + 'anything' + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = \"PACIN5\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC5): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Was' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBG']] + ' ' + j[0] + ' ' + '?'\n pattern_name = \"PAC5\"\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PACDT6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = \"PACDT6\"\n questions.append(question)\n\n elif all(key in bucket for key in PACIN6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = \"PACIN6\"\n questions.append(question)\n\n elif all(key in bucket for key in PAC6): # 'PRP', 'VBG', 'VBP', 'IN' in sentence.\n question = 'Were' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBG']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n 
pattern_name = \"PAC6\"\n questions.append(question)\n\n ############################## Present Simple ######################################\n elif all(key in bucket for key in PRSDT1): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT1'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN1): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN1'\n questions.append(question)\n\n\n elif all(key in bucket for key in PRS1): # 'NNP', 'VBZ', 'NN' in sentence\n question = 'Does' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT2'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN2'\n questions.append(question)\n\n\n elif all(key in bucket for key in PRS2): # 'NNP', 'VBZ' in sentence.\n question = 'Does ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT3): # 'NNPS', 'VBP', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT3'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN3): # 'NNPS', 'VBP', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN3'\n questions.append(question)\n\n elif all(key in bucket for key in PRS3): # 'NNPS', 'VBP', 'NN' in sentence\n question = 'Do' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS3'\n questions.append(question)\n\n # 
-----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT4'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0]:\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN4'\n questions.append(question)\n\n elif all(key in bucket for key in PRS4): # 'NNP', 'VBZ' in sentence.\n question = 'Do ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS4'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + \"anything\" + ' ' + \\\n line.words[bucket['IN']] + ' ' + line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT5'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + \"anything\" + ' ' + \\\n line.words[bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PRSIN5'\n questions.append(question)\n\n elif all(key in bucket for key in PRS5): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Does' + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VBZ']].singularize() + ' ' + j[0] + ' ' + '?'\n pattern_name = 'PRS5'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PRSDT6): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PRSDT6'\n questions.append(question)\n\n elif all(key in bucket for key in PRSIN6): # 'NNP', 'VBZ', 'NN' in sentence\n if line.words[bucket['NN']] != j[0]:\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[\n 0] + '?'\n pattern_name = 'PRSIN6'\n questions.append(question)\n\n elif all(key in bucket for key in PRS6): # 'NNP', 'VBZ', 'NN' in sentence\n question = 'Do' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[\n bucket['VBP']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PRS6'\n questions.append(question)\n\n ########################################### End present simple #################################\n\n ##################################################### MD 
###########################################\n elif all(key in bucket for key in MD1): # 'NNP', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['NNP']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD1'\n questions.append(question)\n\n elif all(key in bucket for key in MD2): # 'PRP', 'VB' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD2'\n questions.append(question)\n\n elif all(key in bucket for key in MD3): # 'NNPS', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[\n bucket['NNPS']] + ' ' + line.words[bucket['VB']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD3'\n questions.append(question)\n\n elif all(key in bucket for key in MD4): # 'NNS', 'VB' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['PRP']] + ' ' + line.words[\n bucket['VB']] + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'MD4'\n questions.append(question)\n\n elif all(key in bucket for key in MD5): # 'NNP', 'VB' in sentence.\n if line.words[bucket['NN']] != j[0]:\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['VB']].singularize() + ' ' + j[0] + '?'\n pattern_name = 'MD5'\n questions.append(question)\n\n elif all(key in bucket for key in MD6): # 'NNP', 'VB' in sentence.\n md_word = line.words[bucket['MD']]\n question = md_word.capitalize() + ' ' + line.words[\n bucket['NNS']] + ' ' + line.words[bucket['VB']].singularize() + ' ' + line.words[\n bucket['NN']] + ' ' + '?'\n pattern_name = 'MD6'\n questions.append(question)\n ####################################### End MD ###############################################\n ###################################### JJ ####################################################\n elif all(key in bucket for key in JJ1): # 'NNP', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NNP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ1'\n questions.append(question)\n\n elif all(key in bucket for key in JJ2): # 'PRP', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNPS']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ2'\n questions.append(question)\n\n elif all(key in bucket for key in JJ3): # 'NNPS', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ3'\n questions.append(question)\n\n elif all(key in bucket for key in JJ4): # 'NNPS', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['PRP']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ4'\n questions.append(question)\n\n elif all(key in bucket for key in JJ5): # 'NNS', 'VB' in sentence.\n question = 'Is' + ' ' + line.words[bucket['NN']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ5'\n questions.append(question)\n\n elif all(key in bucket for key in JJ6): # 'NNS', 'VB' in sentence.\n question = 'Are' + ' ' + line.words[bucket['NNS']] + ' ' + line.words[bucket['JJ']] + '?'\n pattern_name = 'JJ6'\n 
questions.append(question)\n ####################################### END JJ ###########################################################\n ########################################### Past simple #################################\n try:\n if all(key in bucket for key in PASDT1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did' + ' ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n pattern_name = 'PASDT1'\n questions.append(question)\n\n\n elif all(key in bucket for key in PASIN1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PASIN1'\n questions.append(question)\n\n elif all(key in bucket for key in PAS1): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS1'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASDT2'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \\\n line.words[bucket['NN']] + ' ' + line.words[bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN2'\n questions.append(question)\n\n elif all(key in bucket for key in PAS2): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['PRP']] in ['he', 'she', 'it']:\n if line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[\n bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS2'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] 
+ '?'\n pattern_name = 'PASDT3'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n pattern_name = 'PASIN3'\n questions.append(question)\n\n elif all(key in bucket for key in PAS3): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['NNPS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n pattern_name = 'PAS3'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n elif all(key in bucket for key in PASDT4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASDT4'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + \\\n line.words[bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN4'\n questions.append(question)\n\n elif all(key in bucket for key in PAS4): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['PRP']] in ['i', 'you', 'we', 'they']:\n if line.words[bucket['VBD']] not in ['was', 'were']:\n question = 'Did ' + line.words[bucket['PRP']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS4'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n ## PASDT6 before PASDT5 ##\n elif all(key in bucket for key in PASDT6): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PASDT6'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN6): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN6'\n questions.append(question)\n\n elif all(key in bucket for key in PAS6): # 'NNP', 'VBZ' in sentence.\n if 
line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NNS']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + line.words[bucket['NN']] + ' ' + '?'\n\n pattern_name = 'PAS6'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n\n elif all(key in bucket for key in PASDT5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + \\\n line.words[bucket['DT']] + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PASDT5'\n questions.append(question)\n\n elif all(key in bucket for key in PASIN5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + \"anything\" + ' ' + line.words[\n bucket['IN']] + ' ' + j[0] + '?'\n\n pattern_name = 'PASIN5'\n questions.append(question)\n\n\n elif all(key in bucket for key in PAS5): # 'NNP', 'VBZ' in sentence.\n if line.words[bucket['NN']] != j[0] and line.words[bucket['VBD']] not in ['was', 'were', 'had']:\n question = 'Did ' + line.words[bucket['NN']] + ' ' + lemmatizer.lemmatize(\n line.words[bucket['VBD']], pos=\"v\") + ' ' + j[0] + ' ' + '?'\n\n pattern_name = 'PAS5'\n questions.append(question)\n\n # -----------------------------------------------------------------------------------------------------------#\n\n except:\n a = 'a'\n ############################################### End past simple #####################################\n # When the tags are generated 's is split to ' and s. To overcome this issue.\n if 'VBZ' in bucket and line.words[bucket['VBZ']] == \"’\":\n question = question.replace(\" ’ \", \"'s \")\n questions.append(question)\n\n # Print the genetated questions as output.\n # if question != '':\n # print('\\n', 'Question: ' + question)\n # print('\\n', 'pattern_name: ' + pattern_name)\n keyword_Questions_dic[key] = questions.copy()\n questions.clear()\n\n\n except:\n # print(' ')\n # print(\"No Modal Questions Generated! Please revise your text.\")\n keyword_Questions_dic[key] = \"No Modal Questions Generated! 
Please revise your text.\"\n\n return (keyword_Questions_dic)", "def _find_magnet_strength(line):\n line = line.lower()\n matches = list(re.finditer(\n r\",\\s*k((?P<s>[ns])l:=\\{(?P<knl>[^\\}]+)\\}|(?P<n>\\d+s?):=(?P<k>[^,]+))\", line))\n\n if len(matches) > 0:\n magnet = re.match(r\"[\\w.]*\", line).group(0)\n\n knl_dict = {}\n for match in matches:\n if match.group(\"knl\") is not None:\n skew = \"S\" if match.group('s') == \"s\" else \"\"\n knls = match.group('knl').split(',')\n for n, knl in enumerate(knls):\n try:\n float(knl) # check could also be \"len(knl) > 1\"\n except ValueError:\n ########## HACK TO AVOID DIPOLES AS THEY ARE DEFINED BY LRAD!\n # TODO: Find a way to change dipoles in MADX!?\n if n == 0 and not re.search(r\":\\s*multipole\\s*,\", line):\n return None\n ##############################################################\n order = \"K{n:d}{skew:s}L\".format(n=n, skew=skew)\n knl_dict[order] = knl.replace(\" \", \"\")\n else:\n if match.group(\"n\") in ['0', '0s']:\n # dipole strength are defined by their angles\n knl = re.search(r\"angle\\s*:=\\s*([^,]+)\", line).group(1)\n else:\n length = \"l.\" + re.search(r\":(?!=)\\s*([^,]+)\", line).group(1)\n knl = \"({kn:s}) * {l:s}\".format(kn=match.group(\"k\"), l=length)\n\n order = \"K{:s}L\".format(match.group(\"n\").upper())\n knl_dict[order] = knl.replace(\" \", \"\")\n\n return magnet, knl_dict\n else:\n return None", "def eml_use_pseudowords_and_smooth(xi, yi, deml):\n if xi not in deml[yi]:\n xi = pw(xi) # use pseudo-word instead\n\n return (deml[yi][xi] + 1) / (sum(deml[yi].values()) + train_set_size)", "def WordSim(self,testDF,listCourse,inCourse):\r\n #Obtain a single vector embedding for each course description (calculated by taking an average of each word \r\n #embedding that makes up each description)\r\n \r\n #Get the embedding from the dictionary for the list (reference) course\r\n aVec = self.VDF[\"Word\"][listCourse]\r\n #Calculate the embedding with the doc2Vec model.\r\n bVec = self._WordSimAveVec(testDF,inCourse)\r\n #Convert vectors to column vectors to be fed into the cosine_similarity function.\r\n A = np.expand_dims(aVec,0)\r\n B = np.expand_dims(bVec,0)\r\n #Calculate the cosine similarity between the two vectors.\r\n sim = cosine_similarity(A,B)\r\n return float(sim)", "def cmu(df, mu, alphamu=0.0, alphacov=2.0):\r\n c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)\r\n # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)\r\n # print 'cmu =', c\r\n return c", "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')", "def _construct_mom_stuff(self):\n a = self.mom_mix_rate\n dist_mean = self.GN.dist_mean\n dist_cov = self.GN.dist_cov\n # Get the generated sample observations for this batch, transformed\n # linearly into the desired space for moment matching...\n X_b = T.dot(self.GN.output, self.mom_match_proj)\n # Get their mean\n batch_mean = T.mean(X_b, axis=0)\n # Get the updated generator distribution mean\n new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)\n # Use the mean to get the updated generator distribution covariance\n X_b_minus_mean = X_b - new_mean\n # Whelp, I guess this line needs the cast... 
for some reason...\n batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')\n new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)\n # Get the cost for deviation from the target distribution's moments\n mean_err = new_mean - self.target_mean\n cov_err = (new_cov - self.target_cov)\n mm_cost = self.mom_match_weight[0] * \\\n (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))\n # Construct the updates for the running estimates of the generator\n # distribution's first and second-order moments.\n mom_updates = OrderedDict()\n mom_updates[self.GN.dist_mean] = new_mean\n mom_updates[self.GN.dist_cov] = new_cov\n return [mm_cost, mom_updates]", "def infoGain(self,attr, data, target_attr):\n remainder = 0\n p = 0\n ent = 0\n for ele in target_attr:\n if ele == 1:\n p +=1\n \n q = p / (len(target_attr)) \n if 0 < q < 1:\n ent = -((q * math.log2(q)) + ((1-q) * math.log2(1-q))) \n \n unique = list(pd.unique(self.data_set[attr])) \n l = self.data_set[attr]\n for ele in unique:\n pk =0\n nk=0\n j=0\n for i in range (0, len(data)): #len (l) changed to len(data)\n j = j+1\n ele1 = l[i]\n if ele1 == ele:\n out = target_attr[i]\n if out == 1:\n pk += 1\n else:\n nk += 1\n if (pk+nk) != 0:\n q1 = pk / (pk +nk)\n if 0 < q1 < 1:\n e = -((q1 * math.log2(q1)) + ((1-q1) * math.log2(1-q1)))\n remainder += (pk + nk)/(len(target_attr)) * e\n \n return (ent - remainder, attr)", "def parse_MedMentions(kb_data):\n \n print(\"Parsing MedMentions corpus...\")\n output_MedMentions = dict()\n filepath = \"./retrieved_data/corpora/MedMentions/corpus_pubtator.txt\"\n \n with open(filepath, 'r', buffering=1, encoding=\"utf-8\") as corpus_file:\n\n for line in corpus_file:\n if \"|t|\" not in line and \"|a|\" not in line and line != \"\\n\":\n doc_id = line.split(\"\\t\")[0]\n annotation_str = line.split(\"\\t\")[3]\n umls_id =line.split(\"\\t\")[5].strip(\"\\n\")\n start_pos, end_pos = line.split(\"\\t\")[1], line.split(\"\\t\")[2]\n \n if umls_id in kb_data.umls_to_hp.keys(): # UMLS concept has an equivalent HPO concept\n hp_id = kb_data.umls_to_hp[umls_id]\n\n if hp_id in kb_data.child_to_parent.keys(): # Consider only HPO concepts with ONE direct ancestor\n direct_ancestor = kb_data.child_to_parent[hp_id].strip(\"\\n\")\n annotation = (annotation_str, start_pos, end_pos, hp_id, direct_ancestor)\n output_MedMentions = add_annotation_to_output_dict(doc_id, annotation, output_MedMentions)\n \n corpus_file.close()\n\n print(\"...Done!\")\n return output_MedMentions", "def mu(self):\n return self.mass * G", "def eval_mu(self, mu, T, Du, Dx):\n sample_u = mu[:, slice(Dx, Dx + Du)]\n l = 0.5 * np.sum(self._hyperparams['wu'] * (sample_u ** 2), axis=1)\n lu = self._hyperparams['wu'] * sample_u\n lx = np.zeros((T, Dx))\n luu = np.tile(np.diag(self._hyperparams['wu']), [T, 1, 1])\n lxx = np.zeros((T, Dx, Dx))\n lux = np.zeros((T, Du, Dx))\n return l, lx, lu, lxx, luu, lux", "def cmudf(df, mu, alphamu):\r\n return (alphamu + mu - 2. 
+ 1./mu) / (df + 4.*sqrt(df) + mu/2.)", "def DM_YMW16(self, source, distance):\n\n\n if not isinstance(distance, astropy.units.quantity.Quantity):\n # assume kpc\n distance=distance*u.kpc \n if (len(distance.shape)>0 and distance.value.any() <= 0) or (len(distance.shape)==0 and distance.value < 0):\n raise ValueError('distance must be > 0')\n\n if not isinstance(source, astropy.coordinates.sky_coordinate.SkyCoord):\n if isinstance(source,str):\n # assume .par file\n source=parfile2SkyCoord(source)\n else:\n raise TypeError('Do not know how to interpret an object of type %s' % source.__class__)\n source=source.galactic\n\n\n if len(source.l.shape)==0:\n results=ymw16.dmdtau_c(source.l.value,\n source.b.value,\n distance.to(u.pc).value,\n 2,\n self.datadir)\n\n return results*u.pc/u.cm**3,None\n else:\n dm=np.zeros_like(source.l.value)\n it = np.nditer(source.l, flags=['multi_index'])\n if not (len(distance.shape)==0 or distance.shape==source.l.shape):\n raise IndexError('Shape of distance must be scalar or the same as shape of coordinates')\n d=distance.to(u.pc).value\n while not it.finished:\n if len(d.shape)==0:\n d_touse=distance\n else:\n d_touse=distance[it.multi_index]\n results=ymw16.dmdtau_c(source[it.multi_index].l.value,\n source[it.multi_index].b.value,\n d_touse.to(u.pc).value,\n 2,\n self.datadir)\n \n dm[it.multi_index]=results\n it.iternext()\n \n return dm*u.pc/u.cm**3,None", "def metallicity(method, emsystem):\n if method == 'PG16':\n # Requires Hbeta, [OII], [OIII], [NII], [SII]\n R2 = (emsystem.get_emline('[OII] 3726').attrib['flux'] +\n emsystem.get_emline('[OII] 3729').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n R3 = (emsystem.get_emline('[OIII] 4959').attrib['flux'] +\n emsystem.get_emline('[OIII] 5007').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n N2 = (emsystem.get_emline('[NII] 6548').attrib['flux'] +\n emsystem.get_emline('[NII] 6584').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n S2 = (emsystem.get_emline('[SII] 6716').attrib['flux'] +\n emsystem.get_emline('[SII] 6731').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n # Proceed\n if np.log10(N2) < -0.6:\n r_val = 7.932 + 0.944*np.log10(R3/R2) + 0.695*np.log10(N2) + \\\n ((0.97 - 0.291*np.log10(R3/R2)) - 0.019*np.log10(N2))*np.log10(R2)\n\n s_val = 8.072 + 0.789*np.log10(R3/S2) + 0.726*np.log10(N2) + \\\n (1.069 - 0.170*np.log10(R3/S2) +0.022*np.log10(N2))*np.log10(S2)\n else:\n r_val = 8.589 + 0.022*np.log10(R3/R2) + 0.399*np.log10(N2) + \\\n (-0.137 + 0.164*np.log10(R3/R2) + 0.589*np.log10(N2))*np.log10(R2)\n\n s_val = 8.424 + 0.030*np.log10(R3/S2) + 0.751*np.log10(N2) + \\\n (-0.349 + 0.182*np.log10(R3/S2) +0.508*np.log10(N2))*np.log10(S2)\n return r_val.decompose().value, s_val.decompose().value", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))", "def init_sims(self, replace=False):\n 
super(FastTextKeyedVectors, self).init_sims(replace)\n if getattr(self, 'syn0_ngrams_norm', None) is None or replace:\n logger.info(\"precomputing L2-norms of ngram weight vectors\")\n if replace:\n for i in range(self.syn0_ngrams.shape[0]):\n self.syn0_ngrams[i, :] /= sqrt((self.syn0_ngrams[i, :] ** 2).sum(-1))\n self.syn0_ngrams_norm = self.syn0_ngrams\n else:\n self.syn0_ngrams_norm = \\\n (self.syn0_ngrams / sqrt((self.syn0_ngrams ** 2).sum(-1))[..., newaxis]).astype(REAL)", "def to_meme(self):\n motif_id = self.id.replace(\" \", \"_\")\n m = \"MOTIF %s\\n\" % motif_id\n m += \"BL MOTIF %s width=0 seqs=0\\n\"% motif_id\n m += \"letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\\n\" % (len(self), np.sum(self.pfm[0]))\n m +=\"\\n\".join([\"\\t\".join([\"%s\" % x for x in row]) for row in self.pwm])\n return m", "def constraint_B_k_mu_mu(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_mu_mu_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays close to the primary vertex\n pr_vis = 1.0 - np.exp(-B_k_mu_mu_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # print(pr_vis)\n # print(widths_s[\"mu mu\"] / width_s)\n\n # Compute the contribution to the mu mu decay width\n width_contr = self.width_B_k_s() * widths_s[\"mu mu\"] / width_s * pr_vis\n\n return B_k_mu_mu_obs.width_bound - width_contr", "def mu_en(matID, keV, density=None):\n mat = goodID(matID)\n if density == None:\n density = defaultDensity(matID)\n if np.isscalar(keV):\n energies = np.array([keV], dtype=np.double)\n else:\n energies = np.array(keV, dtype=np.double)\n _mu = np.array([xl.CS_Energy_CP(mat, eng) * density * u['cm'] for eng in energies])\n if np.isscalar(keV):\n return np.asscalar(_mu)\n else:\n return _mu", "def _evaluate(self):\n coherence = gensim.models.coherencemodel.CoherenceModel(model=self.ldamodel,\n corpus=self.gensim_corpus,\n dictionary=self.ldamodel.id2word,\n coherence='u_mass')\n self.score = coherence.get_coherence()\n if self.verbose:\n print('LDA achieved a coherence (u_mass) of: ', self.score)", "def get_information_gain(self, word, documents):\n gain = self.get_entropy(documents)\n with_word, without_word = self.get_split_data(word, documents)\n gain -= self.get_entropy(with_word) * len(with_word) / len(documents)\n gain -= self.get_entropy(without_word) * len(without_word) / len(documents)\n return gain", "def normalize():\n\n for k in allkernels:\n beta = 0.0\n for key,value in grammar.items():\n if key.startswith(k):\n beta += value\n print()\n for key,value in grammar.items():\n if key.startswith(k):\n value = float(value)/beta\n grammar[key] = value\n for key,value in grammar.items():\n print()\n print(key.ljust(30),str(value))", "def norm(self):\n # TODO: implement\n return", "def __init__(self):\n self._pronunciations = nltk.corpus.cmudict.dict()\n \"\"\"\n API Documentation for CMU dictionary corpus\n http://www.nltk.org/api/nltk.corpus.reader.html#module-nltk.corpus.reader.cmudict\n \"\"\"", "def mel_spectrogram(self, y):\n assert torch.min(y.data) >= -1\n assert torch.max(y.data) <= 1\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n energy = 
torch.norm(magnitudes, dim=1)\n return mel_output, energy", "def update_entropy(self, save=True):\n\n #min_consensus = self.mturk_assignment.hit.hit_type \\\n #.experiment_settings.min_output_consensus\n min_consensus = 3\n\n # update substance label and entropy\n self.substance = None\n substances = self.substances.filter(invalid=False) \\\n .values_list('substance_id', flat=True)\n if substances:\n self.substance_entropy = compute_entropy(substances)\n hist = Counter(substances).most_common(2)\n substance_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.substance_id = substance_id\n self.quality_method = 'M'\n\n # update name label and entropy\n self.name = None\n names = self.names.filter(invalid=False) \\\n .values_list('name_id', flat=True)\n if names.exists():\n self.name_entropy = compute_entropy(names)\n hist = Counter(names).most_common(2)\n name_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.name_id = name_id\n self.quality_method = 'M'\n\n # update rectified normal\n self.rectified_normal = None\n if self.planar:\n for n in self.rectified_normals.all():\n if n.better_than(self.rectified_normal):\n self.rectified_normal = n\n if self.rectified_normal and not self.rectified_normal.correct:\n self.rectified_normal = None\n\n # update bsdf\n self.bsdf_wd = None\n for b in self.bsdfs_wd.all():\n if b.gloss_correct and b.color_correct and b.better_than(self.bsdf_wd):\n self.bsdf_wd = b\n\n if save:\n self.save()", "def get_LDAU(self):\n\n # let's simply use the default as a first step\n LDAU_dict, poscar_need_hack, potcar_need_hack = super(U_Strategy_MaterialsProject_V2, self).get_LDAU()\n\n Na_indices = self.structure.indices_from_symbol('Na')\n\n # hack MAGMOM\n list_oxidizable_site_indices = self.sort_TM_sites_by_Na_distance(Na_indices)\n\n MAGMOM = self.build_magmom(list_oxidizable_site_indices)\n LDAU_dict['MAGMOM'] = MAGMOM \n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def attenuationLength(matID, keV, density=None):\n return 1.0 / mu(matID, keV, density)", "def inference(self):\n for m, doc in enumerate(self.docs):\n # Be careful followings are views\n # So self.hoge will be change, when changing variant\n zs_j = self.zs_m_j[m]\n zk_j = self.zk_m_j[m]\n n_m_zs = self.n_m_zs[m]\n n_m_zk = self.n_m_zk[m]\n for j, t in enumerate(doc):\n # discount for n-th word t with topic z\n zs = zs_j[j]\n zk = zk_j[j]\n n_m_zs[zs] -= 1\n n_m_zk[zs, zk] -= 1\n self.n_zk_t[zk, t] -= 1\n self.n_zk[zk] -= 1\n\n # sampling topic new_z for t\n \"\"\"\n n_s = n_m_zs + self.alphas # mth doc, S vec\n p_s = n_s / np.sum(n_s)\n n_k = n_m_zk + self.alphask # mth doc, SxK matrix\n p_k = n_k / n_s.reshape(len(n_s), 1)\n n_v = self.n_zk_t[:, t] + self.beta\n p_v = n_v / (self.n_zk + self.beta)\n\n p_zsk = p_s.reshape(len(p_s), 1) * p_k * p_v # SxK matrix\n \"\"\"\n\n p_zsk = (n_m_zk + self.alphask) * self.n_zk_t[:, t] \\\n / (np.sum(n_m_zs + self.alphas) * self.n_zk)\n\n p_zs = np.sum(p_zsk, axis=1) / np.sum(p_zsk)\n p_zk = np.sum(p_zsk, axis=0) / np.sum(p_zsk)\n\n new_zs = np.random.multinomial(1, p_zs).argmax()\n new_zk = np.random.multinomial(1, p_zk).argmax()\n\n # print(\"arg\", np.argmax(p_s), np.argmax(p_k, axis=1),\n # np.argmax(p_k, axis=0), np.argmax(p_zk))\n # print('probs', p_s, p_zs)\n # print('probk', p_k, p_zk)\n # print('old', zs, zk)\n # 
print('new', new_zs, new_zk)\n\n # set z the new topic and increment counters\n zs_j[j] = new_zs\n zk_j[j] = new_zk\n n_m_zs[new_zs] += 1\n n_m_zk[new_zs, new_zk] += 1\n self.n_zk_t[new_zk, t] += 1\n self.n_zk[new_zk] += 1", "def simple_lemmatise(subword_dict):\n return norm_word_id(subword_dict[\"word_id\"])", "def __init__(self, n, sents, corpus='', gamma=None, addone=True):\n self.n = n\n self.smoothingtechnique = 'Interpolated (Jelinek Mercer) Smoothing'\n self.gamma = gamma\n self.addone = addone\n self.counts = counts = defaultdict(int)\n self.gamma_flag = True\n self.corpus = corpus\n # way more efficient than use set unions\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = list(set(voc))\n\n if gamma is None:\n self.gamma_flag = False\n\n # if not gamma given\n if not self.gamma_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent for training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n\n for sent in train_sents:\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(train_sents)\n # variable only for tests\n self.tocounts = counts\n # search the gamma that gives lower perplexity\n gamma_candidates = [i*50 for i in range(1, 15)]\n # xs is a list with (gamma, perplexity)\n xs = []\n sents = train_sents\n for aux_gamma in gamma_candidates:\n self.gamma = aux_gamma\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_gamma, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.gamma = xs[0][0]\n with open('old-stuff/interpolated_' + str(n) + '_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Gamma: {}\\n'.format(self.gamma))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n\n else:\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n sents = list(map((lambda x: x + ['</s>']), sents))\n\n for sent in sents:\n # counts now holds all k-grams for 0 < k < n + 1\n for j in range(n+1):\n # move along the sent saving all its j-grams\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # added by hand\n counts[('</s>',)] = len(sents)", "def GetNormal(self):\n ...", "def norm(self):\n self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))\n self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))", "def norm(self):\n raise NotImplementedError", "def get_stats_SWE(self, kelly_list, svalex_list, svalex2_list, word_pictures={}, use_deprel=True,use_ngrams=False):\n s = self.sent\n root_ref = check_root(s)\n\n tokens = []\n\n verb_args = {} #{\"verb1\": {arg1_pos:\"PN\", arg1_deprel:\"SS\", etc.},..}\n self.stats[\"finite\"] = []\n self.stats[\"heads\"] = {}\n self.stats[\"keyword\"] = {}\n self.stats[\"has_subject\"] = 0\n self.stats[\"used_rel_lemmas\"] = [] \n\n # Collect the position ('ref') of the dependency head of each token\n # +1 compared to regular indexes, string type\n for tkn in s.nodes:\n if tkn.deprel == \"ROOT\":\n put_feature_value_list(self.stats[\"heads\"], tkn.ref, tkn) #\"ROOT\"\n else:\n put_feature_value_list(self.stats[\"heads\"], 
tkn.depheadid, tkn)\n\n for i,t in enumerate(s.nodes):\n\n mapped_token = map_Token_to_dict(t) #just a fix, see dset_proc_aux.py\n tokens.append(mapped_token)\n\n #get statistics from different liguistic levels\n self.stats = self.get_len_stats(t)\n self.stats = self.get_kelly_stats(t, kelly_list)\n if svalex_list:\n self.stats = self.get_svalex_stats(t, svalex_list)\n if svalex2_list:\n self.stats = self.get_svalex2_stats(t, svalex2_list)\n self.stats = self.get_semantic_stats(t)\n self.stats = self.get_morpho_synt_stats(s,t,i)\n if self.params:\n self.stats = self.get_sentmatch_stats(t,s.nodes,word_pictures)\n \n #add lemma unigrams\n lm_ngram = get_lemma_ngrams(s, t, i, \"uni\")\n if lm_ngram:\n put_feature_value_list(self.stats,\"lemma_unigr\", lm_ngram)\n\n if use_ngrams: #bi- and trigrams \n self.stats = get_ngrams(self.stats,s,t,i)\n\n if use_deprel:\n self.stats = self.deprel_stats(t,root_ref, verb_args)\n\n #fix for JSON serialization issue with the Token class\n self.stats[\"tokens\"] = s.nodes # retain a copy of Token instances\n s.nodes = tokens # change Token instances to 'dict'-s\n \n return self.stats", "def sample_cmal(model: 'BaseModel', data: Dict[str, torch.Tensor], n_samples: int,\n scaler: Dict[str, Union[pd.Series, xarray.Dataset]]) -> Dict[str, torch.Tensor]:\n setup = _SamplingSetup(model, data, \"cmal\")\n\n # force model into train mode if mc_dropout\n if setup.mc_dropout:\n model.train()\n\n # make predictions:\n pred = model(data)\n\n # sample for different frequencies:\n samples = {}\n for freq_suffix in setup.freq_suffixes:\n # get predict_last_n for the given the mode:\n frequency_last_n = setup._get_frequency_last_n(freq_suffix=freq_suffix)\n\n # CMAL has 4 parts: means (m/mu), scales (b), asymmetries (t/) and weights (p/pi):\n m = pred[f'mu{freq_suffix}']\n b = pred[f'b{freq_suffix}']\n t = pred[f'tau{freq_suffix}']\n p = pred[f'pi{freq_suffix}']\n\n sample_points = []\n for nth_target in range(setup.number_of_targets):\n # sampling presets:\n m_target = _subset_target(m[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n b_target = _subset_target(b[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n t_target = _subset_target(t[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n p_target = _subset_target(p[:, -frequency_last_n:, :], nth_target, setup.cfg.n_distributions)\n\n m_target = torch.repeat_interleave(m_target, n_samples, dim=0)\n b_target = torch.repeat_interleave(b_target, n_samples, dim=0)\n t_target = torch.repeat_interleave(t_target, n_samples, dim=0)\n p_target = torch.repeat_interleave(p_target, n_samples, dim=0)\n\n # sampling procedure:\n values = torch.zeros((setup.batch_size_data * n_samples, frequency_last_n)).to(setup.device)\n values *= torch.tensor(float('nan')) # set target sample_points to nan\n for nth_timestep in range(frequency_last_n):\n\n mask_nan = ~torch.isnan(p_target[:, nth_timestep, 0])\n if any(mask_nan): # skip if the complete mini-batch is invalid\n sub_choices = torch.multinomial(p_target[mask_nan, nth_timestep, :], num_samples=1)\n t_sub = t_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n m_sub = m_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n b_sub = b_target[mask_nan, nth_timestep, :].gather(1, sub_choices)\n\n ids = np.ones(b_sub.shape, dtype=bool)\n values_unbound = _sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub)\n values[mask_nan, nth_timestep] = _handle_negative_values(\n setup.cfg,\n values_unbound,\n sample_values=lambda ids: 
_sample_asymmetric_laplacians(ids, m_sub, b_sub, t_sub),\n scaler=scaler,\n nth_target=nth_target)\n\n # add the values to the sample_points:\n values = values.permute(1, 0).reshape(frequency_last_n, -1, n_samples).permute(1, 0, 2)\n values = values.detach().cpu()\n sample_points.append(values)\n\n # add sample_points to dictionary of samples:\n freq_key = f'y_hat{freq_suffix}'\n samples.update({freq_key: torch.stack(sample_points, 2)})\n return samples", "def similarity(query,id):\n similarity = 0.0\n for term in query:\n if term in dictionary:\n similarity += inverse_document_frequency(term)*imp(term,id)\n similarity = similarity / length[id]\n return similarity", "def makeD2MuMuMuMu(name,inputSel) :\n from Configurables import OfflineVertexFitter\n D2MuMuMuMu = CombineParticles(\"Combine\"+name)\n\n D2MuMuMuMu.DecayDescriptor = \"D0 -> mu+ mu- mu+ mu-\"\n D2MuMuMuMu.addTool( OfflineVertexFitter )\n D2MuMuMuMu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n\n D2MuMuMuMu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 3.0 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 4.)\"\\\n \" & (TRGHOSTPROB<0.3) \"\\\n \" & (P> 3000.*MeV)\"}\n\n D2MuMuMuMu.CombinationCut = \"(ADAMASS('D0')<300*MeV) \"\\\n \"& (AMAXDOCA('')<0.2*mm) \"\n\n \n D2MuMuMuMu.MotherCut = \"(VFASPF(VCHI2/VDOF)<12.) \"\\\n\t\t\t \"& (BPVVDZ > 0.) \" \\\n\t\t\t \"& (MIPCHI2DV(PRIMARY) < 25. )\"\n\n return Selection (name,\n Algorithm = D2MuMuMuMu,\n RequiredSelections = inputSel)", "def fit_normal(distogram):\n L = distogram.shape[1]\n params = torch.empty((3, L, L))\n \n for i in range(L):\n for j in range(L):\n m, s = calc_moments(distogram[:, i, j])\n scalar = torch.max(distogram[:, i, j]) / normal_distr(m, m, s)\n params[0, i, j], params[1, i, j], params[2, i, j] = m, s, scalar\n \n return params", "def adaSynAdd(self, data, labels):\n r = {}\n g = {}\n rnorm = {}\n rsum = 0\n self.fit(data, labels)\n self.densityclf = neighbors.KNeighborsClassifier(n_neighbors=self.k) \n self.densityclf.fit(data, labels)\n \n #Note that this is an alternative approach for extracting the minority examples\n #in the *same* order as described in smoteTransform.fit()\n for index in xrange(0, len(data)):\n if labels[index] == abs(1 - self.minorityLabel):\n continue\n \n nrpoints = self.densityclf.kneighbors(data[index,:], return_distance=False)\n nrpoints = numpy.setdiff1d(nrpoints, [index])\n if self.minorityLabel == 1:\n num_majority = self.k - numpy.count_nonzero(labels[nrpoints])\n else:\n num_majority = numpy.count_nonzero(data[nrpoints])\n \n r[index] = float(num_majority) / float(self.k)\n assert(r[index] >= 0)\n \n \n for k, v in r.viewitems(): \n #print(k,v)\n rsum += v\n for k, v in r.viewitems():\n rnorm[k] = r[k] / rsum\n \n rnormsum = 0\n for k, v in rnorm.viewitems(): rnormsum += v\n #print(rnormsum)\n \n #m = mj + ml, -> if mj = m - ml, mj - ml = m - 2(ml)\n #where len(data) = m and len(r) = mj\n \n #Number of synthetic samples to generate\n G = float(len(data) - len(r) - len(r)) * float(self.beta)\n index = 0\n numNewPoints = 0\n #Convert normalised density distribution values to the number of values\n #to generate for each minority sample.\n for k, v in rnorm.viewitems():\n g[index] = int(round(rnorm[k] * G))\n numNewPoints += g[index]\n index += 1\n \n #print(numNewPoints)\n #print(self.minorityData)\n #Use this information to the smoteTransform transfer function.\n #for k, v in g.viewitems(): print(k,v)\n #len(g)\n #len(data[labels == 1])\n assert len(g) == len(data[labels == 1]), \"length of g ({0}) is different from 
num_minority ({1})\".format(len(g), len(data[labels == 1]))\n return self.transform(numRepeatArray = g)", "def Langmuir_Knudsen_mdot(D, T_p, Psat, Re, mu_g, cp_g, lambda_g, P_g, R_g, Sc_g, R_v, Yinf):\n Pr_g = mu_g * cp_g / lambda_g # Gas Prandtl Number\n Sh = 2.0 + 0.552 * math.sqrt(Re) * Sc_g ** (1.0/3.0) \n Re_b = 0.0 #Blowing Reynolds number \n Re_b0 = Re_b \n Xseq = min(Psat / P_g, 1.0) #Molar mass fraction\n theta2 = R_v / R_g ;\n Yseq = min(Xseq /max(Xseq + (1.0 - Xseq) * theta2, 1e-30), 1.0) \n yMin = min(Yseq, Yinf) \n yMax = max(Yseq, Yinf) \n\n # Iterate to converge on Re_b\n # This part could be optimized\n Lk = computeLK(T_p,R_v,mu_g,Sc_g,P_g) \n Re_b0 = estimate_re_b(yMin, yMax, Yinf, Sh, Sc_g)\n \n max_solver_iterations = 100\n for i in range(max_solver_iterations): \n Ysneq = computeYsneq(Xseq,Lk,D,theta2,Pr_g,Re_b) \n #Bound Ysneq so that it lies between Yseq and Yinf\n Ysneq = max(yMin, min(yMax, Ysneq)) \n BMneq = (Ysneq - Yinf) / max(1.0 - Ysneq, 1e-30) \n Hm = math.log(max(1.0 + BMneq, 1e-40)) \n Re_b0 = Re_b \n Re_b = Hm * Sh / Sc_g \n factor = min(0.8, 0.5 * D / Lk) #Damping factor\n\n if i >= max_solver_iterations:\n print('Mdot Calculation failed to converge')\n\n if abs(Re_b - Re_b0) < 1.0e-6:\n break \n\n #Relax update to help convergence\n Re_b = factor * Re_b + (1.0 - factor) * Re_b0 \n \n #Chris debug\n beta = 0.5 * Pr_g * Re_b ;\n if i > -1: \n format_string = 'i= {0:<4d} Re_b= {1:<8.4f} Ysneq= {2:<8.4f} Hm= {3:<8.4f} ' \\\n 'BMneq= {4:<8.4f} Lk= {5:<8.4e} Lk/D= {6:<8.4e} factor= {7:<8.4e} beta= {8:<8.4f}'\n print(format_string.format(i, Re_b, computeYsneq(Xseq,Lk,D,theta2,Pr_g,Re_b), Hm, BMneq, Lk, Lk/D, factor, beta))\n \n # Back out mdot from blowing reunolds number\n mdot = -Re_b * D * mu_g * math.pi \n return mdot", "def test_norm(self):\n self.assertEqual(\"Maximaal 3 dagen. 
Meer dan 7 dagen is rood.\", self.__metric.norm())", "def get_vectors_and_labels_self():\n pos_t, pos_post_t = ngram.generate_n_gram_dict(ds.POS_DICT_SELF, 1)\n neg_t, neg_post_t = ngram.generate_n_gram_dict(ds.NEG_DICT_SELF, 1)\n neu_t, neu_post_t = ngram.generate_n_gram_dict(ds.NEU_DICT_SELF, 1)\n ds.POS_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_UNI_GRAM, pos_t)\n ds.NEG_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_UNI_GRAM, neg_t)\n ds.NEU_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_UNI_GRAM, neu_t)\n ds.POS_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_POST_UNI_GRAM, pos_post_t)\n ds.NEG_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_POST_UNI_GRAM, neg_post_t)\n ds.NEU_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_POST_UNI_GRAM, neu_post_t)\n temp_pos_dict = ds.POS_DICT.copy()\n temp_neg_dict = ds.NEG_DICT.copy()\n temp_neu_dict = ds.NEU_DICT.copy()\n temp_pos_dict_self = ds.POS_DICT_SELF.copy()\n temp_neg_dict_self = ds.NEG_DICT_SELF.copy()\n temp_neu_dict_self = ds.NEU_DICT_SELF.copy()\n temp_pos_dict_final = {}\n temp_neg_dict_final = {}\n temp_neu_dict_final = {}\n temp_pos_dict_final.update(temp_pos_dict)\n temp_neg_dict_final.update(temp_neg_dict)\n temp_neu_dict_final.update(temp_neu_dict)\n temp_pos_dict_final.update(temp_pos_dict_self)\n temp_neg_dict_final.update(temp_neg_dict_self)\n temp_neu_dict_final.update(temp_neu_dict_self)\n pos_vec, pos_lab = load_matrix_sub(temp_pos_dict_final, cons.LABEL_POSITIVE, True)\n neg_vec, neg_lab = load_matrix_sub(temp_neg_dict_final, cons.LABEL_NEGATIVE, True)\n neu_vec, neu_lab = load_matrix_sub(temp_neu_dict_final, cons.LABEL_NEUTRAL, True)\n ds.VECTORS_SELF = pos_vec + neg_vec + neu_vec\n ds.LABELS_SELF = pos_lab + neg_lab + neu_lab\n return is_success" ]
[ "0.5616423", "0.5585292", "0.5472835", "0.53419083", "0.5322406", "0.5318265", "0.5269511", "0.526485", "0.5195288", "0.5182691", "0.51384085", "0.5133426", "0.5114074", "0.5107224", "0.5082305", "0.50695926", "0.5055395", "0.50359356", "0.5018013", "0.5010474", "0.50092155", "0.5006338", "0.49920177", "0.49910104", "0.49533874", "0.4950662", "0.49401516", "0.49255636", "0.49231446", "0.49157473", "0.49120337", "0.49117056", "0.49082106", "0.4902918", "0.4895348", "0.48939475", "0.48923418", "0.48912162", "0.48900908", "0.48826316", "0.48704568", "0.48616034", "0.48555163", "0.48477298", "0.4846134", "0.4842745", "0.48368254", "0.48334137", "0.48316443", "0.48280126", "0.4825867", "0.48251846", "0.48196873", "0.48185742", "0.48179847", "0.48157337", "0.48126248", "0.48114735", "0.48092732", "0.4802723", "0.4792061", "0.47805476", "0.47803992", "0.4775752", "0.47754914", "0.47706816", "0.4761288", "0.47597596", "0.4758519", "0.47537667", "0.47532848", "0.47518557", "0.47449338", "0.47416618", "0.47411662", "0.47253934", "0.47239357", "0.47179523", "0.47163114", "0.47153845", "0.47080645", "0.47028852", "0.47012854", "0.4697897", "0.46944663", "0.46937087", "0.46927065", "0.46869317", "0.46789128", "0.4678248", "0.4675532", "0.46631286", "0.4662585", "0.4655852", "0.46532527", "0.4653127", "0.4648868", "0.46390653", "0.46373862", "0.46373525" ]
0.51792824
10
an RPA that adds a word to Anki
def add_word(word, option, scraped_info, t_sleep=2.75):
    subprocess.Popen('C:\\Program Files\\Anki\\anki.exe')  # opening the anki program
    time.sleep(t_sleep+5)
    focus_screen()
    time.sleep(t_sleep)
    pyautogui.hotkey('a')  # opening the add window - in the front area
    n_example = len(glob.glob(f'./words/{word}/meaning_{option}/example[0-9].txt'))  # numbers of examples
    time.sleep(t_sleep)
    pyautogui.write(word + '\n')
    try:  # try to write the inflections
        with open(f'./words/{word}/inflections.txt') as file:  # add inflection (if exist)
            pyautogui.write('Inflections: ' + file.readline() + '\n\n')
    except FileNotFoundError:  # inflections not found, pass
        pass
    if scraped_info['searched word']['mp3'] != None:  # adding the word pronunciation
        pyautogui.hotkey('f3')  # attach picture/audio/video
        time.sleep(t_sleep)
        pyautogui.hotkey('ctrl', 'l')  # path insert mode
        pyautogui.write(os.getcwd() + f'\\words\\{word}')
        time.sleep(t_sleep)
        pyautogui.press('enter')
        time.sleep(t_sleep)
        pyautogui.hotkey('alt', 'n')
        time.sleep(t_sleep)
        pyautogui.write(f'{word}.mp3')
        time.sleep(t_sleep)
        pyautogui.press('enter')
    for example_number in range(n_example):
        with open(f'./words/{word}/meaning_{option}/example{example_number}.txt', 'r') as file:
            pyautogui.write(('\n' if example_number!=0 else '') + f'Example {example_number+1}:' + next(file) + '\n')  # write the example
        pyautogui.hotkey('f3')  # attach picture/audio/video
        time.sleep(t_sleep)
        pyautogui.hotkey('ctrl', 'l')  # path insert mode
        pyautogui.write(os.getcwd() + f'\\words\\{word}\\meaning_{option}')
        time.sleep(t_sleep)
        pyautogui.press('enter')
        time.sleep(t_sleep)
        pyautogui.hotkey('alt', 'n')
        time.sleep(t_sleep)
        pyautogui.write(f'example{example_number}.mp3')
        time.sleep(t_sleep)
        pyautogui.press('enter')
    time.sleep(t_sleep)
    pyautogui.press('tab')  # switch to back
    with open(f'./words/{word}/meaning_{option}/meaning{option}.txt') as file:
        pyautogui.write(next(file))  # insert the meaning
    time.sleep(t_sleep)
    pyautogui.press('tab')  # switch to back
    with open(f'./words/{word}/meaning_{option}/tag.txt') as file:
        pyautogui.write(next(file) + ' [CLAC]')  # insert the vim
    time.sleep(t_sleep)
    pyautogui.press('tab')  # switch to back
    time.sleep(t_sleep)
    pyautogui.press('enter')
    time.sleep(t_sleep)
    pyautogui.press('esc')
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def addWord(self, word: str) -> None:\n tmp = self.root\n for i, letter in enumerate(word):\n if letter not in tmp.seq:\n tmp.seq[letter] = Node()\n \n tmp = tmp.seq[letter]\n \n tmp.value = word", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def add_word(word : str = typer.Argument(..., help=\"Adds a word into the trie\")):\n response_url = url + \"/add-word/\" + word\n response = requests.post(response_url)\n # typer.echo(response.status_code)\n typer.echo(response.json()[\"status\"])", "def addWord(self, word): \n\n # make trie for new word\n self.root.makeTrie(word)", "def addWord(self, word: str) -> None:\n self.trie.insert(word)", "def addWord(self, word):\n node = self\n for c in word:\n node = node[c]\n node.go += 1", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1", "def add_word(self, word):\r\n word = word.strip().lower()\r\n if word in self.builtin_words:\r\n return\r\n if word not in self.word_count:\r\n self.word_count[word] = 1\r\n else:\r\n self.word_count[word] += 1", "def addWord(self, word: str) -> None:\n trie = self.trie\n for c in word:\n trie = trie[c]\n trie[None] = None", "def add_word(self, word):\n word = word.lower()\n if word in self.word_list:\n self.word_list[word] += 1\n else:\n self.word_list[word] = 1", "def addWord(self, word):\n trie = self.trie\n for c in word:\n if c in trie.children:\n trie = trie.children[c]\n else:\n new_trie_node = TrieNode()\n trie.children[c] = new_trie_node\n trie = new_trie_node\n\n trie.is_term = True", "def next_word(self):\n self.append = self.add_new_word", "def new_match(self, new_word): \n self.rhyming_words.append(new_word)", "def addWord(self, word):\n current_node = self\n for idx, letter in enumerate(word):\n if letter not in current_node.kids:\n current_node.kids[letter] = WordDictionary()\n current_node.kids[letter].val = letter\n current_node = current_node.kids[letter]\n if idx == len(word) - 1:\n current_node.isWord = True", "def add(self, word: str) -> None:\n self.d.add(word)", "def addKeyWord(self, kWord):\n #kWord.printKeyWord()\n self.sentence.append(kWord)", "def _add_word(self, word):\n if not word in self._word2idx.keys():\n self._word2idx[word] = self.vocab_size\n self.freqs[word] = 0\n self._idx2word[self.vocab_size] = word\n self.vocab_size += 1\n self.freqs[word] += 1", "def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word", "def addWord(self, word: str) -> None:\n trav = self.root\n \n for c in word:\n if c not in trav.childs:\n trav.childs[c] = self.Node()\n trav = trav.childs[c]\n \n trav.rec += 1", "def add_translation( documentName, word):\r\n transref = \":lex_\" + word.word +\" a ontolex:LexicalEntry;\\n\"\r\n formref = \" ontolex:canonicalForm :form_\" + word.word + \" .\\n \\n\"\r\n repref = \":form_\" + word.word +\" a ontolex:Form;\\n\"\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += word.word + \"\\\"\" + word.writingLanguage + \" .\"\r\n translateEntry = transref + formref + repref + 
writtenRepRef\r\n with open(documentName, 'a') as f:\r\n f.write(translateEntry)\r\n f.write(\"\\n\\n\")\r\n return", "def add_word(self,word,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].add(str(index))\r\n else:\r\n self.word_dict[word] = {str(index)}\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_words \"\r\n +\"(word, notebook)\"\r\n +\" VALUES (?,?);\",value_tuple)\r\n value_tuple = (notebookname, word, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO word_to_indexes \"\r\n +\"(notebook, word, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def addWord(self, word: str) -> None:\n node = self.root\n \n for c in word:\n node = node.children[c]\n \n node.word = True", "def addWord(self, word: str) -> None:\n self.dict[len(word)].append(word)", "def add(trie, word, weight):\n\ttrie.insert(word,weight)#we can do this since in my insert function it helps me update weight\n\treturn trie", "def translate_leet(phrase):", "def addWord(self, word: str) -> None:\n trie = self.trie\n for c in word:\n if c not in trie:\n trie[c] = dict()\n trie = trie[c]\n trie['#'] = '#'", "def add_word(self, word, freq=None):\n pass", "def append_rephrase(self, qn):\n # TODO:\n pass", "def add(self, word: str) -> None:\n self.d.add(word)\n self.d.add(word.lower())\n self.save_user_dict()", "def addWord(self, word: str) -> None:\n current = self.root\n for letter in word:\n if letter not in current.children:\n current.children[letter] = TrieNode(letter)\n\n current = current.children[letter]\n\n current.is_word = True", "def addWord(self, word):\n ptr = self.root\n for k in word:\n if k in ptr.child:\n ptr = ptr.child[k]\n else:\n ptr.child[k] = TrieNode()\n ptr = ptr.child[k]\n ptr.isEnd = True", "def addWord(self, word, value):\n # check for spaces in word...\n # dunno if aggro should change on phrases\n if ' ' in word or type(value).__name__ != 'int':\n return None\n try:\n aggroFile = open(aggroMgr.__file, 'a')\n newword = word + ' ' + repr(value)\n aggroFile.write('\\n' + newword)\n aggroFile.close()\n ## add new word to _aggroWords\n self._addAggroWd(newword)\n except IOError:\n print aggroMgr.__file + ': cannot be opened'", "def add(self, word):\r\n if not word or word.strip() == '':\r\n return\r\n self.words.append(word)", "def addWord(self, word):\n cursor = self.root\n for char in word:\n if char not in cursor.children:\n cursor.children[char] = TrieNode(char)\n cursor = cursor.children[char]\n cursor.size += 1", "def add_phrase(self, n, num):\n raise NotImplementedError()", "def addWord(self, word):\n curr = self.trie\n\n for c in word:\n if c not in curr.children:\n curr.children[c] = TrieNode()\n\n curr = curr.children[c]\n\n curr.is_terminal = True", "def addWord(wmap, tok, lem):\n\n if (not tok in tt.setStopWords) and (not (tok.isupper() and tok.lower() in tt.setStopWords)): #Don't add stopwords - but be carful US vs us\n olem = lem\n lem = lem.lower() # makes many things simpler\n if tok in wmap: # tok is mapped already..., this is needed, sometimes the lemmatizing is inconsistent, eg. \"prototyping\" might go to \"prototyping\" or \"prototype\"\n if wmap[tok] != lem: #token exists in map, but is mapped differently\n clem = wmap[tok]\n if len(lem) < len(clem): ##new word is shorter (usually this means no plural form or so), eg. 
houses vs house\n if not clem in wmap or wmap[clem] == clem: #if not exists, add new mapping from old lemma of word to new lemma,eg. if mwords[Houses]=houses then we add mwords[houses]=house\n wmap[clem] = lem\n else:\n if not lem in wmap or wmap[lem] == lem: #existing lemma is shorter, we map to new lemma to the existing one\n wmap[lem] = wmap[tok]\n lem = wmap[tok]\n wmap[tok] = lem\n wmap[lem] = lem # a lemma maps to itself (maybe difference in capitalization)\n if olem != lem: wmap[olem] = lem # a lemma maps to itself\n if len(tok) > len(lem) and not tok.islower(): #if have Responsibilities -> responsibility, than add responsibilities -> responsibility, the \">=\" might be changed to \">\" without much loss\n addWord(wmap,tok.lower(),lem)", "def addWord(self, word: 'str') -> 'None':\n p=self.dictword\n for s in word:\n if s not in p:\n p[s]={}\n p=p[s]\n else:\n p=p[s]\n p['#']=None", "def addWord(self, word: str) -> None:\n curr = self.trie\n for char in word:\n if char not in curr:\n curr[char] = {}\n curr = curr[char]\n curr['$'] = {}", "def addWord(self, word):\n if word:\n self.word_dict[len(word)].append(word)", "def add_word_to_dictionary(self, word: str):\n self.trie.add_word(word)", "def addWord(self, word):\n node=self.root\n for c in word:\n if c not in node.children: node.children[c]=TrieNode()\n node=node.children[c]\n node.isWord=True", "def addWord(self, word):\n node = self.root\n for ch in word:\n node = node.children[ch]\n node.isWord = True", "def addWord(self, word: str) -> None:\n root = self.root\n for w in word :\n if not w in root.children :\n root.children[w] = TrieNode()\n root = root.children[w]\n \n root.isEnd = True", "def addWord(self, word: str) -> None:\n\n temp = self.start\n for i in range(len(word)):\n if temp.children[ord(word[i])-ord('a')] is None:\n temp.add_children()\n\n temp = temp.children[ord(word[i])-ord('a')]\n if i+1 == len(word):\n temp.end = True", "def _do_word(self, word, lemma=None, type=None):\n # Improve 3rd person singular \"'s\" lemma to \"be\", e.g., as in \"he's fine\".\n if lemma == \"'s\" and type in (\"VB\", \"VBZ\"):\n lemma = \"be\"\n self.words.append(Word(self, word, lemma, type, index=len(self.words)))", "def addWord(self, word: str) -> None:\n # Find split node\n word += '0'\n curr = self.trie\n i = 0\n while i < len(word) and word[i] in curr:\n curr = curr[word[i]]\n i += 1\n \n # Add the rest of the word\n while i < len(word):\n curr[word[i]] = {}\n curr = curr[word[i]]\n i += 1", "def _add(self, irc, msg, args, words):\n \"\"\"Compare:\n user : bot: punny add fin even\n user : bot: punny squid What is this, I don't even.\n bot : user: What is this, I don't fin.\n To:\n user : bot: punny add fin even efin\n user : bot: punny squid What is this, I don't even.\n bot : user: What is this, I don't efin.\n \"\"\"\n if words is None:\n irc.reply(see_help)\n return\n # TODO: Use 'spiced up' success messages\n try:\n # TODO: Check if it exists (probably just\n # implement in local.punny module?)\n self.pungen.add_pun(*words.split())\n self._save()\n irc.reply(conf.supybot.replies.success)\n except:\n # TODO: Log this\n irc.reply(conf.supybot.replies.error)", "def add_input(input_string, trie):\n trie.insert(input_string) # add name to Trie", "def addWord(self, word: str) -> None:\n cur = self.root\n for c in word:\n cur = cur.children[c]\n cur.end = True", "def add_word_to_trigram(new_word):\n # Trigrams require 2 previous words\n # If we don't have those yet, then set them\n if len(prev_words) < 2:\n 
prev_words.append(new_word)\n return\n\n # If it exists, add the word to the list\n # If it doesn't exist, create it\n word_tuple = (prev_words[0], prev_words[1])\n if word_tuple in trigrams:\n trigrams[word_tuple].append(new_word)\n else:\n trigrams[word_tuple] = [new_word]\n\n # Increment the prev words\n prev_words.pop(0)\n prev_words.append(new_word)", "def add(self, word, page, val):\n currnode = self.root\n\n for letter in word:\n if letter not in currnode.children:\n currnode.children[letter] = TrieNode()\n currnode = currnode.children[letter]\n\n if currnode is not self.root:\n currnode.pages.add(page, val)", "def __add__(self, other):\n space = '' if other.type == 'punctuation' else ' '\n new = Word('0.0', '0.0', self.word + space + other.word, self.type)\n new.start = self.start\n new.end = other.end\n return new", "def addWord(self, word: str) -> None:\n if str(len(word)) in self.elements:\n self.elements[str(len(word))].add(word)\n else:\n self.elements[str(len(word))] = {word}", "def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word", "def addWord(self, word):\n if word[0] not in self.child:\n self.child[word[0]] = WordDictionary()\n if len(word) > 1:\n self.child[word[0]].addWord(word[1:])\n elif len(word) == 1:\n self.child[word[0]].isend = True", "def addWord(self, word):\n lenw = len(word)\n if not lenw in self.bag:\n self.bag[lenw] = []\n self.bag[lenw].append(word)", "def _add_word(self, id_: int, word: str, key: str) -> None:\n node = self.root\n for c in word:\n self._alphabet.add(c)\n node = node[c]\n # we can't have two different words with same tree-path\n # but they can have multiple ids, so let's keep them in a list\n items = node.setdefault(key, list())\n if id_ not in items:\n items.append(id_)", "def addWord(self, word):\n cur = self.root\n for c in word:\n if c not in cur.children:\n cur.children[c] = Trie()\n cur = cur.children[c]\n cur.isWord = True", "def __iadd__(self, term):\n self.add(term)\n return self", "def addWord(self, word):\n cur = self.root\n\n for c in word:\n if c not in cur.children:\n cur.children[c] = TrieNode()\n \n cur = cur.children[c]\n\n cur.isWord = True", "def add_text(self, text):\n words = self.clean_text_util.clean_text(text)\n # remove duplicate word\n words = set(words) \n\n # for each word:\n # - if the word already exist in the dictionary we update the occurrence\n # - otherwise we add a new word with his index to the dictionary \n for word in words:\n word_info_pickle = self.dictionary_db.get(word)\n if word_info_pickle:\n word_info = pickle.loads(word_info_pickle)\n word_info.number += 1\n self.dictionary_db.replace(word, pickle.dumps(word_info))\n\n else:\n new_word_info = WordInfo(word, self.word_index)\n self.dictionary_db.add(word, pickle.dumps(new_word_info))\n self.word_index += 1\n \n text_nb = int(self.classifier_state_db.get(\"text_nb\"))\n text_nb += 1\n self.classifier_state_db.replace(\"text_nb\", str(text_nb))", "def addWord(self, word):\n node = self.root\n \n for char in word:\n if char not in node.children:\n node.children[char] = TrieNode()\n node = node.children[char]\n \n node.is_word = True", "def pig_latin(word):\n \n first_letter = word[0]\n rest_of_word = word[1 : ]\n \n # Student should complete function on the next lines.\n \n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u':\n return word + \"way\"\n else:\n return 
rest_of_word + first_letter + \"ay\"", "def addWord(self, word):\n selected_node = self.root\n for i in word:\n if selected_node.next.get(i) is None:\n new_node = WordDictionary.Node()\n selected_node.next[i] = new_node\n selected_node = new_node\n else:\n selected_node = selected_node.next[i]\n if not selected_node.isFinish:\n selected_node.isFinish = True\n self.size += 1", "def add_bigrams(text):\n\n\tbigram = Phrases(text, min_count=20) # min freq of 20\n\tbi_phraser = Phraser(bigram)\n\tfor idx in range(len(text)):\n\t\tfor token in bi_phraser[text[idx]]:\n\t\t\tif '_' in token:\n\t\t\t\ttext[idx].append(token)\n\n\treturn text", "def pig_latin(word):\n first_letter = word[0]\n rest_of_word = word[1 : ]\n #print(\"First letter is\", first_letter)\n #print(\"rest_of_word is\", rest_of_word)\n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u': \n pig_latin_word = word + 'way'\n else: \n pig_latin_word = rest_of_word + first_letter + 'ay'\n return pig_latin_word", "def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)", "def add_word(self):\n word = self.word # easier to call word now\n\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n if exists_already(data,word):\n exit()\n\n next_index = int(data[\"cur_index\"]) + 1 # new index\n data[\"words\"][next_index] = word # update wordlist\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1])) # alphabetisize\n data[\"cur_index\"] = next_index # update index\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n\n print(f\"[{word}] added to [{self.pos}]. 
This is the [{next_index}] indexed word added.\")", "def add(vector, ngram):\n if ngram in vector:\n vector[ngram] += 1\n else:\n vector[ngram] = 1", "def insert(self, word: str) -> None:\n currnode=self.root\n for ch in word:\n #dic.get(parameter, default value)\n node=currnode.children.get(ch,TrieNode())\n currnode.children[ch]=node\n currnode=node\n \n currnode.iswordend=True", "def add(self, word: str) -> None:\n self.words.add(word)\n self.added_words.add(word)", "def add(self, word):\n current_node = self.root\n\n for char in word:\n if char not in current_node.children: # checks if that char does not already exists in the children Trie\n current_node.children[char] = TrieNode() # if it doesnt add it to the children dict\n\n current_node = current_node.children[char] # else loop through and go in the node\n\n current_node.is_word = True # complete node by making is_word TRUE", "def hed(self, part, text):\n n = Node(node_type=Node.APPENDIX, label=[part, self.appendix_letter],\n title=text)\n self.m_stack.push_last((0, n))\n self.paragraph_counter = 0\n self.depth = 0", "def update_word(self, word):\n self.word = word", "def lookup(text):\n ret = basic(text)\n ret.update(basic(text, \"gwas\"))\n return ret", "def addWord(self, word):\n p = self.root\n for c in word:\n ind = ord(c) - ord('a')\n if (p.children[ind] == None):\n p.children[ind] = Node(c)\n p = p.children[ind]\n \n p.value = len(word)", "def addWord(self, word):\n node = self.root\n for i in range(len(word)):\n if word[i] in node.children:\n node = node.children[word[i]]\n else:\n break\n\n for j in range(i, len(word)):\n node.children[word[j]] = Node()\n node = node.children[word[j]]\n node.val = word", "def _step1a(self, word):\n # this NLTK-only rule extends the original algorithm, so\n # that 'flies'->'fli' but 'dies'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ies\") and len(word) == 4:\n return self._replace_suffix(word, \"ies\", \"ie\")\n\n return self._apply_rule_list(\n word,\n [\n (\"sses\", \"ss\", None), # SSES -> SS\n (\"ies\", \"i\", None), # IES -> I\n (\"ss\", \"ss\", None), # SS -> SS\n (\"s\", \"\", None), # S ->\n ],\n )", "async def wordfilter_add(self, ctx, *, phrase):\n phrase = phrase.lower()\n await self.bot.redis.rpush('wordfilter', phrase)\n self.words.append(phrase)\n await ctx.send(f'Added `{phrase}` to the filtered words')", "def insert(self, word):\n if not word:\n return\n if word[0] in self.trie:\n cur = self.trie[word[0]]\n else:\n cur = TrieNode(word[0])\n for char in word[1:]:\n if char not in cur.nexts:\n cur.nexts[char] = TrieNode(char)\n cur = cur.nexts[char]\n cur.isTerm = True", "def test_insert_anagrams(self):\n self.dictionary.insertWord(\"able\")\n self.dictionary.insertWord(\"elba\")\n words = self.dictionary.getAnagrams(\"able\")\n self.assertEqual(words, [\"able\", \"elba\"])", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def pig_latin(phrase):\n\n\n # loop over each word in the phrase\n # in word[0] starts with aeiou\n # add yay to the end of that word\n # if word[0] starts with non aeiou\n # move word[0] to the end and add 
ay\n\n result = []\n\n for word in phrase.split():\n\n if word[0] in 'aeiou':\n\n result.append(word + 'yay')\n\n else:\n\n result.append(word[1:] + word[0] + 'ay')\n\n return \" \".join(result)", "def add_vocab_word(self, word):\n # If it's a special token, it'll be separatelly processed during saving file. Skip here.\n if word in special_tokens:\n return\n # Check each character in the word. We don't want none-character (control code) in the vocaburary.\n for char in word:\n if cu.is_none_char(char):\n return\n # If it's a new word, store it.\n if (not word in self.words_ext) and (not word in self.words_new):\n self.words_new.append(word)", "def add_phrase(self, phrase: Phrase) -> None:\n self.phrase_string_map[phrase.phrase_string] = phrase\n self.phrase_type[phrase.phrase_string].add(\"phrase\")\n self.phrase_index[phrase.phrase_string] = phrase\n self.phrase_length_index[len(phrase.phrase_string)].add(phrase.phrase_string)\n self._index_phrase_words(phrase)\n self._index_phrase_tokens(phrase)", "def test_add_word_in_file(self):\n pass", "def add_word(self, word_one, word_two):\n \n # if word_one is in the corpus\n if word_one in self.corpus.keys():\n # if the word_two is already in word_one's corpus\n if word_two in self.corpus[word_one].keys():\n # increment the count by 1\n self.corpus[word_one][word_two] += 1\n # if word_two is not in word_one's corpus\n else:\n # add word_two to the corpus with an initial value of 1\n self.corpus[word_one][word_two] = 1\n # if word_one is not already in the corpus\n else:\n # add it and initialize its dictionary with word_two\n self.corpus[word_one] = {word_two : 1}", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def _add_text(self, elem):\n words = WORD_SEPARATORS.split(elem.string.lower())\n for word in words:\n word = word.strip()\n if word in self._ignored_words:\n continue\n self._curr_words.append((self.word_id(word), self._font_size))\n\n \"\"\" Update inverted index \"\"\"\n if self.word_id(word) in self._inverted_index:\n self._inverted_index[self.word_id(word)].add(self._curr_doc_id)\n self._resolved_inverted_index[word].add(self._curr_url)\n\n else:\n self._inverted_index[self.word_id(word)] = {self._curr_doc_id}\n self._resolved_inverted_index[word] = {self._curr_url}", "def add_word(self, word, data=None):\n self.__word = word\n self.__data = data", "def addUnigrams(self, rating, writtenReview):\n sentence = writtenReview.split()\n for word in sentence:\n if word not in self.dictionary:\n self.addItem(word)\n self.totalTerms[rating] += 1\n self.dictionary[word].incrementFrequency(rating)", "def add_word(words, summary):\n for word in words:\n if word not in summary:\n summary[word] = 1 # If not a word exists, add the word and set value as 1\n else:\n summary[word] += 1 # If a word exists, just increase value by 1", "def solve_example(parser: ArgumentParser) -> None:\n parser.add_argument(\"--word\", type=str, help=\"Word representing the one relator\", required=True)", "def add_word(self, request):\n if Word.query(Word.word == request.word).get():\n raise endpoints.ConflictException('That word is in the list!')\n else:\n word_list = []\n temp = request.word.upper()\n for i in temp:\n if i == \" \" or i < 'A' or i > 'Z':\n raise endpoints.BadRequestException(\n 'Please Enter One Word!')\n else:\n word_list.append(i)\n w = 
Word(word=request.word, word_list=word_list)\n w.put()\n return StringMessage(message='Added %s to the list!' % request.word)", "def addWord(self, word):\n cur = self.__root\n for c in word:\n if c not in cur.next:\n cur.next[c] = Node()\n cur = cur.next[c]\n\n if not cur.isWord:\n cur.isWord = True", "def question_new_translate():", "def create_link(self, word, meaning):\n print(str(self.unique_id) + \" learned \" +\n str(word) + \" for \" + str(meaning))\n self.meaning2word[meaning] = word\n self.word2meaning[word] = meaning\n self.wordsuccess[word] = []\n\n if meaning not in self.model.vocabulary:\n self.model.vocabulary[meaning] = {}\n\n # If word not in vocabulary, add it\n if word not in self.model.vocabulary[meaning]:\n self.model.vocabulary[meaning][word] = [self.unique_id]\n # Else append this agent to its users\n else:\n self.model.vocabulary[meaning][word].append(self.unique_id)", "def _inject(word, phrase):\n words = phrase.split()\n words.append(word)\n random.shuffle(words)\n return \" \".join(words)" ]
[ "0.6528331", "0.64054376", "0.636892", "0.62993944", "0.6260526", "0.62493396", "0.61003536", "0.61003536", "0.6083645", "0.6070733", "0.6045671", "0.6045019", "0.60449773", "0.603758", "0.6037229", "0.6024666", "0.6018381", "0.5992664", "0.59754694", "0.59717524", "0.59580487", "0.59378326", "0.59328115", "0.59168684", "0.59134626", "0.59103835", "0.59068125", "0.5906477", "0.5902757", "0.5896169", "0.58913064", "0.5889117", "0.5886325", "0.5885251", "0.58713835", "0.5867696", "0.5865324", "0.58571815", "0.5848978", "0.5841498", "0.5825309", "0.58207893", "0.5807311", "0.5778377", "0.5771391", "0.57699215", "0.5761638", "0.57573277", "0.5755751", "0.5739526", "0.5731378", "0.5730173", "0.5726218", "0.57231605", "0.5723131", "0.5720508", "0.5711808", "0.5708834", "0.5687701", "0.567357", "0.5670014", "0.56686765", "0.5660108", "0.5659708", "0.5653853", "0.5651735", "0.56492573", "0.56491905", "0.56453925", "0.5631706", "0.5624745", "0.561759", "0.5607086", "0.5590696", "0.5587426", "0.5582763", "0.5562098", "0.55536413", "0.5553576", "0.5550141", "0.5508205", "0.5507374", "0.5506738", "0.54958826", "0.54931086", "0.5484019", "0.5481506", "0.54772013", "0.54699713", "0.5464919", "0.54633003", "0.546316", "0.54604757", "0.5452406", "0.5437484", "0.54335594", "0.5432478", "0.5430584", "0.542919", "0.5420875" ]
0.5661711
62
Theano implementation of the Fast Gradient Sign method.
def fgm(x, predictions, y=None, eps=0.3, ord=np.inf, clip_min=None, clip_max=None):
    assert ord == np.inf, "Theano implementation not available for this norm."

    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        y = T.eq(predictions, T.max(predictions, axis=1, keepdims=True))
    y = T.cast(y, utils_th.floatX)
    y = y / T.sum(y, 1, keepdims=True)

    # Compute loss
    loss = utils_th.model_loss(y, predictions, mean=True)

    # Define gradient of loss wrt input
    grad = T.grad(loss, x)

    # Take sign of gradient
    signed_grad = T.sgn(grad)

    # Multiply by constant epsilon
    scaled_signed_grad = eps * signed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = theano.gradient.disconnected_grad(x + scaled_signed_grad)

    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) and (clip_max is not None):
        adv_x = T.clip(adv_x, clip_min, clip_max)

    return adv_x
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def grad_sigmoid(self):\n return self.sigmoid(self.x)*(self.sigmoid(-self.x))\n raise NotImplementedError(\"Sigmoid gradient not implemented\")", "def sigmoid_grad(self, X):\n var=self.sigmoid(X)\n return var*(1-var)", "def sigmoid_grad(z):\n return Sigmoid(z) * (1 - Sigmoid(z))", "def sigmoid_grad(x):\n sig = sigmoid(x)\n return np.multiply(sig, 1 - sig)", "def grad_sigmoid(self):\r\n return self.sigmoid(self.x) * (1 - self.sigmoid(self.x))", "def sign(tensor):\n raise NotImplementedError", "def grad_sigmoid(self):\n grad = self.sigmoid(self.x) * (1 - self.sigmoid(self.x))\n return grad", "def grad_tanh(self):\n return (1-np.tanh(self.x)*np.tanh(self.x))\n raise NotImplementedError(\"tanh gradient not implemented\")", "def ThetaFunc(self, x):\n return 0.5 * (np.sign(x) + 1)", "def sigmoid_gradient(z):\n #derivative of sigmoid\n return z * (1 - z)", "def grad_tanh(self):\n grad = 1 - self.tanh(self.x) ** 2\n return grad", "def sigmoid(values, gain, shift):\n import numpy as np\n\n tiny = 0.000000001\n\n # Make sure argument is a numpy array\n if type(values) != np.ndarray:\n values = np.array(values)\n\n return 1.0 / (1.0 + np.exp(-gain * (values - shift)) + tiny)", "def sigmoid(z):\r\n \r\n return vSigmoid(z);", "def grad_tanh(self):\r\n return 1 - np.square(self.tanh(self.x))", "def sigmoid(t):\n sig=np.exp(t)/(1+np.exp(t))\n return sig", "def forward_hidden_activation(self, X):\n return np.tanh(X)", "def tanh_grad(self, X):\n return 1-self.tanh(X)**2", "def test_gt_grad():\r\n floatX = config.floatX\r\n T = theano.tensor\r\n\r\n input_ = T.vector(dtype=floatX)\r\n random_values = numpy.random.RandomState(1234).uniform(\r\n low=-1, high=1, size=(2, 2))\r\n W_values = numpy.asarray(random_values, dtype=floatX)\r\n W = theano.shared(value=W_values, name='weights')\r\n correct_score = T.dot(input_, W)\r\n wrong_input = T.vector(dtype=floatX)\r\n wrong_score = theano.clone(correct_score, {input_: wrong_input})\r\n # Hinge loss\r\n\r\n scores = T.ones_like(correct_score) - correct_score + wrong_score\r\n cost = (scores * (scores > 0)).sum()\r\n T.grad(cost, input_)", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def convert_softsign(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n out = x / (_op.const(1.0, dtype) + _op.abs(x))\n g.add_node(op.output(\"Out\")[0], out)", "def _tanh_sigmoid(tensor):\n return 2 * sigmoid(2 * tensor) - 1", "def sigmoid(z):\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1 / (1 + np.exp(-z))\n ### END CODE HERE ###\n\n return s", "def gradient(cls, x):\n return 1 - TanH.apply(x) ** 2", "def activation(x):\n # return np.tanh(x)\n return np.maximum(0,x)", "def sigmoid_back_propagate(da, cache):\n z = cache\n s = 1 / (1 + np.exp(-z))\n dz = da * s * (1 - s)\n assert (dz.shape == z.shape)\n assert (da.shape == z.shape)\n return dz", "def sigmoid_backward(dout, cache):\n dx, x = None, cache\n\n f = lambda x: 1/(1 + np.exp(-x)) # activation function (sigmoid)\n\n fun = f(x)\n\n dx = np.multiply(fun, (1-fun))\n dx = np.multiply(dx,dout)\n\n return dx", "def steepest(Xf, yf, gamma=0.001, iterations=1000): # DONT WORK, be happy\n K = len(Xf[0,:])\n beta = np.random.randn(K, 1)\n for i in range(iterations):\n t = Xf@beta\n sigmoid = expit(t)\n #print(sigmoid)\n #siggy = 1./(1 + np.exp(t))\n #loss = yf - sigmoid\n #print(\"iteration %g, cost: %f\" % (i, loss))\n grad = 2/K*Xf.T@(sigmoid - yf)\n beta = beta - gamma*grad\n #cost = -np.sum(np.transpose(yf)@np.log(1 + siggy) - 
np.transpose(1-yf)@np.log(siggy))\n #print(cost)\n #print(i)\n #break\n return beta", "def sign_st(x):\n from tframe import hub as th\n def sign(v):\n return (tf.cast(tf.math.greater_equal(v, 0), th.dtype) - 0.5) * 2\n def grad(dy):\n return dy * tf.cast(tf.logical_and(\n tf.greater_equal(x, -1.0), tf.less_equal(x, 1.0)), dtype=th.dtype)\n return sign(x), grad", "def sigmoid_backward(dA, Z):\r\n dsig = sigmoid(Z) * (1 - sigmoid(Z))\r\n return dA * dsig", "def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))", "def sigmoid(t):\n return 1 / (1 + np.exp(-t))", "def sigmoid(z):\n \n return 1 / (1 + np.exp(-z))#your code here", "def sigmoid_backward(a, z, g_z):\r\n exp_a = np.multiply(z, 1 - z)\r\n g_a = g_z * exp_a\r\n return g_a", "def sigmoid_backward(dA, cache):\n Z = cache\n s = 1.0 / (1.0 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n return dZ", "def sigmoid_backward(dA, cache):\n\n Z = cache\n s,_ = sigmoid(Z)\n dZ = dA * s * (1-s)\n return dZ", "def sig(self, batch):\n ans = 0\n for t in [-1,-2,-3]:\n z = batch[0][t]\n ans += (z[:-1]*z[1:]).sum() \n return ans", "def sigmoid(z): \n return 1/(1 + np.e**(-z))", "def sigmoid(t):\n\n return 1.0 / (1.0 + np.exp(-t))", "def ff_train(\n self, input_tensor: torch.Tensor, signs: torch.Tensor, theta: float\n ):\n # upgrade optimizer for positive goodness\n y = self(input_tensor.detach())\n y_pos = y[torch.where(signs == 1)]\n y_neg = y[torch.where(signs == -1)]\n # y_pos = self(input_tensor.detach()[torch.where(signs == 1)])\n loss_pos, cumulated_logits_pos = self.loss_fn(y_pos, theta, sign=1)\n # self.optimizer.zero_grad()\n # loss_pos.backward()\n # print(loss_pos.item())\n # self.optimizer.step()\n # y_neg = self(input_tensor.detach()[torch.where(signs == -1)])\n loss_neg, cumulated_logits_neg = self.loss_fn(y_neg, theta, sign=-1)\n self.optimizer.zero_grad()\n loss = loss_pos + loss_neg\n loss.backward()\n self.optimizer.step()\n separation = [cumulated_logits_pos, cumulated_logits_neg]\n y = torch.zeros(\n input_tensor.shape[0], *y_pos.shape[1:], device=input_tensor.device\n )\n y[torch.where(signs == 1)] = y_pos\n y[torch.where(signs == -1)] = y_neg\n return y.detach(), separation", "def sigmoid_backward(dA, cache):\n\n Z = cache\n\n s = 1 / (1 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def sigmoid_forward(x):\n\n out = 1/(1+np.exp(-x))\n\n cache = x\n return out, cache", "def sigmoid_forward(x):\n out = None\n ########################################################################\n # TODO: Implement the Sigmoid forward pass. 
#\n ########################################################################\n\n out = 1 / (1 + np.exp(-x))\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n cache = out\n return out, cache", "def sigmoid(z):\n return 1 / (1 + e ** -z)", "def test_softplus_activation(self):\n self.assertEqual(\n [0.4740769841801067, 0.9740769841801067], list(af.SoftPlus().output(np.array([-0.5, 0.5]))))\n self.assertEqual([0.3775406687981454, 0.6224593312018546], list(\n af.SoftPlus().derivative(np.array([-0.5, 0.5]))))", "def sigmoid_derivative(x):\r\n\r\n ### START CODE HERE ### (≈ 2 lines of code)\r\n s = 1.0 /(1 + 1/np.exp(x))\r\n ds = s*(1-s)\r\n ### END CODE HERE ###\r\n\r\n return ds", "def sigmoidal(min_iterations, i, start = start_temp, final = final_temp ):\n\n\t# to prevent a math overflow a scale (x^(1/ (i - min_iterations))) is used\n\ttemperature = final + ((start - final)**( 1/ (i - min_iterations))) / \\\n\t\t\t\t\t(1 **(i - min_iterations)) + math.exp(0.3 * ((i - min_iterations / 2) /(i - min_iterations)))\n\n\treturn temperature", "def sigmoid(t):\n t[t >= 20] = 20\n t[t <= -20] = -20\n return np.exp(t)/(np.exp(t)+1)", "def sigmoid_derivative(x):\n return x * (1-x)", "def sigmoid(t):\n empty = np.empty(t.shape)\n empty[t > 0] = 1/(1+ np.exp(-t[t>0]))\n empty[t <= 0 ] = np.exp(t[t<=0])/(1+np.exp(t[t<=0]))\n return empty", "def forward(self, state):\n x = self.fc1(state)\n action = self.tanh(x)\n\n action = action.cpu().data.numpy() * self.action_lim\n action = torch.FloatTensor(action)\n\n return action", "def tanh_forward(self, x):\n \n #############################################################################\n # TODO: Implement the tanh forward pass. 
#\n #############################################################################\n out = np.tanh(x)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = out\n return out, cache", "def forward(self, X):\n # Propaga a entrada pela rede\n self.z = np.dot(X, self.W1) # Produto escalar da entrada com a primeira matrix de pesos\n self.z2 = self.sigmoid(self.z) # Função de ativação\n self.z3 = np.dot(self.z2, self.W2) # Produto escalar da hidden layer com a segunda matrix de pesos\n return self.sigmoid(self.z3) # Função de ativação na saída ", "def __sigmoid(z, derivative=False):\n if derivative:\n return z * (1 - z)\n else:\n return 1 / (1 + np.exp(-z))", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def sigmoid(t):\n\tprecLim = 10\n\t\n\tt[t<=-precLim] = 0\n\tt[t>-precLim] = 1/ (1 + np.exp(-t))\n\n\treturn t", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def sigmoid_backward(dA, Z):\n\n s = 1 / (1 + np.exp(-Z))\n dZ = dA * s * (1 - s)\n\n assert (dZ.shape == Z.shape)\n\n return dZ", "def gradFun(self, S, x):", "def sigmoid(z):\n g = (1 + np.exp(-z))**-1\n return g", "def sigmoid(z):\n\n s = 1/(1+ np.exp(-z));\n return s;", "def sigmoid(X,W,b):\n preActivation = np.dot(X, W) + b\n return (1.0)/(1.0 + np.exp(-preActivation))", "def sigmoid(z):\n\n S = (1 / (1 + np.exp(-z)))\n return S", "def grad_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass", "def sigmoid_derivative(x):\n return x * (1.0 - x)", "def test_asymptotic_32():\r\n\r\n #TODO: consider adding the optimization of crossentropy into the current\r\n # mode for the purpose of running this test\r\n\r\n for dtype in 'float32', 'float64':\r\n if dtype == 'float32':\r\n x = tensor.fmatrix()\r\n x2 = tensor.fvector()\r\n else:\r\n x = tensor.dmatrix()\r\n x2 = tensor.dvector()\r\n y = tensor.lvector()\r\n\r\n c = categorical_crossentropy(softmax(x + x2), y)\r\n f = theano.function([x, y, x2], [c.sum(),\r\n tensor.grad(c.sum(), x)], mode='FAST_RUN')\r\n if 0:\r\n for i, n in enumerate(f.maker.fgraph.toposort()):\r\n print i, n\r\n\r\n xval = numpy.zeros((5, 5), dtype=dtype).astype(dtype)\r\n x2val = numpy.zeros(5, dtype=xval.dtype).astype(dtype)\r\n for i in xrange(100):\r\n cval, gxval = f(xval, numpy.arange(5), x2val)\r\n xval -= 100.3 * gxval\r\n #print cval, gxval\r\n assert cval == 0 # no problem going to zero error\r\n\r\n #what about when x gets really big?\r\n\r\n xval = numpy.zeros((5, 5), dtype=dtype)\r\n x2val = numpy.zeros(5, dtype=xval.dtype)\r\n for i in xrange(100):\r\n\r\n cval, gxval = f(xval, numpy.arange(5), x2val)\r\n xval += 100000.3 * gxval\r\n #print cval, gxval\r\n\r\n assert cval > 61750000\r\n assert gxval[0, 0] == -1.0\r\n assert gxval[0, 1] == 0.25", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def calcSign(self, state):\n\n sentinel_vec = self.sentinel - state.pos\n target_vec = self.target - state.pos\n\n temp = (sentinel_vec % target_vec) ^ chrono.ChVectorD(0, 0, 
1)\n\n return (temp > 0) - (temp < 0)", "def sigmoid(z):\n return 1 / (1 + np.exp(-1 * z))", "def sigmoid(z):\n\n s = 1/(1+ np.exp(-z))\n \n return s", "def gradient(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = np.eye(n)\n\n forw = np.zeros(n)\n for i in range(n):\n forw[i] = f(x + s*e[i])\n\n g = (forw - f(x)) / s\n return g", "def sigmoid(z):\n return 1 / (1 + np.exp(-z))", "def sigmoid(z):\r\n \r\n return 1.0 / (1.0+np.exp(-z))", "def sigmoid(X):\n if isinstance(X,(list,tuple)):\n X=np.array(X)\n return 1/(1+np.exp(-X))\n #return np.exp(X)/(1+np.exp(X))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def sigmoid_forward(self, x):\n \n #############################################################################\n # TODO: Implement the Sigmoid forward pass. #\n #############################################################################\n out = 1 / (1 + np.exp(-x))\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = out\n return out, cache", "def sigmoid(z):\n\treturn 1.0/(1.0+np.exp(-z))", "def sigmoid(z):\n\n\ts = 1 / (1 + np.exp(-z)) #definition of the sigmoid function\n\treturn s", "def f1(phi, phi_o, d):\n return 1 - sigmoid_decay(phi, phi_o, d)", "def forward(self, x):\n self.y = x.tanh()\n return self.y", "def sigmoid(z):\n return 1 / (1 + (np.exp(-z)))", "def sigmoid(x):\r\n #pred_x = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\r\n pred_x = 1.0 / (1.0 + np.exp(-x))\r\n return pred_x\r\n pass", "def sigmoid(z):\n return 1/(1+np.exp(-z))", "def grad_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n return (pred - true) / true.shape[0]", "def sigmoid(x):\n pos_mask = (x >= 0)\n neg_mask = (x < 0)\n z = np.zeros_like(x)\n z[pos_mask] = np.exp(-x[pos_mask])\n z[neg_mask] = np.exp(x[neg_mask])\n top = np.ones_like(x)\n top[neg_mask] = z[neg_mask]\n return top / (1 + z)", "def sigmoid(Z):\n\n A = 1 / (1 + np.exp(-Z))\n cache = Z\n\n return A, cache", "def sigmoid(X):\n g = 1/(1 + np.exp(-X))\n return g", "def activation_sigmoid(self):\n self.value = 1 / (1 + np.e ** (-self.value))", "def sigmoid(z):\n return 1/(1 + numpy.exp(z))", "def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))", "def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))", "def forward(self, Z):\n self.A = 1 / (1 + np.exp(-Z)) # compute sigmoid activations", "def sigmoid(z):\n return 1.0 / (1 + np.exp(-z))", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def sigmoid(Z):\n\n A = 1.0/(1.0+np.exp(-Z))\n cache = Z\n\n return A, cache", "def hardtanh(self, min_value=-1, max_value=1):\n intermediate = crypten.stack([self - min_value, self - max_value]).relu()\n intermediate = intermediate[0].sub(intermediate[1])\n return intermediate.add_(min_value)", "def deriv_sigmoid(self,z):\n return np.exp(-z) / ( (1 + np.exp(-z)) ** 2 )", "def diff_sigmoid(z):\r\n diff_z = np.multiply(z, (1.0 - z))\r\n return diff_z\r\n pass", "def sigmoid(z):\n g = 1/(1 + np.exp(-z))\n return g", "def grad_fn(grad: np.ndarray) -> np.ndarray:\n return grad * np.ones_like(t.data)" ]
[ "0.62575996", "0.620261", "0.6106974", "0.6093499", "0.60716397", "0.5985662", "0.5984108", "0.59664273", "0.59456444", "0.590908", "0.5791624", "0.574362", "0.5741642", "0.5734987", "0.5703245", "0.56998265", "0.5676139", "0.566065", "0.5639867", "0.5634217", "0.5617749", "0.56070757", "0.55969083", "0.55899256", "0.55876493", "0.55570376", "0.55469894", "0.554506", "0.5538363", "0.5530879", "0.5528762", "0.55198073", "0.5513752", "0.54766417", "0.5474368", "0.5469227", "0.54557437", "0.54472435", "0.543877", "0.5434001", "0.5428491", "0.5415761", "0.541189", "0.54052615", "0.54044914", "0.5404433", "0.5395573", "0.53755313", "0.537052", "0.5359891", "0.5354947", "0.53539026", "0.5352679", "0.5339194", "0.5333746", "0.5330285", "0.5328498", "0.5324987", "0.5319295", "0.5314422", "0.5310593", "0.5309765", "0.53090644", "0.53071", "0.5304961", "0.5295554", "0.5290223", "0.52868813", "0.52834", "0.5277497", "0.5277161", "0.5271132", "0.5267711", "0.52640736", "0.52640736", "0.5261612", "0.52590936", "0.52584326", "0.525547", "0.5251566", "0.5250187", "0.5247173", "0.52395666", "0.523688", "0.5230149", "0.5228045", "0.522737", "0.52268505", "0.5223839", "0.52230674", "0.52200395", "0.52198106", "0.52131677", "0.5212122", "0.52056766", "0.52035296", "0.52009165", "0.5197073", "0.519605", "0.51957333", "0.5193054" ]
0.0
-1
Assert that the first (leftmost) protocol value is correctly fetched from the X-Forwarded-Proto header.
def test_get_protocol_with_more_than_one_value():
    request = Mock(
        headers={"X-Forwarded-Proto": "https,http,http"},
        protocol="http",
    )
    expected = "https"

    protocol = get_browser_protocol(request)

    assert expected == protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_h2_header_ok(self):\n self.set_frang_config(frang_config=\"http_strict_host_checking true;\")\n client = self.get_client(\"deproxy-1\")\n client.start()\n client.parsing = False\n\n first_headers = [(\":authority\", \"localhost\"), (\":path\", \"/\")]\n second_headers = [(\":path\", \"/\"), (\"host\", \"localhost\")]\n third_headers = [(\":authority\", \"localhost\"), (\":path\", \"/\"), (\"host\", \"localhost\")]\n fourth_headers = [\n (\":authority\", \"tempesta-tech.com\"),\n (\":path\", \"/\"),\n (\"forwarded\", \"host=tempesta-tech.com\"),\n (\"forwarded\", \"for=tempesta.com\"),\n ]\n\n header_list = [\n first_headers,\n first_headers, # as byte\n second_headers,\n second_headers, # as byte\n third_headers,\n third_headers, # as byte\n fourth_headers,\n fourth_headers, # as byte\n ]\n for header in header_list:\n head = [\n (\":scheme\", \"https\"),\n (\":method\", \"HEAD\"),\n ]\n head.extend(header)\n client.make_request(head)\n self.assertTrue(client.wait_for_response(1))\n\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def test_host_header_with_old_proto(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.0\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: Host header field in protocol prior to HTTP/1.1\",\n )", "def test_host_header_mismatch(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_host_header_mismatch_empty(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: \\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_host_header_no_port_in_uri(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)", "def test_host_header_no_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:80/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)", "def test_host_header_mismath_port(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET 
http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:81\\r\\n\\r\\n\"\n ],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"port from host header doesn't match real port\"\n )", "def test_send_pp_header_v1_no_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1)\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '1.1.1.1', '2.2.2.2', 1000, 2000)\n socket.sendall.assert_called_once_with(expected_header)", "def assert_header(self):\r\n\r\n if self.length > self.owner.settings[SETTINGS_MAX_FRAME_SIZE]:\r\n raise netius.ParserError(\r\n \"Headers are greater than SETTINGS_MAX_FRAME_SIZE\",\r\n stream = self.stream,\r\n error_code = FRAME_SIZE_ERROR\r\n )\r\n if self.last_type in (HEADERS, CONTINUATION) and not\\\r\n self.last_end_headers and not self.last_stream == self.stream:\r\n raise netius.ParserError(\r\n \"Cannot send frame from a different stream in middle of headers\",\r\n error_code = PROTOCOL_ERROR\r\n )", "def test_host_header_as_ip6(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: [20:11:abb::1]:80\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def test_host_header_as_ip(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected", "def test_host_header_mismath_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def test_send_pp_header_v1_with_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1, src_addr=('6.6.6.6', 666))\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '6.6.6.6', '2.2.2.2', 666, 2000)\n socket.sendall.assert_called_once_with(expected_header)", "def test_headers(self):\n self.assert_expected_token_value()", "def test_server_should_be_http_1_1(httpbin):\n resp = get_raw_http_response(httpbin.host, httpbin.port, \"/get\")\n assert resp.startswith(b\"HTTP/1.1\")", "def test_h2_host_header_as_ipv6(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\"host\", \"[20:11:abb::1]:443\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)", "def test_normalize_xmlrpc_address_missing_protocol(self):\r\n input_val = 'google.com:1234'\r\n expected_val = 'http://google.com:1234'\r\n 
actual_val = normalize_xmlrpc_address(input_val, 1471)\r\n self.assertEqual(expected_val, actual_val)", "def test_h2_host_header_as_ip(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\"host\", \"127.0.0.1\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def testIPv6(self):\n self.assertEqual([\"http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia\"], grab('foo http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia', self.needScheme))", "def test_discard_first(self):\n test_length = random.randint(0,100)\n test_string = \"#\\t{0}\".format(\"\\t\".join(map(str, xrange(test_length))))\n expected = test_length\n computed = len(self.parser.parse_header(test_string, extract_mock))\n self.assertEquals(expected, computed)", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {\"http.server_name\": hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)", "def test_response_ok():\n from server import response_ok\n assert response_ok().split(b'\\r\\n')[0] == b'HTTP/1.1 %s' % OK_200", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_SERVER_NAME: hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_specific_url_is_used_for_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n\n url = self.httpbin.client[\"get_my_headers\"][\"url\"]\n self.assertIn(url, req.prepared_request.url)", "def test_default_headers_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'],\n self.httpbin.client[\"default_headers\"]['All-Request-Headers'])", "def validate_http_request(request):\n request_str = request.decode('utf-8')\n print(request_str)\n split_request = request_str.split(' ')\n if (split_request[0] == 'GET') and split_request[2].startswith('HTTP/1.1'):\n request_url = split_request[1].replace(\"/\", \"\\\\\")\n x = (True, request_url)\n return x\n y = (False, None)\n return y", "def testIPv6noscheme(self):\n if self.needScheme: return\n \n self.assertEqual([\"[2001:a68:104:1337:250:daff:fe72:871c]/toimia\"], grab('foo [2001:a68:104:1337:250:daff:fe72:871c]/toimia', self.needScheme))", "def testNoScheme(self):\n if self.needScheme: return\n \n 
self.assertEqual([\"123.123.123.123\"], grab('123.123.123.123', self.needScheme))", "def test_udp_header_native(self):\n header = UDP_HEADER(\n source_port = 8080,\n dest_port = 8080,\n length = 2,\n checksum = 0xbeef\n )\n\n expected_val = struct.pack('HHHH', 8080, 8080, 2, 0xbeef)\n\n self.assertEqual(header.to_bytes(), expected_val)", "def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None", "def test_min_args(self):\n bust_fragments(self.resp, '/foo/bar')\n self.assert_header_set('[\"/foo/bar\"]')", "def test_tcp_header_native(self):\n header = TCP_HEADER(\n source_port = 8080,\n dest_port = 8080,\n seq_num = 0xbeefcafe,\n ack_num = 0xcafebeef,\n data_offset = 0xf,\n flag_ns = 1,\n flag_cwr = 1,\n flag_ece = 1,\n flag_urg = 1,\n flag_ack = 1,\n flag_psh = 1,\n flag_rst = 1,\n flag_syn = 1,\n flag_fin = 1,\n window_size = 12,\n checksum = 0xffff\n )\n\n expected_data = [\n 8080, 8080, 0xbeefcafe, 0xcafebeef, int('10001111', 2), 0xff, 12, 0xffff\n ]\n\n expected_val = struct.pack('HHIIBBHH', *expected_data)\n\n self.assertEqual(header.to_bytes(), expected_val)", "def test_user_headers_sent_with_request(self):\n user_header = {'All-Request-Headers': 'Headers from user code'}\n req = self.httpbin.get_my_headers(headers=user_header, dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], user_header['All-Request-Headers'])", "def parse_protocol_header(stream: BytesIO) -> Tuple[int, int, int]:\n prefix, *version = unpack('>5sBBB', _read(stream, 8))\n if prefix != b'AMQP\\x00':\n raise ValueError(\"wrong protocol, expected b'AMQP\\x00', got {}\".format(\n prefix\n ))\n return version", "def test_specific_url_query_sent_with_request(self):\n req = self.httpbin_2.get_my_headers(dry_run=True)\n def_url_query = self.httpbin_2.client[\"get_my_headers\"][\"url_query\"]\n self.assertIn(urlencode(def_url_query), req.prepared_request.url)", "def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version", "def test_differentProtocol(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n protocols = []\n\n class FakeProtocol(object):\n def __init__(self):\n self.transport = StubPort()\n\n def query(self, address, query, timeout=10, id=None):\n protocols.append(self)\n return defer.succeed(dns.Message())\n\n resolver._connectedProtocol = FakeProtocol\n resolver.query(dns.Query('foo.example.com'))\n resolver.query(dns.Query('bar.example.com'))\n self.assertEqual(len(set(protocols)), 2)", "def test_raw_empty(self):\n self.assertRaisesHeaderError([''])", "def test_leading_crlf(parser, data):\n msg = b\"\\r\\n\\r\\n\\r\\n\\r\\nGET / HTTP/1.1\\r\\n\\r\\n\"\n\n http11.c.lib.HTTPParser_execute(parser, msg, 0, len(msg))\n\n assert data == {\n \"request_method\": b\"GET\",\n \"request_uri\": b\"/\",\n \"http_version\": b\"HTTP/1.1\",\n }\n assert parser.finished\n assert not parser.error\n\n msg = b\"\\r\\n\\r\\n\\r\\n\\r\\nHTTP/1.1 200 OK\\r\\n\\r\\n\"\n\n http11.c.lib.HTTPParser_execute(parser, msg, 0, len(msg))\n\n assert parser.finished\n assert parser.error == http11.c.lib.EINVALIDMSG", "def test_response_error(err_msg):\n from server import response_error\n error_text = b'HTTP/1.1 %s' % 
err_msg\n assert response_error(err_msg).split(b'\\r\\n')[0] == error_text", "def test_check_http_url_split(url, expected_split):\n assert http_urlsplit(url) == expected_split", "def test_h2_authority_header_as_ip(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\":authority\", \"127.0.0.1\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def test_https_over_http_error(http_server, ip_addr):\n httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))\n interface, _host, port = _get_conn_data(httpserver.bind_addr)\n with pytest.raises(ssl.SSLError) as ssl_err:\n http.client.HTTPSConnection(\n '{interface}:{port}'.format(\n interface=interface,\n port=port,\n ),\n ).request('GET', '/')\n expected_substring = (\n 'wrong version number' if IS_ABOVE_OPENSSL10\n else 'unknown protocol'\n )\n assert expected_substring in ssl_err.value.args[-1]", "def testLeadingAndTrailingText(self):\n self.assertEqual([\"http://123.123.123.123\"], grab('fooasdf asdf a http://123.123.123.123 asdfasdf', self.needScheme))", "def test_get_invalid(self):\n self.client_socket.sendto(b'i', CONTROL_SOCKET_FILE)\n (bytes, address) = self.client_socket.recvfrom(6)\n #self.assertEquals(address, CONTROL_SOCKET_FILE)\n self.assertEqual(len(bytes), 1)\n self.assertEqual(bytes[0], 7)", "def test_get_ip_from_headers(self):\n response = self.client.get(self.voter_location_url, REMOTE_ADDR='69.181.21.132')\n self.assertEqual(response.status_code, 200)\n json_data = json.loads(response.content.decode())\n self.assertEqual(json_data['success'], True)\n self.assertEqual(json_data['voter_location_found'], True)", "def test_get_protocol_version_name(self):\n server, client = loopback()\n client_protocol_version_name = client.get_protocol_version_name()\n server_protocol_version_name = server.get_protocol_version_name()\n\n assert isinstance(server_protocol_version_name, str)\n assert isinstance(client_protocol_version_name, str)\n\n assert server_protocol_version_name == client_protocol_version_name", "def test_parse_header(self):\n data = parse_header(self.header)\n self.assertEqual(data.get(\"application\"), \"my Grandma\")\n self.assertEqual(data.get(\"version\"), \"has\")\n self.assertEqual(data.get(\"reference\"), \"furry\")\n self.assertEqual(data.get(\"query_letters\"), 27)\n self.assertEqual(data.get(\"database\"), \"Cats\")", "def test_websocket_traceresponse_header(self):\n\n orig = get_global_response_propagator()\n set_global_response_propagator(TraceResponsePropagator())\n\n self.scope = {\n \"type\": \"websocket\",\n \"http_version\": \"1.1\",\n \"scheme\": \"ws\",\n \"path\": \"/\",\n \"query_string\": b\"\",\n \"headers\": [],\n \"client\": (\"127.0.0.1\", 32767),\n \"server\": (\"127.0.0.1\", 80),\n }\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_input({\"type\": \"websocket.connect\"})\n self.send_input({\"type\": \"websocket.receive\", \"text\": \"ping\"})\n self.send_input({\"type\": \"websocket.disconnect\"})\n _, socket_send, *_ = self.get_all_output()\n\n span = self.memory_exporter.get_finished_spans()[-1]\n self.assertEqual(trace_api.SpanKind.SERVER, span.kind)\n\n trace_id = format_trace_id(span.get_span_context().trace_id)\n span_id = format_span_id(span.get_span_context().span_id)\n traceresponse = f\"00-{trace_id}-{span_id}-01\"\n\n self.assertListEqual(\n socket_send[\"headers\"],\n [\n [b\"traceresponse\", f\"{traceresponse}\".encode()],\n [b\"access-control-expose-headers\", b\"traceresponse\"],\n ],\n )\n\n set_global_response_propagator(orig)", "def 
test_strict_https_header(flask_app, app):\n app.config['STRICT_HTTPS'] = True # enable strict https\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Strict-Transport-Security') == 'max-age=31536000; includeSubDomains'\n\n app.config['STRICT_HTTPS'] = False # disable\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert not headers.get('Strict-Transport-Security')", "def test_h2_authority_header_as_ipv6(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\":authority\", \"[20:11:abb::1]:443\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def _expect_100(connection: typing.Union[ssl.SSLSocket, socket.socket]) -> bool:\n try:\n headers = b''\n while b'\\r\\n\\r\\n' not in headers:\n headers += connection.recv(1024)\n return b' 100 ' in headers.split(b'\\r\\n')[0]\n except IOError:\n return False", "def test_get_current_request_hostname(self):\r\n assert_is_none(get_current_request_hostname())", "def test_basic(self):\n request = fake_twisted_request(request_headers={\n b'x-foo': [b'bar'],\n })\n self.assertThat(\n _nevow_request_to_request_map(request),\n ContainsDict({\n 'content_type': Equals(b'application/octet-stream'),\n 'content_length': Equals(0),\n 'character_encoding': Is(None),\n 'headers': Equals({b'Content-Length': [0],\n b'X-Foo': [b'bar'],\n b'Host': [b'example.com']}),\n 'remote_addr': Equals(b'192.168.1.1'),\n 'request_method': Equals(b'GET'),\n 'server_name': Equals(b'example.com'),\n 'server_port': Equals(80),\n 'scheme': Equals(b'http'),\n 'uri': Equals(URL.from_text(u'/one'))}))", "def test_having_port(self):\n url = 'https://user:12345@domain.com:8080/project/objects.inv'\n expected = 'https://domain.com:8080/project/objects.inv'\n actual = _strip_basic_auth(url)\n assert expected == actual", "def test_get_current_request_hostname(self):\n assert get_current_request_hostname() is None", "def test_header_parser_vanilla(self):\n lines = [\"Content-Type: application/json\", \"Accept: application/json\"]\n h = {\"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}\n headers = parser._parse_headers(lines)\n self.assertEqual(h, headers)", "def validate_http_request(request):\r\n if request != b'':\r\n # Divide the request line: [method, sp, url, version, cr lf]\r\n request = request.decode().split('\\r')[0]\r\n method = request.split()[0]\r\n url = request.split()[1]\r\n version = request.split()[2]\r\n if method == METHOD and version == VERSION:\r\n return True, url\r\n else:\r\n return False, None\r\n else:\r\n return True, None", "def test_fetchSpecificHeader(self):\n d = self.client.fetchSpecific('11', headerType='HEADER')\n self.assertEqual(\n self.transport.value(), b'0001 FETCH 11 BODY[HEADER]\\r\\n')\n self.client.lineReceived(\n b'* 11 FETCH (BODY[HEADER]'\n b' \"From: someone@localhost\\r\\nSubject: Some subject\")')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.assertEqual(\n self.successResultOf(d),\n {11: [['BODY', ['HEADER'],\n \"From: someone@localhost\\r\\nSubject: Some subject\"]]})", "def test_handshake_missing_headers(tchannel_pair):\n server, client = tchannel_pair\n\n client.initiate_handshake(headers={})\n with pytest.raises(InvalidMessageException):\n server.await_handshake(headers={})", "def test_check_http_url_split_validation():\n with pytest.raises(ValueError):\n http_urlsplit('https://aaa.cz')\n\n with pytest.raises(ValueError):\n http_urlsplit('ftp://ddd.cz')", "def test_parse_host_port(self):\n # test default port for http\n endpoint = \"1.2.3.4\"\n default_protocol = 
baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test default port for https\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTPS)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test specific port\n endpoint = \"1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)\n\n # test value error\n endpoint = \"1.2.3.4:abcd\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # protocol unsupported\n endpoint = \"ftp://1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # test of endpoint dominates the protocol\n endpoint = \"http://1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)", "def test_create_proxy_no_protocol(self):\n\n with self.assertRaises(ValueError) as ex:\n ProxyConfig(\n address=\"localhost\",\n username=\"username\",\n password=\"password\"\n )\n\n self.assertEqual(\n ex.exception.args[0],\n f\"The provided proxy address of localhost does not contain a protocol, please specify in the full format e.g. 
http://myproxy.com:8080\")", "def test_content_type_header_not_automatically_added(httpbin):\n resp = requests.get(httpbin + \"/headers\").json()[\"headers\"]\n assert \"Content-Type\" not in resp", "def is_forwarded(self):\n return bool(re.match(FW_PATTERNS, self.header('Subject', '')))", "def test_first_line_binary_little_endian(self):\n self.assertEqual(self.header.designation.format, 'BINARY-LITTLE-ENDIAN')", "def testGetOneOutOfLowerBorder(self):\n inst = WireData(b'0123456789')\n with self.assertRaises(FormError):\n inst[-11] # pylint: disable=pointless-statement", "def test_request_headers(mock_send, mock_format):\n ClientSession().request('GET', 'https://url', access_token='token')\n request_obj = mock_send.call_args[0][0]\n assert request_obj.headers['Authorization'] == 'Bearer token'", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def check_header(self, name, value):\r\n if value in self.headers.get(name, ''):\r\n return True\r\n return False", "def _get_forwarded_host(self, request: Request) -> Optional[str]:\n forwarded_host = request.headers.getlist(\"X-Forwarded-Host\")\n if not forwarded_host or len(forwarded_host) > 1:\n return None\n return forwarded_host[0].strip()", "def test_traceroute_noheader(self):\n self.assertEqual(jc.parsers.traceroute.parse(self.osx_10_14_6_traceroute_noheader, quiet=True), self.osx_10_14_6_traceroute_no_header_json)", "def test_url_add_missing_protocol(self):\n assert ct.url_add_missing_protocol(\"https://www.bad-actor.services/\") == \"https://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\"http://www.bad-actor.services/\") == \"http://www.bad-actor.services/\"\n assert ct.url_add_missing_protocol(\n \"www.bad-actor.services/\",\n default=\"https\") == \"https://www.bad-actor.services/\"", "def testLeadingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab(' http://tomtom.foobar.org/', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab(' http://www.foobi.org/saatoimia', self.needScheme))", "def test_forward(self):\n validate_forward()", "def test_parse_wsgi_bind_addr(raw_bind_addr, expected_bind_addr):\n assert parse_wsgi_bind_addr(raw_bind_addr) == expected_bind_addr", "def test_content_length() -> None:\n assert \"CONTENT-LENGTH: 3\" in Fauxmo.add_http_headers(\"foo\")\n assert \"CONTENT-LENGTH: 4\" in Fauxmo.add_http_headers(\"föo\")", "def get_http_header(url, header_name):\n\n try:\n response = urllib2.urlopen(url)\n \n if response.info().get(header_name):\n return None\n \n except ConnectionError:\n print('Connection Error')\n except UnknownError: \n print('Unknown Error')", "def test_response_header(BASE_URL, COUNTRY_CODE):\n # make request\n result = requests.get(f'{BASE_URL}{COUNTRY_CODE}')\n assert result.headers['Content-Type'] == 'application/json'", "def test_failed_verify_oauth_body_sign_proxy_mangle_url(self):\n request = self.get_signed_grade_mock_request_with_correct_signature()\n self.xmodule.verify_oauth_body_sign(request)\n # we should verify against get_outcome_service_url not\n # request url proxy and load balancer along the way may\n # change url presented to the method\n request.url = 'http://testurl/'\n self.xmodule.verify_oauth_body_sign(request)", "def validate_subprotocol(subprotocol, hixie):\n\n if not subprotocol:\n raise HandshakeException('Invalid subprotocol name: empty')\n 
if hixie:\n # Parameter should be in the range U+0020 to U+007E.\n for c in subprotocol:\n if not 0x20 <= ord(c) <= 0x7e:\n raise HandshakeException(\n 'Illegal character in subprotocol name: %r' % c)\n else:\n # Parameter should be encoded HTTP token.\n state = http_header_util.ParsingState(subprotocol)\n token = http_header_util.consume_token(state)\n rest = http_header_util.peek(state)\n # If |rest| is not None, |subprotocol| is not one token or invalid. If\n # |rest| is None, |token| must not be None because |subprotocol| is\n # concatenation of |token| and |rest| and is not None.\n if rest is not None:\n raise HandshakeException('Invalid non-token string in subprotocol '\n 'name: %r' % rest)", "def test_default_host_http_required(self):\n client = self.base_scenario(\n frang_config=\"\", requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"]\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def test_valid_header_not_at_top_message_added(self):\n\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = 'print(\\'hello\\')\\n# Valid\\n# Header'\n with self.assertAddsMessages(pylint.testutils.Message(\n msg_id='invalid-file-header',\n line=1,\n args=self.EXPECTED_HEADER)):\n self.checker.process_module(node_mock)", "def _get_forwarded_proto(self, request: Request) -> List[str]:\n forwarded_proto_str = request.headers.getlist(\"X-Forwarded-Proto\")\n if not forwarded_proto_str or len(forwarded_proto_str) > 1:\n return []\n return [p.strip() for p in forwarded_proto_str[0].split(\",\")]", "async def parse_http_request_header(reader: StreamReader, writer: StreamWriter):\n\n lines = await reader.read_until(b\"\\r\\n\\r\\n\")\n headers = lines[:-1].decode().split(\"\\r\\n\")\n method, path, ver = HTTP_LINE.match(headers.pop(0)).groups()\n log.info(\"req original: %s\", lines)\n url = urllib.parse.urlparse(path)\n #\n lines = \"\\r\\n\".join(i for i in headers if not i.startswith(\"Proxy-\") and i.strip())\n headers = dict(i.split(\": \", 1) for i in headers if \": \" in i)\n\n if method == \"CONNECT\":\n host_name, port = path.split(\":\", 1)\n port = int(port)\n writer.write(f\"{ver} 200 OK\\r\\nConnection: close\\r\\n\\r\\n\".encode())\n return host_name, port, b\"\"\n\n else:\n url = urllib.parse.urlparse(path)\n host_name = url.hostname\n port = url.port or 80\n newpath = url._replace(netloc=\"\", scheme=\"\").geturl()\n\n req = f\"{method} {newpath} {ver}\\r\\n{lines}\\r\\n\\r\\n\".encode()\n log.info(\"new req: %s\", req)\n return (host_name, port, req)", "def test_headers(self):\n msg = self.shortDescription()\n self.assertTrue(False, msg=msg)\n pass", "def correct_header_fields():\n test_str = \"c0rrect_!!heAd3R fi3ld5__%%!! @\\n\"\n server = start_server()\n client = start_client()\n\n write_to(client, test_str)\n segments = read_segments_from(client)\n if not segments:\n return False\n teardown()\n\n # Start reference solution to get answers.\n ref_server = start_server(port=REF_PORT, reference=True)\n ref_client = start_client(server_port=REF_PORT, reference=True)\n\n # Get reference checksum.\n write_to(ref_client, test_str)\n ref_segment = read_segments_from(ref_client)[0]\n\n # Check the first sent segment. Should have all the same header fields as\n # the reference.\n segment = segments[0]\n\n # Check the flags first. 
Maybe decided to ACK all segments.\n if not segment.has_same_flags(ref_segment):\n if \"ACK\" in segment.flags:\n segment.flags.remove(\"ACK\")\n\n return (\n segment.seqno == ref_segment.seqno and\n (segment.ackno == 0 or segment.ackno == ref_segment.ackno) and\n segment.length == ref_segment.length and\n segment.has_same_flags(ref_segment) and\n segment.window == ref_segment.window and\n (segment.checksum == ref_segment.checksum or\n int(segment.checksum, 16) == segment.c_repr.cksum)\n )", "def test_valid_header_no_message_added(self):\n\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = self.EXPECTED_HEADER\n with self.assertNoMessages():\n self.checker.process_module(node_mock)", "def _check_next_url(next):\n if '://' in next:\n return None\n return next", "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def test_bad_control_packet_header(self, data):\n with pytest.raises(StreamError) as e:\n ControlHeaderStruct.parse(data)", "def test_empty_case(self):\n self.assertEquals([], self.parser.parse_header(\"\", extract_mock))\n self.assertEquals([], self.parser.parse_header(\"#\\t\", extract_mock))", "def get_header(header, pkt):\n try:\n str_pkt = str(pkt)\n\n init_header = str_pkt.index( header )\n after_header = str_pkt[ ( init_header + len(header) ) : ]\n end_header = after_header.index(const.END_LINE)\n\n val = after_header[ : end_header ]\n\n except ValueError:\n val = '-1'\n\n return val", "def test_fetchWithPartialValidArgument(self):\n # We need to clear out the welcome message.\n self.transport.clear()\n # Let's send out the faulty command.\n self.server.dataReceived(b\"0001 FETCH 1 FULLL\\r\\n\")\n expected = b\"0001 BAD Illegal syntax: Invalid Argument\\r\\n\"\n self.assertEqual(self.transport.value(), expected)\n self.transport.clear()\n self.server.connectionLost(error.ConnectionDone(\"Connection closed\"))" ]
[ "0.6413071", "0.6368929", "0.63600814", "0.6126467", "0.6093351", "0.6008764", "0.597977", "0.59638256", "0.5935303", "0.5915244", "0.59103775", "0.588814", "0.58332074", "0.5819531", "0.58032525", "0.57846427", "0.5782095", "0.57428664", "0.5648308", "0.5636194", "0.5635823", "0.5607141", "0.55981106", "0.55842924", "0.55680764", "0.55322903", "0.5489046", "0.54722", "0.54717106", "0.54137605", "0.5407792", "0.5402206", "0.54011655", "0.53798777", "0.53119606", "0.5304216", "0.52970237", "0.5292959", "0.5248809", "0.5231113", "0.5225623", "0.52183735", "0.52056116", "0.5191461", "0.5190543", "0.51753575", "0.5170202", "0.51630706", "0.51578194", "0.51417106", "0.5139298", "0.5136834", "0.51179236", "0.5106585", "0.50989974", "0.5097069", "0.50964826", "0.508107", "0.5080131", "0.5079196", "0.5070832", "0.506987", "0.50679046", "0.5067878", "0.50626975", "0.5061786", "0.5058522", "0.50515234", "0.5046831", "0.50457424", "0.50436234", "0.503978", "0.5032027", "0.50249666", "0.5019202", "0.5019202", "0.5017393", "0.5012313", "0.5008195", "0.49949816", "0.4986452", "0.49855566", "0.49641955", "0.4961916", "0.4960691", "0.4960558", "0.49603698", "0.4957902", "0.49566922", "0.49498266", "0.4948035", "0.49459454", "0.4945304", "0.49412337", "0.4935516", "0.49330658", "0.49330658", "0.49321553", "0.4929355", "0.49244004" ]
0.72213775
0
Assert that a dict of key/value pairs is correctly created when receiving encoded values.
def test_convert_request_arguments_with_encoded_items_to_dict():
    arguments = {
        "key1": [b"value1"],
        "key2": [b"value2"],
        "key3": [b"value3"],
    }
    expected = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
    }

    result = convert_request_to_dict(arguments)

    assert expected == result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dict(self):\n self.assertValue(\n {'foo': 'foo', 'bar': 43, 'zippy': 'zoo'},\n 'bar: 43 foo: foo zippy: zoo\\n'\n )", "def verifyData(self, expectedDict):\n pass", "def test_key_dict(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n dictionary = key.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"in_car\": True}", "def test_store_dict(self):\n data = {'a': 1, 'b': 'two', 'c': u'\\xfc\\xe9\\xdf\\xa2\\u03a9', 'd': [1,2,3,4], \n 'e': {'e1': 4, 'e2': 'fish'} }\n self._test_storable(data)", "def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict", "def _dict_assert(actual_dict, expected_dict):\n for key in set(actual_dict) & set(expected_dict):\n _value_assert(key, actual_dict[key], expected_dict[key])", "def _validate_dict_data(self, expected, actual):\n for k, v in expected.iteritems():\n if k in actual:\n if (isinstance(v, basestring) or\n isinstance(v, bool) or\n isinstance(v, (int, long))):\n if v != actual[k]:\n return \"{}:{}\".format(k, actual[k])\n elif not v(actual[k]):\n return \"{}:{}\".format(k, actual[k])\n else:\n return \"key '{}' does not exist\".format(k)\n return None", "def test_dictionary_io(self):\n dict_val = {'blake':31, 'something_else':'that'}\n v1 = DictionaryTestVertex.create(test_id=5, map_val=dict_val)\n v2 = DictionaryTestVertex.get(v1.vid)\n\n assert v2.map_val == dict_val", "def test_dictionary_io(self):\r\n dict_val = {'blake':31, 'something_else':'that'}\r\n v1 = DictionaryTestVertex.create(test_id=5, map_val=dict_val)\r\n v2 = DictionaryTestVertex.get(v1.vid)\r\n\r\n assert v2.map_val == dict_val", "def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_creation_dict():\n with pytest.raises(ValueError) as __:\n value = dict()\n __ = param.Integer(value=value)", "def test_input_dict(self):\n self.app.app.preprocess_request()\n\n input_dict = {'foo': 'bar'}\n\n resp = self.r(input_dict)\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertIn(\n 'foo:bar',\n resp.data.decode()\n )", "def test_invalid_dict(self):\r\n data = '\"\\\\\"Test\\\\tTesting\"'\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('JSON should be dict', response.content)\r\n self.assertEqual(response.status_code, 400)", "def test_dictionary_coerce():\n\n @type_checked\n def _run_test(something:{int: str}):\n for key, value in something.items():\n assert isinstance(key, int)\n assert isinstance(value, str)\n\n _run_test(something={123: \"abc\", 2314: 12312, \"123\": \"abc\"})", "def test_payment_accepted_invalid_dict(self):\r\n baseline = {\r\n 'orderNumber': '1',\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n }\r\n wrong = {\r\n 'orderNumber': 'k',\r\n }\r\n # tests for missing key\r\n for key in baseline:\r\n params = baseline.copy()\r\n del params[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)\r\n\r\n # tests for keys with value that can't be converted to proper type\r\n for key in 
wrong:\r\n params = baseline.copy()\r\n params[key] = wrong[key]\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params)", "def check_for_dict(check):", "def test_serialize_map_fails_with_missing_values():\n Paint = Map(\n MapEntrySpec(2, \"colour\", String)\n )\n\n with pytest.raises(ValueError):\n bytes(Paint.to_bytes({}))", "def test_get_cases_for_dict(self):\n pass", "def test_dict(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType.from_dict(data)\n assert data == observation_type.to_dict()", "def test_to_dict_checker():\n msg = \"It looks like an object has changed. Please be sure to update to_dict before updating this test to pass.\"\n assert len(DisplayInfo.__slots__) == 5, msg\n assert len(Event.__slots__) == 13, msg\n assert len(SessionInfo.__slots__) == 5, msg\n assert len(SlotInfo.__slots__) == 4, msg\n assert len(TimestampInfo.__slots__) == 3, msg\n assert len(VisitorInfo.__slots__) == 3, msg", "def test_fromkeys(self):\n d = SplayDict.fromkeys(['a', 'b', 'c'], 1)\n self.assertIn('a' , d)\n self.assertIn('b' , d)\n self.assertIn('c' , d)\n self.assertEqual(d['a'] , 1)\n self.assertEqual(d['b'] , 1)\n self.assertEqual(d['c'] , 1)", "def test_convert(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]).data, test[1])", "def test_dict_keys_duplicate(self):\n assert (\n orjson.dumps({\"1\": True, 1: False}, option=orjson.OPT_NON_STR_KEYS)\n == b'{\"1\":true,\"1\":false}'\n )", "def test_entities__Entity__tagged_values__2():\n e = Entity(u'Dummy', IDummy, 'Dummy', a=1, b='asdf')\n e.tagged_values['a'] = 2\n assert dict(a=1, b='asdf') == e.tagged_values", "def test_escape_no_value_present(self):\r\n testdict = escapeddict.EscapedDict({'key1': 'value1', 'key2': 'value2 ${key_not_present} ${key1}'})\r\n for key in testdict.keys():\r\n print testdict[key]\r\n assert testdict['key1'] == 'value1'\r\n assert testdict['key2'] == 'value2 ${key_not_present} value1'", "def test_isadict(self):\n # It is a dict-subclass, so this kind of pointless, but it doen't hurt.\n d, m = dict(a=5), ConfigDict(a=5)\n d['key'], m['key'] = 'value', 'value'\n d['k2'], m['k2'] = 'v1', 'v1'\n d['k2'], m['k2'] = 'v2', 'v2'\n self.assertEqual(d.keys(), m.keys())\n self.assertEqual(list(d.values()), list(m.values()))\n self.assertEqual(d.get('key'), m.get('key'))\n self.assertEqual(d.get('cay'), m.get('cay'))\n self.assertEqual(list(iter(d)), list(iter(m)))\n self.assertEqual([k for k in d], [k for k in m])\n self.assertEqual(len(d), len(m))\n self.assertEqual('key' in d, 'key' in m)\n self.assertEqual('cay' in d, 'cay' in m)\n self.assertRaises(KeyError, lambda: m['cay'])", "def test_serialize_map():\n Car = Map(\n MapEntrySpec(2, \"colour\", String),\n MapEntrySpec(1, \"manufacturer\", String),\n MapEntrySpec(3, \"preowned\", Boolean),\n MapEntrySpec(4, \"miles_travelled\", UnsignedInt)\n )\n\n car_data = {\n \"preowned\": True,\n \"manufacturer\": \"Ford\",\n \"colour\": \"brown\",\n \"miles_travelled\": 18562\n }\n\n assert bytes([\n 4, # Number of entries\n 3, *Boolean.to_bytes(True),\n 1, *String.to_bytes(\"Ford\"),\n 2, *String.to_bytes(\"brown\"),\n 4, *UnsignedInt.to_bytes(18562),\n ]) == bytes(\n Car.to_bytes(car_data)\n )", "def CompareEncoded(self, expected_encoded, actual_encoded):\n self.assertEquals(simplejson.loads(expected_encoded),\n simplejson.loads(actual_encoded))", "def test_valid_dumpling(self, packet_dumpling_dict):\n assert validate_dumpling(\n 
json.dumps(packet_dumpling_dict)) == packet_dumpling_dict", "def test_dict_to_dict(self):\n @converters.wrap\n def inner_test(param: dict):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})\n inner_test(param={'foo': 1, 'bar': ['bat', 2]})", "def check_dict(dic, validator, messages):\n check_dict_alg(dic, validator, [], messages, validator, \"NoObject\")", "def test_empty_dict_coerce():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test([(\"something\", \"is_true\")])", "def test_entities__Entity__tagged_values__1():\n e = Entity(u'Dummy', IDummy, 'Dummy', a=1, b='asdf')\n assert dict(a=1, b='asdf') == e.tagged_values", "def test_positive_time_period_dict_in_serializer() -> None:\n assert cv.custom_serializer(cv.positive_time_period_dict) == {\n \"type\": \"positive_time_period_dict\",\n }", "def testAttributeValues(self):\n ddict = {\n (\"\", \"bool\"): True,\n (\"\", \"int\"): 11,\n (\"\", \"float\"): 1.1,\n (\"\", \"str\"): \"a\",\n (\"\", \"boollist\"): [True, False, True],\n (\"\", \"intlist\"): [11, 22, 33],\n (\"\", \"floatlist\"): [1.1, 2.2, 3.3],\n (\"\", \"strlist\"): [\"a\", \"bb\", \"ccc\"],\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttoh5(ddict, h5file)\n for k, expected in ddict.items():\n result = h5file.attrs[k[1]]\n if isinstance(expected, list):\n if isinstance(expected[0], str):\n numpy.testing.assert_array_equal(result, expected)\n else:\n numpy.testing.assert_array_almost_equal(result, expected)\n else:\n self.assertEqual(result, expected)", "def assertDictStructure(self, expect: dict, actual: dict, path: list = []) -> None:\n self.assertEqual(expect.keys(), actual.keys(),\n msg=f\"Expected field keys are not same: {self.path_to_dict_path(path)}\")\n for key in actual:\n if isinstance(expect[key], dict):\n self.assertIsInstance(actual[key], dict,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} to be type dict, \"\n f\"got type {type(actual[key])} instead\")\n self.assertDictStructure(expect[key], actual[key], path + [key])\n elif isinstance(expect[key], list):\n self.assertIsInstance(actual[key], list,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} to be type list, \"\n f\"got type {type(actual[key])} instead\")\n\n if not expect[key]:\n self.assertFalse(actual[key], msg=f\"Expected empty list {self.path_to_dict_path(path+[key])},\"\n f\"received non empty list {actual[key]}\")\n else:\n self.assertTrue(actual[key], msg=f\"Expected list {self.path_to_dict_path(path+[key])},\"\n f\"received empty list {actual[key]}\")\n\n if expect[key] and isinstance(expect[key][0], dict):\n for i, entry in enumerate(actual[key]):\n self.assertDictStructure(expect[key][0], entry, path + [key, i])\n else:\n for i, entry in enumerate(actual[key]):\n self.assertIsInstance(entry, expect[key][0],\n msg=f\"Expected field {self.path_to_dict_path(path+[key, i])} \"\n f\"to be type {expect[key][0]}, got type {type(entry)} instead\")\n else:\n if type(expect[key]) == type:\n self.assertIsInstance(actual[key], expect[key],\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} \"\n f\"to be type {expect[key]}, got type {type(actual[key])} instead\")\n else:\n self.assertIn(type(actual[key]), expect[key].__args__,\n msg=f\"Expected field {self.path_to_dict_path(path+[key])} \"\n f\"to be type {expect[key]}, got type {type(actual[key])} instead\")", "def _check_keys(dict):\n for key in dict.keys():\n if isinstance(dict[key], 
sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict", "def assertDictContainsSubset(self, expected, actual, msg=None):\r\n missing = []\r\n mismatched = []\r\n for key, value in expected.iteritems():\r\n if key not in actual:\r\n missing.append(key)\r\n elif value != actual[key]:\r\n mismatched.append('%s, expected: %s, actual: %s' %\r\n (safe_repr(key), safe_repr(value), \r\n safe_repr(actual[key])))\r\n\r\n if not (missing or mismatched):\r\n return\r\n\r\n standardMsg = ''\r\n if missing:\r\n standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in \r\n missing)\r\n if mismatched:\r\n if standardMsg:\r\n standardMsg += '; '\r\n standardMsg += 'Mismatched values: %s' % ','.join(mismatched)\r\n\r\n self.fail(self._formatMessage(msg, standardMsg))", "def test_hood_dict(self):\n hood = Hood({\"warning\": False, \"closed\": True})\n\n dictionary = hood.as_dict()\n assert isinstance(dictionary, dict)\n assert dictionary == {\"warning\": False, \"closed\": True}", "def test_dict(self, obj: dict) -> None:\r\n properties = read_properties(obj)\r\n for key, value in properties.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if read_type(value) == 'object':\r\n logger.debug('dict -> dict')\r\n self.test_dict(obj=value)\r\n elif read_type(value) == 'array':\r\n logger.debug('dict -> list')\r\n self.test_list(array=value)", "def test_empty_dict():\n\n @type_checked\n def _run_test(thing:{}):\n assert isinstance(thing, dict)\n\n _run_test({\"foo\": \"bar\"})", "def test_asdict():\n car = Car('Peugeot', '406', '2.0 HDI Saint Tropez Sedan', False, 2001, False, 11)\n car_dict = car._asdict()\n car_expected = {\n 'brand': 'Peugeot',\n 'model': '406',\n 'version': '2.0 HDI Saint Tropez Sedan',\n 'availability': False,\n 'year': 2001,\n 'brandNew': False,\n 'id': 11\n }\n assert car_dict == car_expected", "def testAttributeValues(self):\n ddict = {\n \"@bool\": True,\n \"@int\": 11,\n \"@float\": 1.1,\n \"@str\": \"a\",\n \"@boollist\": [True, False, True],\n \"@intlist\": [11, 22, 33],\n \"@floatlist\": [1.1, 2.2, 3.3],\n \"@strlist\": [\"a\", \"bb\", \"ccc\"],\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttonx(ddict, h5file)\n for k, expected in ddict.items():\n result = h5file.attrs[k[1:]]\n if isinstance(expected, list):\n if isinstance(expected[0], str):\n numpy.testing.assert_array_equal(result, expected)\n else:\n numpy.testing.assert_array_almost_equal(result, expected)\n else:\n self.assertEqual(result, expected)", "def testCasDict(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n self.assertEqual(casDict, self.msTest.getCASDict(),\n \"The dictionary inside the MachineSettings was not validly initialized\")", "def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])", "def assertDictAlmostEqual(self, dict1, dict2):\n self.assertListEqual(dict1.keys(), dict2.keys())\n for i, j in zip(dict1.keys(), dict2.keys()):\n self.assertListAlmostEqual(list(dict1[i]), list(dict2[j]))", "def assertOrderedDictEqual(self, first, second):\n self.assertEqual(list(first.keys()), list(second.keys()))\n first_iter = first.items().__iter__()\n second_iter = second.items().__iter__()\n i = 0\n while True:\n try:\n first_k, first_v = next(first_iter)\n second_k, second_v = next(second_iter)\n with self.subTest(key=first_k, i=i):\n 
self.assertEqual(first_k, second_k)\n self.assertEqual(first_v, second_v)\n except StopIteration:\n break\n i += 1", "def test_creation_dict_from_std_dict(self):\n semaphore = Semaphore()\n lock = Lock()\n\n std_dict = {'a': 1, 'b': 2, 'c': 3}\n\n d = SwapDict(std_dict)\n self.assertTrue(str(sorted(d)) == str(sorted(std_dict)), \"Error creation SwapDict from dict, info: \\nSwapDict: %s\\n dict: %s\" %\n (str(d), str(std_dict)))\n del d", "def test_dict(test_data):\n\n # Stupidly trivial map\n gpmap.read_dict({\"wildtype\":\"0\",\n \"data\":{\"genotype\":[\"0\"]}})\n\n # Make sure wildtype check is working\n with pytest.raises(ValueError):\n gpmap.read_dict({\"data\":{\"genotype\":[\"0\"]}})\n\n # Make sure wildtype length/genotype length check working\n with pytest.raises(ValueError):\n gpmap.read_dict({\"wildtype\":\"01\",\"data\":{\"genotype\":[\"0\"]}})\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(wildtype=d[\"wildtype\"],\n genotype=d[\"genotype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write out as a dcitionary\n gpm_as_dict = gpm.to_dict()\n\n # Check wildtype meta data, mutations meta data\n assert gpm_as_dict[\"wildtype\"] == d[\"wildtype\"]\n for i in range(len(gpm_as_dict[\"mutations\"])):\n assert np.array_equal(gpm_as_dict[\"mutations\"][i],d[\"mutations\"][i])\n\n # This is a pandas data conversion. Don't check in detail, just make sure\n # the conversion dumped out a a dict.\n assert type(gpm_as_dict[\"data\"]) is dict\n\n # Read dictionary back in and make sure it's the same\n new_gpm = gpmap.read_dict(gpm_as_dict)\n conftest.compare_gpmap(gpm,new_gpm)", "def test_invalid_key_gen(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Ge1nder': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)", "def test_dict_keys_time_err(self):\n val = datetime.time(12, 15, 59, 111, tzinfo=pytz.timezone(\"Asia/Shanghai\"))\n with pytest.raises(orjson.JSONEncodeError):\n orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)", "def test_jfpv1_empty_dict_as_value(self):\n\n obj_in_1 = {\"field1\": \"yes\"}\n fp_1 = create(input=json.dumps(obj_in_1), hash_function=hash_functions.SHA256, version=1)\n\n obj_in_2 = {\"field1\": \"yes\", \"field2\": {}}\n fp_2 = create(input=json.dumps(obj_in_2), hash_function=hash_functions.SHA256, version=1)\n\n self.assertNotEqual(fp_1, fp_2)", "def assertDictSupersetOf(self, 
expected_subset, actual_superset):\n if not isinstance(expected_subset, dict):\n self.fail(\"expected_subset (%s) is not an instance of dict\" %\n type(expected_subset))\n if not isinstance(actual_superset, dict):\n self.fail(\"actual_superset (%s) is not an instance of dict\" %\n type(actual_superset))\n for k, v in expected_subset.items():\n self.assertIn(k, actual_superset)\n self.assertEqual(v, actual_superset[k],\n \"Key %(key)s expected: %(exp)r, actual %(act)r\" %\n {'key': k, 'exp': v, 'act': actual_superset[k]})", "def test_hash(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n my_dict = { v1 : 'foo'}\n\n self.assertTrue(isinstance(my_dict, dict))", "def test_basedict2(self):\n tester = BaseModel()\n self.assertIn(\"id\", tester.to_dict())\n self.assertIn(\"created_at\", tester.to_dict())\n self.assertIn(\"updated_at\", tester.to_dict())", "def test_kwargs(self):\n def f(**kwargs):\n self.assertEqual(kwargs, {'spam': 'eggs'})\n\n kwargs = self.decode('\\n\\x0b\\x01\\tspam\\x06\\teggs\\x01')\n\n f(**kwargs)", "def test_invalid_distribution_info_keys(self):\n\n invalid_distrib_info_keys = {\"bad_key\": \"\", \"badder_key\": True, \"worker_cost\": False}\n self.ocp_data[\"distribution_info\"] = invalid_distrib_info_keys\n self.assertEqual(self.ocp_data[\"distribution_info\"], invalid_distrib_info_keys)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)", "def validateTagValue(obj, attribute, value):\n if isinstance(value, dict):\n if sorted(value.iterkeys()) != BINARY_VALUE_KEYS:\n raise ValueError(\"Can't store invalid binary value: %r\" % value)\n return value", "def test_attr_dict(self):\n obj = awstats_reader.AttrDict([('this','that'), ('thus','those')])\n self.assertEqual(obj.thus, 'those')", "def test_from_empty_dict(self):\n from sosbeacon.event.message import Message\n\n self.assertRaisesRegexp(\n Exception, 'key is required', Message.from_dict, {})", "def test_invert_dict(self):\r\n self.assertEqual(invert_dict({}), {})\r\n self.assertEqual(invert_dict({'3':4}), {4:['3']})\r\n self.assertEqual(invert_dict(\\\r\n {'a':'x','b':1,'c':None,'d':('a','b')}), \\\r\n {'x':['a'],1:['b'],None:['c'],('a','b'):['d']})\r\n self.assertRaises(TypeError, invert_dict, {'a':['a','b','c']})\r\n d = invert_dict({'a':3, 'b':3, 'c':3, 'd':'3', 'e':'3'})\r\n self.assertEqual(len(d), 2)\r\n assert 3 in d\r\n d3_items = d[3][:]\r\n self.assertEqual(len(d3_items), 3)\r\n d3_items.sort()\r\n self.assertEqual(''.join(d3_items), 'abc')\r\n assert '3' in d\r\n d3_items = d['3'][:]\r\n self.assertEqual(len(d3_items), 2)\r\n d3_items.sort()\r\n self.assertEqual(''.join(d3_items), 'de')", "def test_dict_value(self):\n time_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n insta = Amenity()\n dict_con = insta.to_dict()\n self.assertEqual(dict_con[\"__class__\"], \"Amenity\")\n self.assertEqual(type(dict_con[\"created_at\"]), str)\n self.assertEqual(type(dict_con[\"updated_at\"]), str)\n self.assertEqual(\n dict_con[\"created_at\"],\n insta.created_at.strftime(time_format)\n )\n self.assertEqual(\n dict_con[\"updated_at\"],\n insta.updated_at.strftime(time_format))", "def test_basedict(self):\n tester = BaseModel()\n self.assertTrue(dict, type(tester.to_dict()))", "def test_check_xyz_dict(self):\n xyz1 = converter.check_xyz_dict(self.xyz1['str'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n\n xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 
'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n xyz2 = converter.check_xyz_dict(xyz2)\n expected_xyz2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertEqual(xyz2, expected_xyz2)\n\n xyz3 = 3.0\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz3)\n\n xyz4 = {'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz4)\n\n xyz5 = {'symbols': ('C', 'H', 'H', 'H', 'H', 'S', 'S', 'S'),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n with self.assertRaises(ConverterError):\n converter.check_xyz_dict(xyz5)\n\n # test a zmat input\n zmat6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'coords': ((None, None, None), ('R_1_0', None, None), ('R_2_1', 'A_2_1_0', None),\n ('R_3_2', 'A_3_2_0', 'D_3_2_0_1')),\n 'vars': {'R_1_0': 1.2451214479859707, 'R_2_1': 1.8953164901754294, 'A_2_1_0': 30.18165946689929,\n 'R_3_2': 2.785552137148173, 'A_3_2_0': 24.405141545817347,\n 'D_3_2_0_1': 3.6222548091772e-06}, 'map': {0: 0, 1: 1, 2: 2, 3: 3}}\n xyz6 = converter.check_xyz_dict(zmat6)\n expected_xyz6 = {'symbols': ('N', 'N', 'H', 'H'),\n 'isotopes': (14, 14, 1, 1),\n 'coords': ((-2.4426534384901547e-09, -4.375090750708016e-09, -0.622560729110669),\n (-2.4426534384901547e-09, -4.375090750708016e-09, 0.6225607188753017),\n (-2.4426534384901547e-09, 0.9528575945413793, -1.015818661524137),\n (7.032081834243086e-08, -0.9528574729632926, 1.015818803737915))}\n\n self.assertEqual(xyz6, expected_xyz6)", "def test_dict_values(self):\n t_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n c = City()\n dic = c.to_dict()\n self.assertEqual(dic[\"__class__\"], \"City\")\n self.assertEqual(type(dic[\"created_at\"]), str)\n self.assertEqual(type(dic[\"updated_at\"]), str)\n self.assertEqual(dic[\"created_at\"], c.created_at.strftime(t_format))\n self.assertEqual(dic[\"updated_at\"], c.updated_at.strftime(t_format))", "def test_update(inp):\n atty = AttyDict(a={'aa': 1, 'ab': 2})\n regular = dict(a={'aa': 1, 'ab': 2})\n\n atty.update(**inp)\n assert valid_values(atty)\n\n regular.update(**inp)\n assert dict(atty) == regular", "def test_create_mimic_dict_2(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n self.assertIn(\n '', result,\n \"Mimic dict should have one key entry for empty string '' \"\n )", "def test_01_is_equal_true(self):\n\n dict1 = {\"a\": \"1\", \"b\": \"2\"}\n dict2 = {\"a\": \"1\", \"b\": \"2\"}\n items_equal = utils.is_equal(dict1, dict2)\n self.assertTrue(items_equal)", "def test_obj_dict(self):\n obj = storage.all()\n self.assertIsInstance(obj, dict)", "def test_encode_decode(self):\n assert self._test == pybinn.loads(pybinn.dumps(self._test))", "def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 'a', 'value': '1'})\n self.assertTrue(b)", "def test_create_mimic_dict_1(self):\n result = self.module.create_mimic_dict(\"imdev.txt\")\n 
self.assertIsInstance(\n result, dict,\n \"The return value of create_mimic_dict() should be a dict.\"\n )", "def verify_json(output, expected_keys):\n deser = json.loads(output)\n assert deser\n for expected_key in expected_keys:\n assert expected_key in deser", "def test_to_json(self):\n\n expected = \"\"\"{\n \"Hello\": \"world\",\n \"Py\": \"Funceble\",\n \"World\": {\n \"world\": \"hello\"\n },\n \"funilrys\": [\n \"Fun\",\n \"Ilrys\"\n ],\n \"pyfunceble\": [\n \"funilrys\"\n ]\n}\"\"\"\n actual = Dict(self.test_subject.copy()).to_json()\n\n self.assertEqual(expected, actual)\n\n actual = Dict().from_json(expected)\n expected = self.test_subject.copy()\n\n self.assertEqual(expected, actual)", "def test_dict(self, dictionary: dict) -> None:\r\n if not isinstance(dictionary, dict):\r\n raise ValueError(f'Expected dictionary, but received {type(dictionary)}')\r\n for key, value in dictionary.items():\r\n conditional_check(key, self.case_check, self.ignored_keys)\r\n if isinstance(value, dict):\r\n self.test_dict(dictionary=value)\r\n elif isinstance(value, list):\r\n self.test_list(items=value)", "def test_comparing(self):\n for test in self.test_dict_data:\n self.assertEqual(dottedDict(test[0]), test[1])", "def test_dict(self):\n s1 = Square(4)\n s1_dict = s1.to_dictionary()\n s1_correct = {\"id\":1, \"size\":4, \"x\":0, \"y\":0}\n self.assertEqual(s1_dict, s1_correct)\n\n s2 = Square(9)\n s2_new = {\"id\":9, \"size\":4, \"x\":3, \"y\":4}\n s2.update(**s2_new)\n self.assertEqual(s2.to_dictionary(), s2_new)", "def test_serialize(value, expected):\n assert json_dumps(value) == expected", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def test_values(self):\n\n self.assertDictEqual(Square.to_dictionary(self.s1),\n {'size': 10, 'id': 9, 'y': 0, 'x': 0})\n self.assertEqual(self.s1.id, 9)\n self.assertEqual(self.s1.width, 10)\n self.assertEqual(self.s1.height, 10)\n self.assertEqual(self.s1.x, 0)\n self.assertEqual(self.s1.y, 0)\n self.assertDictEqual(Square.to_dictionary(self.s2),\n {'y': 0, 'id': 10, 'size': 2, 'x': 0})\n self.assertEqual(Square.__str__(self.s2),\n \"[Square] (10) 0/0 - 2\")\n self.assertEqual(self.s2.id, 10)\n self.assertEqual(self.s2.width, 2)\n self.assertEqual(self.s2.height, 2)\n self.assertEqual(self.s2.x, 0)\n self.assertEqual(self.s2.y, 0)\n self.assertDictEqual(Square.to_dictionary(self.s3),\n {'y': 0, 'id': 11, 'size': 10, 'x': 5})\n self.assertEqual(Square.__str__(self.s3),\n \"[Square] (11) 5/0 - 10\")\n self.assertEqual(self.s3.id, 11)\n self.assertEqual(self.s3.width, 10)\n self.assertEqual(self.s3.height, 10)\n self.assertEqual(self.s3.x, 5)\n self.assertEqual(self.s3.y, 0)\n self.assertDictEqual(Square.to_dictionary(self.s4),\n {'y': 4, 'id': 12, 'size': 6, 'x': 2})\n self.assertEqual(Square.__str__(self.s4),\n \"[Square] (12) 2/4 - 6\")\n self.assertEqual(self.s4.id, 12)\n self.assertEqual(self.s4.width, 6)\n self.assertEqual(self.s4.height, 6)\n self.assertEqual(self.s4.x, 2)\n self.assertEqual(self.s4.y, 4)\n self.assertDictEqual(Square.to_dictionary(self.s5),\n {'y': 5, 'id': 20, 'size': 2, 'x': 3})\n self.assertEqual(Square.__str__(self.s5),\n \"[Square] (20) 3/5 - 2\")\n self.assertEqual(self.s5.id, 20)\n self.assertEqual(self.s5.width, 2)\n self.assertEqual(self.s5.height, 2)\n self.assertEqual(self.s5.x, 3)\n self.assertEqual(self.s5.y, 5)\n # self.assertEqual(self.s1._Base__nb_objects, 6)", "def inner_test(param: dict):\n self.assertEqual(param, {'foo': 1, 'bar': ['bat', 2]})", "def 
_validate_input_dict(self, input):\n if isinstance(input, dict):\n required = {\"type\", \"value\"}\n not_found = required - set(input.keys())\n if not_found:\n raise SpecificationError(\n \"Required key(s) not found in input dictionary: {}\".format(\n \", \".join(not_found)\n )\n )\n else:\n raise Exception(\"input element has to be a dictionary\")", "def test_key_str(self):\n key = Key({\"warning\": False, \"inCar\": True})\n\n string = str(key)\n assert isinstance(string, str)\n assert string == \"{'warning': False, 'in_car': True}\"", "def test_deep_set_create(self):\n mdict = copy.deepcopy(self.dict1)\n res = dictupdate.set_dict_key_value(mdict, \"K:L:M\", \"Q\")\n self.assertEqual(\n {\n \"A\": \"B\",\n \"C\": {\"D\": \"E\", \"F\": {\"G\": \"H\", \"I\": \"J\"}},\n \"K\": {\"L\": {\"M\": \"Q\"}},\n },\n res,\n )", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def _value_assert(current_key, actual_value, expected_value):\n if actual_value is None:\n return\n if isinstance(actual_value, list) and isinstance(expected_value, list):\n _list_assert(actual_value, expected_value)\n elif isinstance(actual_value, dict) and isinstance(expected_value, dict):\n _dict_assert(actual_value, expected_value)\n else:\n assert actual_value == expected_value, \"key: {}\".format(current_key)", "def test_dictionary_values(self) -> None:\n items = {\"a\": 1, \"b\": 2, \"c\": 3}\n result = flatten(items.values())\n self.assertEqual(sorted(result), [1, 2, 3])", "def test_from_dict(self):\n cd = ConfigDict.from_dict({\n 'x': 1,\n 'y': {\n 'z': 2,\n 'w': [1,2, {'v': 22}]\n }\n })\n\n self.assertEquals(cd.x, 1)\n self.assertEquals(cd['x'], 1)\n self.assertEquals(cd.y.z, 2)\n self.assertEquals(cd['y']['z'], 2)\n self.assertEquals(cd.y.w[2].v, 22)\n self.assertEquals(cd['y']['w'][2]['v'], 22)", "def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])", "def test_validate_non_included_keys():\n field = PartialDictField(included_keys=['a'], value_field=CharField(max_length=5),\n required=False)\n data = {'b': '123456'}\n try:\n field.run_validators(data)\n except ValidationError:\n assert False, 'Got a ValidationError for a non-included key'", "def test_dict_serialization(self, molecule):\n serialized = molecule.to_dict()\n molecule_copy = Molecule.from_dict(serialized)\n assert molecule == molecule_copy", "def assertContainsDict(self, dictionary, data):\n for key in dictionary:\n self.assertTrue(key in data, msg=\"Data doesn't have key '{}'\".format(key))\n value = dictionary[key]\n value2 = data[key]\n self.assertEqual(value, value2,\n msg=\"key={}, value={} != target={}\".format(key, value, value2))", "def test_dict_keys_strict(self):\n assert (\n orjson.dumps(\n {9223372036854775807: True},\n option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,\n )\n == b'{\"9223372036854775807\":true}'\n )", "def test_dict_keys_substr_passthrough(self):\n assert (\n orjson.dumps(\n {SubStr(\"aaa\"): True},\n option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_SUBCLASS,\n )\n == b'{\"aaa\":true}'\n )", "def test_note_asdict(fake_note_with_video_attachment):\n\n note_id_value: str = str(uuid.uuid4())\n a_note = 
Note.from_dict(note_id_value, fake_note_with_video_attachment)\n assert \"msdyn_workorder\" == a_note.asdict()['object_type']\n assert a_note.work_order_id == a_note.asdict()['work_order_id']\n assert a_note.filename == a_note.asdict()['filename']", "def test_set_dict_value_1(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"A\")", "def test_deep_set_ordered_dicts(self):\n res = dictupdate.set_dict_key_value({}, \"A:B\", \"foo\", ordered_dict=True)\n self.assertEqual({\"A\": OrderedDict([(\"B\", \"foo\")])}, res)", "def test_invalid_values(self):\n self.assertEqual(dictsort([1, 2, 3], \"age\"), \"\")\n self.assertEqual(dictsort(\"Hello!\", \"age\"), \"\")\n self.assertEqual(dictsort({\"a\": 1}, \"age\"), \"\")\n self.assertEqual(dictsort(1, \"age\"), \"\")" ]
[ "0.7359073", "0.68950987", "0.6765567", "0.6608221", "0.6606146", "0.6555588", "0.64671695", "0.6464478", "0.64596117", "0.6452391", "0.64428294", "0.64428294", "0.64036715", "0.63711655", "0.63386524", "0.63026303", "0.629081", "0.6284405", "0.6279023", "0.6257576", "0.62568307", "0.62556154", "0.6252925", "0.62472653", "0.62393945", "0.6220476", "0.61929256", "0.6178212", "0.6168924", "0.61514676", "0.6148442", "0.61454254", "0.6132765", "0.612926", "0.61185026", "0.61099654", "0.6100638", "0.6073397", "0.60688454", "0.60685855", "0.60598356", "0.60289544", "0.6025494", "0.6023081", "0.6007719", "0.5978836", "0.59725237", "0.59686786", "0.59591526", "0.59556633", "0.595413", "0.5946842", "0.59449625", "0.594", "0.59378624", "0.5933076", "0.5924174", "0.592357", "0.5912848", "0.5912045", "0.59106356", "0.59103656", "0.5909303", "0.59054744", "0.5899673", "0.5897942", "0.58828664", "0.5878884", "0.5877979", "0.5871074", "0.58588046", "0.58493257", "0.5843494", "0.5840536", "0.58237565", "0.5817267", "0.5817096", "0.5815918", "0.58101755", "0.58056957", "0.57932246", "0.5791796", "0.5786638", "0.57859516", "0.5785044", "0.578499", "0.5783363", "0.57817364", "0.57596713", "0.5750166", "0.574899", "0.5744778", "0.57438993", "0.57407844", "0.57344395", "0.5725299", "0.57238036", "0.572375", "0.57168007", "0.57158655" ]
0.656517
5
Extract metadata like original image name and crop position from the given file name. Change this function to use a different file name pattern.
import os
from collections import namedtuple


def get_metadata_from_filename(file_name: str) -> namedtuple:
    # Strip any directory part so only the bare file name is parsed.
    if os.path.isabs(file_name):
        file_name = os.path.basename(file_name)
    # The original image name is everything before the first '-'.
    original_image_name = file_name.split('-')[0]
    # The crop x position is the first of the two '+'-separated fields just before the extension.
    x_pos = int(file_name.split('.')[-2].split('+')[-2:][0])
    Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos'])
    return Metadata(original_image_name, x_pos)
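A minimal usage sketch for the function above, assuming a hypothetical crop file name of the form `<original>-<suffix>.+<x>+<y>.<ext>` (the example name and values are assumptions; adapt the parsing to whatever naming scheme the crop files actually use):

```python
# Hypothetical crop file name following the assumed pattern.
crop_file = 'original01-crop.+0128+0064.png'
meta = get_metadata_from_filename(crop_file)
print(meta.original_image_name)  # -> 'original01'
print(meta.x_pos)                # -> 128
```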
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed", "def repackFileName(parsedName):\n cropCoords = None\n if 'minX' in parsedName:\n cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])\n return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],\n cropCoords=cropCoords,\n diffMinutes=parsedName['diffMinutes'])", "def parse_crop_details(fn, crop_name, crop_parent):\n if crop_name is None:\n if fn is None:\n raise ValueError(\"Either `fn` or `crop_name` must be give.\")\n crop_name = _get_fn_name(fn)\n\n crop_parent = crop_parent if crop_parent is not None else os.getcwd()\n crop_location = os.path.join(crop_parent, \".xyz-{}\".format(crop_name))\n\n return crop_location, crop_name, crop_parent", "def extract_metadata(name):\n seps = name.count(\" - \")\n artist = title = None\n\n if seps == 1:\n\n pos = name.find(\" - \")\n artist = name[:pos].strip()\n title = name[pos + 3:].strip()\n\n else:\n title = name.strip()\n\n return dict(artist=artist, title=title)", "def file_info(file_name, file_pattern):\n match = re.compile(file_pattern).match(file_name)\n if match:\n basepath = match.group('basepath')\n sensor = match.group('sensor')\n ax = match.group('ax')\n freq = match.group('freq')\n date = match.group('date')\n return basepath, sensor, ax, freq, date\n else:\n return None # there is no file extension to file_name", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, 
file_format", "def extract_file_name(self, input_file):\n self.file_name_with_ext, self.file_name = extract_file_name(input_file)", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def parse_image_filename(filename):\n\n # regexes\n starts_with_six_digits = re.compile(r'^\\d{6}')\n capital_letter = re.compile(r'([A-Z]{1})')\n plus = re.compile(r'\\+')\n\n # split the filename and extention\n filename, extension = os.path.splitext(filename)\n try:\n style_number, color, description = filename.split('_')\n except Exception as e:\n print(e)\n print(filename, extension)\n\n style_number = int(style_number)\n\n # decode the color\n # intCaps -> int/caps\n color = capital_letter.sub(r'/\\1', color).lower()\n # plus+to+space -> plus to space\n color = plus.sub(r' ', color)\n\n # decode the description\n description = plus.sub(r' ', description)\n\n return style_number, color, description", "def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]:\n subj, rest = osp.basename(imgname).split('_', 1)\n action, rest = rest.split('.', 1)\n camera, rest = rest.split('_', 1)\n return subj, action, camera", "def test_get_image_name(self):\n ssp = self._get_ssp_stor()\n\n def verify_image_name(name, checksum, expected):\n img_meta = image_meta.ImageMeta(name=name, checksum=checksum)\n self.assertEqual(expected, ssp._get_image_name(img_meta))\n self.assertTrue(len(expected) <= const.MaxLen.FILENAME_DEFAULT)\n\n verify_image_name('foo', 'bar', 'image_foo_bar')\n # Ensure a really long name gets truncated properly. Note also '-'\n # chars are sanitized.\n verify_image_name(\n 'Template_zw82enbix_PowerVM-CI-18y2385y9123785192364',\n 'b518a8ba2b152b5607aceb5703fac072',\n 'image_Template_zw82enbix_PowerVM_CI_18y2385y91'\n '_b518a8ba2b152b5607aceb5703fac072')", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. 
This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx", "def parse_file_name(file_name):\n\n elements = file_name.split(\"_\")\n if file_name.find(\"_VI_\") > 0:\n client = elements[0]\n capture_range = \"R1\"\n condition = elements[2]\n polarization = \"VIS\"\n shot = elements[4]\n modality = \"VIS\"\n else:\n client = elements[0]\n capture_range = elements[1]\n condition = elements[2]\n polarization = elements[3]\n shot = elements[4]\n modality = \"THERMAL\"\n \n return client, capture_range, condition, polarization, shot, modality", "def extract_date_metadata(fname):\n\n try:\n # check if file has creation date, exception if not\n date_metadata = fileops.get_video_creation_date_metadata(fname)\n\n # extract the date/time string from metadata, exception if\n # not the proper format\n datetimestr = metadata_to_datetimestr(date_metadata)\n\n logging.debug(\"Found creation date metadata %r for file %r\",\n datetimestr, os.path.basename(fname))\n\n return datetimestr\n\n except fileops.VideoMetadataError:\n logging.warning(\n \"%r does not have a proper creation date metadata\",\n os.path.basename(fname))\n\n return \"\"\n\n except DateStrError:\n logging.warning(\n \"%r creation data metadata not the right format\",\n os.path.basename(fname))\n \n return \"\"", "def _get_python_info_rename(path: str) -> str:\n if path.name.endswith(\".egg-info\"):\n f = \"PKG-INFO\"\n else:\n # Assume dist-info. 
Are there other options?\n f = \"METADATA\"\n pkgmetainfodata = path / f\n with pkgmetainfodata.open() as f:\n for line in f:\n match = re.match(r'^Name: ([A-Z-a-z].+)', line)\n if match:\n name = match.group(1)\n break\n if not line.strip():\n # First blank line; gone too far; give up\n return\n else:\n return\n return name + path.suffix", "def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)", "def get_preset_metadata(self, filename):\r\n\r\n raise NotImplementedError", "def get_data_from_name(image_name):\n nome = image_name.split(\".\")[0]\n nome_recebido = list(nome)\n ano = ''.join(nome_recebido[:4])\n mes = ''.join(nome_recebido[4:6])\n dia = ''.join(nome_recebido[6:8])\n hora = ''.join(nome_recebido[8:10])\n minuto = ''.join(nome_recebido[10:12])\n segundo = ''.join(nome_recebido[12:14])\n codigo = ''.join(nome_recebido[14:24])\n certeza = ''.join(nome_recebido[24:27])\n placa = ''.join(nome_recebido[27:34])\n posicao = ''.join(nome_recebido[34])\n classificao = ''.join(nome_recebido[35:37])\n velocidade = ''.join(nome_recebido[37:40])\n comprimento = ''.join(nome_recebido[40:43])\n sequencial = ''.join(nome_recebido[43:])\n\n return [ano, mes, dia, hora, minuto, segundo, codigo, certeza, placa, posicao, classificao, velocidade, comprimento,\n sequencial]", "def extract_description(path):\n return os.path.splitext(os.path.basename(path))[0]", "def test_get_original_file_name_match_regex(self):\n test_file_name = \"uploaded_file_name_%s_abcd123\" % settings.FILE_DUPLICATION_MARKER\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def LoadMetadata(filename):\r\n## print filename\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.zvi'))\r\n if globbed:\r\n return LoadZVIMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.xml'))\r\n if globbed:\r\n return LoadAxioVisionXMLMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'metadata.txt'))\r\n if globbed:\r\n return LoadMMMetaData(globbed[0])\r\n return None\r\n #no further valid options, crash horribly\r", "def read_photo_date(file_name):\n # Open image file for reading (binary mode)\n fd = open(file_name, 'rb')\n\n # Return Exif tags\n tags = exifread.process_file(fd)\n try:\n date_time = tags['EXIF DateTimeOriginal']\n except KeyError:\n date_time = get_timestamp_from_mp4(os.path.basename(file_name))\n if date_time == \"\":\n # date time info is not valid in exif, try to get file's create time\n date_time = get_file_modification_time(file_name)\n \n\n log(str(date_time) + \"--->\" + str(file_name))\n\n #parse date time string and returns tuple\n words = str(date_time).split(' ')[0].split(':') #2013:11:16 17:44:16\n if len(words) == 3:\n y = words[0]\n m = words[1]\n d = words[2]\n else:\n words = str(date_time).split(' ')[0].split('-') # 2015-01-08 16:05:13\n y = words[0]\n m = words[1]\n d = words[2]\n\n #returns a tuple\n return y, m, d", "def extractParticular(link):\n webpage = openWebsite(link).read()\n nameIndexStart = webpage.index('<title>') + 7\n nameIndexStop = webpage[nameIndexStart:].index('</title>') + nameIndexStart - 1\n name = webpage[nameIndexStart : nameIndexStop].split('-')[0]\n name = \" \".join(name.split())\n name = re.sub('/', '', name)\n\n avatarName = RESTAURANTPATH + '{}.png'.format(\"\".join(name.split()).lower())\n captureImage(link, 
avatarName)\n\n return name, avatarName", "def extract_metadata_videoname(basename):\n # basename could be a path to a bb video file or just the basename.\n # TODO(gitmirgut): Check if in data from 2015 is in the same string format.\n fn_wo_ext = os.path.splitext(os.path.basename(basename))[0]\n id_str, interval_str = fn_wo_ext.split('_')[1:]\n start_str, end_str = interval_str.split('--')\n id_int = int(id_str)\n start_ts = iso8601.parse_date(start_str)\n end_ts = iso8601.parse_date(end_str)\n series = pd.Series([id_int, start_ts, end_ts],\n index=['cam_id', 'start_ts', 'end_ts'])\n return series", "def reFileName(str_):\n rv = 'None', str_\n m = re.match(r'((?:[a-zA-Z0-9-]){4,})_(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n else:\n m = re.match(r'(\\d+-\\d+)\\.-\\.(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n return rv", "def _extract_metadata(self) -> None:\n self.log(\"Extracting metadata.\")\n image_paths: list[Path] = []\n for ext in (\"jpg\", \"jpeg\", \"png\"):\n image_paths.extend(self._base_dir.glob(f\"**/*.{ext}\"))\n image_paths_str = [str(image.relative_to(self._base_dir)) for image in image_paths]\n filepaths = pd.Series(image_paths_str)\n metadata = cast(\n pd.DataFrame,\n filepaths.str.split(\"/\", expand=True).rename( # type: ignore[attr-defined]\n columns={0: \"superclass\", 1: \"concept\", 2: \"context\", 3: \"filename\"}\n ),\n )\n metadata[\"filepath\"] = filepaths\n metadata.sort_index(axis=1, inplace=True)\n metadata.sort_values(by=[\"filepath\"], axis=0, inplace=True)\n metadata = self._label_encode_metadata(metadata)\n metadata.to_csv(self._metadata_path)", "def test_jpeg_exif(h, f):\n if h[6:10].lower() == 'exif':\n return 'jpeg'", "def img_in(filename):\n temp_img = Image.open(filename)\n img = np.array(temp_img)\n name = filename.split('.')[-2]\n return name, img", "def extract_filename(self, filename: str, lang: str) -> 'typing.Dict[str, str]':\n meta = {}\n meta['date'] = self._getNikolaTime(os.path.getctime(filename))\n w_title = os.path.basename(filename).replace(\"/\", \"_\", 100).rstrip('.org')\n w_title = w_title.replace(\" \", \"_\", 100)\n meta['w_title'] = w_title\n\n if 'test' in filename:\n meta['write'] = True\n\n split = filename.split(\"/\") \n if len(split) > 2:\n cate = split[1]\n cate = self._lookup_cate_table(cate)\n meta['category'] = cate\n\n self._manually_write_meta(filename, meta)\n return meta", "def image_name(name):\n \n # Gets the '.' position\n dot = name.find('.')\n # Slice the name from beginning and before '.'\n img = name[:dot]\n # return string with jpg format\n return \"{}.jpg\".format(img)", "def parse_filename(cls, filename):\n #from nose.tools import set_trace; set_trace()\n m = re.match(cls._pattern, os.path.basename(filename))\n basename = m.group(1)\n bandname = cls._bandmap.get(m.group(2), m.group(2))\n return basename, bandname", "def extract_metadata(rawfile,codeversions={}):\r\n import datetime\r\n add_standard_metadata(rawfile)\r\n # get monochromator-related information\r\n mom = average_metadata(rawfile['$entry/instrument/crystal/omega'])\r\n tk_angle = average_metadata(rawfile['$entry/instrument/crystal/takeoff_angle'])\r\n # get the date\r\n date_form = datetime.datetime.strptime(str(rawfile['$entry/start_time']),\"%Y-%m-%d %H:%M:%S\")\r\n mono_change = datetime.datetime(2009,04,01)\r\n if date_form < mono_change:\r\n monotype = \"115\"\r\n else:\r\n monotype = \"335\"\r\n hklval = pick_hkl(mom - tk_angle/2.0,monotype)\r\n if len(hklval)==3: # i.e. 
h,k,l found\r\n rawfile.add_metadata(\"_pd_instr_monochr_pre_spec\",\r\n hklval + \" reflection from Ge crystal, \"+monotype+\" cut\",tag=\"CIF\")\r\n wavelength = calc_wavelength(hklval,tk_angle)\r\n rawfile.add_metadata(\"_diffrn_radiation_wavelength\",\"%.3f\" % wavelength,tag=\"CIF\")\r\n rawfile.add_metadata(\"_[local]_diffrn_radiation_wavelength_determination\",\r\n \"Wavelength is calculated from monochromator hkl and takeoff angle and is therefore approximate\",\r\n tag=\"CIF\")\r\n # The following is changed later if the primary collimator is found to be inserted\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"%.3f\" % (0.099*2.0*wavelength),tag=\"CIF\")\r\n # Do some logic to obtain collimator positions\r\n pcr = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_rotation\"])\r\n pcx = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_translation\"])\r\n if pcx > 120:\r\n if abs(pcr-360.0)<5 or abs(pcr) < 5: # 5' collimator\r\n coll_string = \"A 5' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.0833\",tag=\"CIF\")\r\n else:\r\n coll_string = \"A 10' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.1667\",tag=\"CIF\")\r\n else: coll_string = \"No primary monochromator \"\r\n try:\r\n scr = average_metadata(rawfile['$entry/sample/secondary_collimator'])\r\n if scr>0.5:\r\n coll_string += \" and a 10' secondary collimator post-monochromator.\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_mono/spec\",\"0.1667\",tag=\"CIF\")\r\n else:\r\n coll_string += \" and no secondary collimator.\"\r\n rawfile.add_metadata(\"_diffrn_radiation_collimation\",coll_string,tag=\"CIF\")\r\n except AttributeError: #some early files are missing secondary collimator\r\n pass\r\n # These values were in the CIF writing area of the Java routines, best put here\r\n try:\r\n program_release = str(rawfile[\"$entry/program_revision\"])\r\n except AttributeError:\r\n program_release = str(rawfile[\"$entry/sics_release\"])\r\n rawfile.add_metadata(\"_computing_data_collection\",str(rawfile[\"$entry/program_name\"]) + \" \" + \\\r\n program_release,\"CIF\")\r\n # List the code versions used for data reduction\r\n codelist = \"\"\r\n for key in codeversions.keys():\r\n codelist += \"%-20s: %s\\n\" % (key,codeversions[key])\r\n rawfile.add_metadata(\"_computing_data_reduction\", str(\"Gumtree Echidna/Python routines, Git versions:\\n\" + codelist),\"CIF\")\r\n rawfile.add_metadata(\"_pd_spec_special_details\",sanitize(str(rawfile[\"$entry/sample/name\"])),\"CIF\")\r\n rawfile.add_metadata(\"_[local]_data_collection_description\",str(rawfile[\"$entry/sample/description\"]),\"CIF\")\r\n start_time = str(rawfile[\"$entry/start_time\"]).replace(\" \",\"T\")\r\n end_time = str(rawfile[\"$entry/end_time\"]).replace(\" \",\"T\")\r\n rawfile.add_metadata(\"_pd_meas_datetime_initiated\", start_time,\"CIF\")\r\n rawfile.add_metadata(\"_[local]_datetime_completed\", end_time,\"CIF\")\r\n try:\r\n username = str(rawfile[\"user_name\"])\r\n except:\r\n username = \"?\"\r\n rawfile.add_metadata(\"_pd_meas_info_author_name\", sanitize(username),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_email\", str(rawfile[ \"$entry/user/email\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_phone\", str(rawfile[ \"$entry/user/phone\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_2theta_monochr_pre\",\"%.3f\" % tk_angle,\"CIF\")\r\n 
rawfile.add_metadata(\"_pd_instr_dist_mono/spec\", \"%.1f\" % average_metadata(rawfile[ \"$entry/sample/mono_sample_mm\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_dist_spec/detc\",\"%.1f\" % average_metadata(rawfile[\"$entry/instrument/detector/radius\"]),\"CIF\")\r\n try:\r\n rawfile.add_metadata(\"_diffrn_source_power\", \"%.2f\" % (average_metadata(rawfile[\"$entry/instrument/source/power\"])*1000),\"CIF\")\r\n except AttributeError: #sometimes source power is missing\r\n pass\r\n # imgCIF information about geometry\r\n # axis loop\r\n names = (('_axis.id','_axis.type','_axis.equipment','_axis.depends_on'),)\r\n values = [['source','gravity','stth','horizontal','vertical'],\r\n ['.','.','rotation','rotation','translation'],\r\n ['source','gravity','detector','detector','detector'],\r\n ['.','.','.','stth','stth']]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n radius = rawfile.__dict__['ms'][\"_pd_instr_dist_spec/detc\"]\r\n # add the vectors:\r\n \"\"\"\r\n source 0 0 1 . . .\r\n gravity -1 0 0 . . .\r\n stth 1 0 0 . . .\r\n horizontal 1 0 0 . . .\r\n vertical 1 0 0 0 0 -728\r\n \"\"\"\r\n vector_dict = {\"_axis.vector[1]\":['0','-1','1','1','1'],\r\n \"_axis.vector[2]\":['0','0','0','0','0'],\r\n \"_axis.vector[3]\":['1','0','0','0','0'],\r\n \"_axis.offset[1]\":['.','.','.','.','.'],\r\n \"_axis.offset[2]\":['.','.','.','.','.'],\r\n \"_axis.offset[3]\":['1','0','0','0',\"-\"+radius]}\r\n rawfile.__dict__['ms'].AddToLoop('_axis.id',vector_dict)\r\n # Add information about the stth positions for later use\r\n rawfile.add_metadata(\"_diffrn_scan.id\",\"1\",\"CIF\")\r\n rawfile.add_metadata(\"_diffrn_scan.frames\",rawfile.shape[0],\"CIF\")\r\n frame_ids = map(lambda a:\"%d\" % a,range(rawfile.shape[0]))\r\n stths = rawfile.stth[:]\r\n names = ((\"_diffrn_scan_frame.frame_id\",\"_diffrn_scan_frame.frame_number\"),)\r\n values = [frame_ids,range(1,rawfile.shape[0]+1)] #Spec says start from 1\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n names = ((\"_diffrn_scan_frame_axis.frame_id\",\"_diffrn_scan_frame_axis.axis_id\",\r\n \"_diffrn_scan_frame_axis.angle\"),)\r\n values = [frame_ids,['stth']*rawfile.shape[0],map(float,stths)]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n return rawfile", "def extract_level_and_date_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n dates = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n date = name.split('_')[-3]\n time = name.split('_')[-2]\n dates.append(datetime.strptime(date+'_'+time, '%y%m%d_%H%M%S').strftime('%d/%m/%Y %H:%M:%S'))\n level.append(float(number))\n return np.array(level), np.array(dates)", "def splitFilename(filename):\n\n if filename[-4:] == '.rpm':\n filename = filename[:-4]\n \n archIndex = filename.rfind('.')\n arch = filename[archIndex+1:]\n\n relIndex = filename[:archIndex].rfind('-')\n rel = filename[relIndex+1:archIndex]\n\n verIndex = filename[:relIndex].rfind('-')\n ver = filename[verIndex+1:relIndex]\n\n epochIndex = filename.find(':')\n if epochIndex == -1:\n epoch = ''\n else:\n epoch = filename[:epochIndex]\n \n name = filename[epochIndex + 1:verIndex]\n return name, ver, rel, epoch, arch", "def metadata_name(filename):\n\tif test_hachoir_extension(filename):\n\t\tmetadata = metadata_for_file(filename)\n\t\tif metadata:\n\t\t\tdata = dict([\n\t\t\t\t(data.key, data.values[0].value)\n\t\t\t\tfor data in metadata\n\t\t\t\tif 
data.values\n\t\t\t\t])\n\t\telse:\n\t\t\tdata=None\n\telif test_3D_extension(filename):# 3D not in the extention \n\t\tdata = {'mime_type':'model'}\n\telse:\n\t\tdata=None\n\treturn data", "def extract_exif(fname):\n\n try:\n # check if file has EXIF date, exception if not\n exif_data = fileops.get_exif_datetimeorig_tag(fname)\n\n # extract the date/time string from EXIF, exception if\n # not the proper format\n datetimestr = exif_to_datetimestr(exif_data)\n\n logging.debug(\"Found EXIF Tag %r for file %r\", datetimestr, \n os.path.basename(fname))\n\n return datetimestr\n\n except fileops.EXIFTagError:\n logging.warning(\"%r does not have a proper EXIF tag\",\n os.path.basename(fname))\n return \"\";\n\n except DateStrError:\n logging.warning(\"%r EXIF tag not the right format\",\n os.path.basename(fname))\n return \"\";", "def parse_rarefaction_fname(name_string):\r\n\r\n root, ext = os.path.splitext(name_string)\r\n root_list = root.split(\"_\")\r\n iters = int(root_list.pop())\r\n seqs_per_sam = int(root_list.pop())\r\n base_name = \"_\".join(root_list)\r\n return base_name, seqs_per_sam, iters, ext", "def decompose_newstyle_name(filename):\n path, parts, ext = _get_fields(filename)\n observatory = parts[0]\n serial = list_get(parts, 3, \"\")\n\n if ext == \".pmap\":\n assert len(parts) in [1,2], \"Invalid .pmap filename \" + repr(filename)\n instrument, filekind = \"\", \"\"\n serial = list_get(parts, 1, \"\")\n elif ext == \".imap\":\n assert len(parts) in [2,3], \"Invalid .imap filename \" + repr(filename)\n instrument = parts[1]\n filekind = \"\"\n serial = list_get(parts, 2, \"\")\n else:\n assert len(parts) in [3,4], \"Invalid filename \" + repr(filename)\n instrument = parts[1]\n filekind = parts[2]\n serial = list_get(parts, 3, \"\")\n\n # Don't include filename in these or it messes up crds.certify unique error tracking.\n\n assert instrument in INSTRUMENTS+[\"\"], \"Invalid instrument \" + repr(instrument)\n assert filekind in FILEKINDS+[\"\"], \"Invalid filekind \" + repr(filekind)\n assert re.match(r\"\\d*\", serial), \"Invalid id field \" + repr(id)\n # extension may vary for upload temporary files.\n\n return path, observatory, instrument, filekind, serial, ext", "def get_metadata_from_filename(f,file_name_field_order,file_name_delimiter,\\\n default_text='not_specified',verbose=False): \n filename_components = {}\n for i,field in enumerate(f.split(file_name_delimiter)):\n filename_components[i]=field\n #if verbose:\n # print \"Filename components:\",filename_components\n filename_metadata = {}\n try:\n for field in file_name_field_order.keys():\n filename_metadata[field] =\\\n filename_components.get(file_name_field_order.get(field,default_text),default_text)\n\n #if verbose:\n # print \"filename_metadata:\",filename_metadata\n except IndexError, e:\n print \"Could not parse filename %s using delimiter: %s. 
Skipping...\" %(f,file_name_delimiter)\n return None\n\n return filename_metadata", "def rename(img):\n ext = splitext(img)[1].lower()\n name = get_date(open(img))\n if name is not None:\n name = name + ext\n return copy(img, name)", "def _pname_and_metadata(in_file):\n\n\n if in_file.endswith(\".csv\"):\n raise ValueError(\"Did not find input metadata file: %s\" % in_file)\n base, md, global_vars = in_file, {}, {}\n md_file = None\n return base, md, global_vars, md_file", "def get_name(fname):\n if fname.endswith('.nii.gz'):\n fname = fname.replace('.nii.gz', '')\n\n name_stuff = {}\n tmp = fname.split('_') # tmp is just a placeholder\n elems = tmp[-4:-1] # The elements of the file name in a list\n name_stuff['IC'] = elems[0][2:] # 18\n name_stuff['Scan'] = elems[1][1:] # 3\n name_stuff['Hemi'] = elems[2].upper()\n\n return name_stuff", "def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))", "def _getSampleMetadataFromFilename(self, filenameSpec):\n\n # If the dilution series design is not defined in the SOP, load the default.\n if not 'dilutionMap' in self.Attributes.keys():\n dilutionMap = pandas.read_csv(os.path.join(toolboxPath(), 'StudyDesigns', 'DilutionSeries.csv'), index_col='Sample Name')\n self.Attributes['dilutionMap'] = dilutionMap['Dilution Factor (%)'].to_dict()\n\n # Strip any whitespace from 'Sample File Name'\n self.sampleMetadata['Sample File Name'] = self.sampleMetadata['Sample File Name'].str.strip()\n\n # Break filename down into constituent parts.\n baseNameParser = re.compile(filenameSpec, re.VERBOSE)\n fileNameParts = self.sampleMetadata['Sample File Name'].str.extract(baseNameParser, expand=False)\n\n # Deal with badly ordered exclusions\n fileNameParts['exclusion'].loc[fileNameParts['exclusion2'].isnull() == False] = fileNameParts['exclusion2'].loc[fileNameParts['exclusion2'].isnull() == False]\n fileNameParts.drop('exclusion2', axis=1, inplace=True)\n\n # Pass masks into enum fields\n fileNameParts.loc[:, 'AssayRole'] = AssayRole.Assay\n fileNameParts.loc[fileNameParts['reference'] == 'SR', 'AssayRole'] = AssayRole.PrecisionReference\n fileNameParts.loc[fileNameParts['baseName'].str.match('.+[B]\\d+?[SE]\\d+?', na=False).astype(bool), 'AssayRole'] = AssayRole.PrecisionReference\n fileNameParts.loc[fileNameParts['reference'] == 'LTR', 'AssayRole'] = AssayRole.PrecisionReference\n fileNameParts.loc[fileNameParts['reference'] == 'MR', 'AssayRole'] = AssayRole.PrecisionReference\n fileNameParts.loc[fileNameParts['injectionKind'] == 'SRD', 'AssayRole'] = AssayRole.LinearityReference\n fileNameParts.loc[fileNameParts['groupingKind'].str.match('Blank', na=False).astype(bool), 'AssayRole'] = AssayRole.LinearityReference\n fileNameParts.loc[fileNameParts['groupingKind'].str.match('E?IC', na=False).astype(bool), 'AssayRole'] = AssayRole.Assay\n\n fileNameParts.loc[:, 'SampleType'] = SampleType.StudySample\n fileNameParts.loc[fileNameParts['reference'] == 'SR', 'SampleType'] = SampleType.StudyPool\n fileNameParts.loc[fileNameParts['baseName'].str.match('.+[B]\\d+?[SE]\\d+?', na=False).astype(bool), 'SampleType'] = SampleType.StudyPool\n fileNameParts.loc[fileNameParts['reference'] == 'LTR', 'SampleType'] = SampleType.ExternalReference\n fileNameParts.loc[fileNameParts['reference'] == 'MR', 'SampleType'] = SampleType.MethodReference\n fileNameParts.loc[fileNameParts['injectionKind'] == 'SRD', 'SampleType'] = SampleType.StudyPool\n 
fileNameParts.loc[fileNameParts['groupingKind'].str.match('Blank', na=False).astype(bool), 'SampleType'] = SampleType.ProceduralBlank\n fileNameParts.loc[fileNameParts['groupingKind'].str.match('E?IC', na=False).astype(bool), 'SampleType'] = SampleType.StudyPool\n\n # Skipped runs\n fileNameParts['Skipped'] = fileNameParts['exclusion'].str.match('[Xx]', na=False)\n\n # Get matrix\n fileNameParts['Matrix'] = fileNameParts['groupingKind'].str.extract('^([AC-Z]{1,2})(?<!IC)$', expand=False)\n fileNameParts['Matrix'].fillna('', inplace=True)\n\n # Get well numbers\n fileNameParts.loc[\n fileNameParts['groupingKind'].str.match('Blank|E?IC', na=False).astype(bool), 'injectionNo'] = -1\n fileNameParts['Well'] = pandas.to_numeric(fileNameParts['injectionNo'])\n\n # Plate / grouping no\n fileNameParts['Plate'] = pandas.to_numeric(fileNameParts['groupingNo'])\n\n # Get batch where it is explicit in file name\n fileNameParts['Batch'] = pandas.to_numeric(fileNameParts['baseName'].str.extract('B(\\d+?)[SE]', expand=False))\n fileNameParts['Correction Batch'] = numpy.nan\n\n # Map dilution series names to dilution level\n fileNameParts['Dilution'] = fileNameParts['baseName'].str.extract('(?:.+_?)(SRD\\d\\d)(?:_?.*)', expand=False).replace(self.Attributes['dilutionMap'])\n fileNameParts['Dilution'] = fileNameParts['Dilution'].astype(float)\n # Blank out NAs for neatness\n fileNameParts['reruns'].fillna('', inplace=True)\n fileNameParts['extraInjections'].fillna('', inplace=True)\n\n # Drop unwanted columns\n fileNameParts.drop(['exclusion', 'reference', 'groupingKind', 'injectionNo', 'injectionKind', 'groupingNo'], axis=1, inplace=True)\n\n # Swap in user freindly file names\n fileNameParts.rename(columns={'chromatography': 'Chromatography'}, inplace=True)\n fileNameParts.rename(columns={'instrument': 'Instrument'}, inplace=True)\n fileNameParts.rename(columns={'study': 'Study'}, inplace=True)\n fileNameParts.rename(columns={'baseName': 'Sample Base Name'}, inplace=True)\n fileNameParts.rename(columns={'fileName': 'Sample File Name'}, inplace=True)\n fileNameParts.rename(columns={'suplementalInfo': 'Suplemental Info'}, inplace=True)\n fileNameParts.rename(columns={'ionisation': 'Ionisation'}, inplace=True)\n fileNameParts.rename(columns={'extraInjections': 'Suplemental Injections'}, inplace=True)\n fileNameParts.rename(columns={'reruns': 'Re-Run'}, inplace=True)\n\n # Merge metadata back into the sampleInfo table.\n # first remove duplicate columns (from _dataset _init_)\n if 'AssayRole' in self.sampleMetadata.columns: self.sampleMetadata.drop(['AssayRole'], axis=1, inplace=True)\n if 'SampleType' in self.sampleMetadata.columns: self.sampleMetadata.drop(['SampleType'], axis=1, inplace=True)\n if 'Sample Base Name' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Sample Base Name'], axis=1, inplace=True)\n if 'Dilution' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)\n if 'Batch' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Batch'], axis=1, inplace=True)\n if 'Correction Batch' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Correction Batch'], axis=1, inplace=True)\n # merge\n self.sampleMetadata = pandas.merge(self.sampleMetadata, fileNameParts, left_on='Sample File Name', right_on='Sample File Name', how='left', sort=False)\n\n # Add 'Exclusion Details' column\n self.sampleMetadata['Exclusion Details'] = ''\n\n self.Attributes['Log'].append([datetime.now(), 'Sample metadata parsed from filenames.'])", "def 
process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label", "def extract_filefamilyname( self, filename ):\n matchobject = re.search( r\"^.*_\\d\\d\", filename )\n if matchobject is None:\n return filename\n else:\n familyname = filename[0:(matchobject.end()-3)]\n return familyname", "def parse_name(self, name):\n domain_regex = None\n image_name = re.split('/', name)[-1]\n domain = None\n path = None\n port = None\n \n split_paths = re.split('/', name)[0:-1]\n if re.search('\\.', split_paths[0]) or split_paths[0] == \"localhost\":\n domain = split_paths[0]\n\n if re.search(':', domain):\n split_domain = re.split(':', domain)\n domain = split_domain[0]\n port = split_domain[1]\n \n path = '/'.join(re.split('/', name)[1:-1])\n\n return(domain, port, path, image_name)", "def _get_image_name(image_meta, max_len=pvm_const.MaxLen.FILENAME_DEFAULT):\n return pvm_util.sanitize_file_name_for_api(\n image_meta.name, prefix=DiskType.IMAGE + '_',\n suffix='_' + image_meta.checksum, max_len=max_len)", "def __extract_patient_name (self, r=None):\n\t\tif self._file :\n\t\t\t#r = re.search(r'Name:\\s+(.+?)(?=Visit|MRN|\\d+)', self._file.text, re.I)\n\t\t\tr = re.search(r'Name:\\s+(.+?)(?=\\n)', self._file.text, re.I)\n\t\t\tassert r, \"Patient Name could not be derived from OCR text!\"\n\t\t\tr = r.groups()[0]\n\t\treturn r or None", "def parseFileInfo(self, file):\n # FileMode, FilesNumber, User, Group, Size, Date, Filename\n item = [f for f in file.split(' ') if f != '']\n \n ftype, size, date, filename = (item[0], item[4], ' '.join(item[5:8]), ' '.join(item[8:]))\n # print(ftype, size, date, filename)\n return (ftype, size, date, filename)", "def picture_name(self, filename):\n return '%s%s'%(self.username, splitext(filename)[1])", "def analyze_header_XL30(imname, allow_underscore_alias=True):\n try:\n with open(imname, encoding = \"ISO-8859-1\") as of: \n # TODO seek for [DatabarData] first, then count the 194 lines!\n ih = dict(l.strip().split(' = ') for l in of.read().split('\\n')[:194] if '=' in l)\n except:\n print('Warning: image {:} does not contain readable SEM metadata'.format(imname))\n if not allow_underscore_alias: \n print(' skipping it...')\n else:\n print('Trying to load metadata from ', pathlib.Path(imname).parent / ('_'+pathlib.Path(imname).name))\n try: \n with open(str(pathlib.Path(imname).parent / ('_'+pathlib.Path(imname).name)), encoding = \"ISO-8859-1\") as of: \n ih = dict(l.strip().split(' = ') for l in of.read().split('\\n')[:194] if '=' in l)\n #ih['lDetName'] = '3' ## XXX FIXME: hack for detector override\n except FileNotFoundError: \n return {} ## empty dict \n return ih", "def image_file_name(instance, filename):\n\text = filename[-4:]\n\tnew_filename = os.path.join('images',str(instance.image_folder),str(instance.user).replace(\" \",\"\").lower()+ext)\n\treturn new_filename", "def get_name(self, index):\n return self.image_files[index]", "def get_title(filename):\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = 
os.path.join(FIXED, name + \".fix\")\n #with open(f, 'r') as in_f:\n with codecs.open(f, 'r', encoding='utf-8') as in_f:\n lines = in_f.readlines()\n i = 0\n line = lines[i]\n while len(line) < 5:\n i += 1\n line = lines[i]\n return lines[i]", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def exif(filename):\n clef = ['Exif.Image.Make',\n 'Exif.Image.Model',\n 'Exif.Image.DateTime',\n 'Exif.Photo.ExposureTime',\n 'Exif.Photo.FNumber',\n 'Exif.Photo.DateTimeOriginal',\n 'Exif.Photo.DateTimeDigitized',\n 'Exif.Photo.ShutterSpeedValue',\n 'Exif.Photo.ApertureValue',\n 'Exif.Photo.ExposureBiasValue',\n 'Exif.Photo.Flash',\n 'Exif.Photo.FocalLength',\n 'Exif.Photo.ISOSpeedRatings'\n]\n data = {}\n image_exif = Exif(filename)\n image_exif.read()\n comment = image_exif.comment\n\n for i in clef:\n try:\n data[i] = image_exif.interpretedExifValue(i)\n except:\n data[i] = \"\"\n return data, comment", "def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):", "def split_filename(path):\n filename = os.path.basename(path)\n name, extension = os.path.splitext(filename)\n region = name.split('.')[0]\n\n return region, name, extension", "def pull_anno(self, index):\n img_path = list(self.annotation.keys())[index]\n return img_path[img_path.rfind(\"/\") + 1 : img_path.rfind(\".\")], self.annotation[img_path]", "def get_emotion_label(self, file_name):\n file_name = file_name[:-4]\n emotion_name = file_name.split('_')[-1] # the last is a position of emotion code\n return emotion_name", "def get_name_from_filename(filename):\n return filename[:-4]", "def get_image_base_name_and_index(_image_path : str) -> Tuple[str, str]:\n image_extless = os.path.splitext(os.path.basename(_image_path))[0]\n\n #There should be at least one integer in the string to act as an index.\n assert re.search(ONLY_CHARACTERS_REGEX, image_extless) is None\n\n #Count forward from the start of the string until we reach an integer,\n # at which point we will have reached the indexing portion of the filename.\n i=1\n while i < len(image_extless) and re.search(ONLY_CHARACTERS_REGEX, image_extless[:i]):\n i += 1\n\n #Decrement i by one to account for the final character which was found to\n # be not a character.\n i -= 1\n\n return image_extless[:i], image_extless[i:]", "def _ImageName(self, image):\n\n image_without_protocol = image.split('/')[-1]\n if '@' in image_without_protocol:\n return image_without_protocol.split('@')[0]\n elif ':' in image:\n return image_without_protocol.split(':')[0]\n else:\n return image_without_protocol", "def metadata(filename, header=fits.PrimaryHDU().header, clear=True):\n\n if clear:\n header.clear()\n\n header.append(('comment', ''), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', '*'*18 + ' Time and Pointing Data ' + '*'*18), 
end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', ''), end=True)\n\n try:\n origname = re.sub('.*CRSA', '', re.sub('.fits', '', filename))\n header.append(('origname', origname, 'Original file ID number'), end=True)\n except:\n pass\n\n ####################################################################\n # Attempt to get the mean time of the exposure. Try three things:\n # 1. The mean of mjd-str and mjd-end in the main header (HDU 0)\n # 2. mjd in the main header (HDU 0)\n # 3. The mean acquisition time in the headers of the individual \n # reads, computed as acqtime in HDU 1 plus 1.48s/2*nreads\n ####################################################################\n\n mjd_ok = True\n try:\n head = fits.open(filename)[0].header\n try:\n mean_mjd = 0.5*(head['mjd-str'] + head['mjd-end'])\n except:\n try:\n mean_mjd = head['mjd'] + 1.48*0.5*len(fits.open(filename))/86400\n except:\n ########################################################\n # Note: acqtime is unreliable--doesn't always update.\n ########################################################\n #head1 = fits.open(filename)[1].header\n #mean_mjd = head1['acqtime'] - 2400000.5\n #mean_mjd += 1.48*0.5*len(fits.open(filename))/86400\n ########################################################\n # This is pretty bad: use the checksum time of the\n # middle read as the time stamp of last resort.\n ########################################################\n head1 = fits.open(filename)[len(fits.open(filename))//2].header\n t = head1.comments['checksum'].split()[-1]\n t = Time(t, format='isot')\n t.format = 'mjd'\n mean_mjd = float(str(t)) \n except:\n mjd_ok = False\n mean_mjd = np.nan\n utc_date = 'unavailable'\n utc_time = 'unavailable'\n\n pos_ok = True\n\n ####################################################################\n # Need RA and Dec to compute parallactic angle\n ####################################################################\n\n try:\n head = fits.open(filename)[0].header\n ra, dec = [head['ra'], head['dec']]\n except:\n #ra, dec = ['05:02:27.5438', '+07:27:39.265']\n \t#ra, dec = ['04:37:36.182', '-02:28:25.87']\n pos_ok = False\n \n if mjd_ok:\n\n ################################################################\n # Subaru's coordinates in degrees\n ################################################################\n \n lng, lat = [-155.4760187, 19.825504]\n subaru = (str(lng) + 'd', str(lat) + 'd')\n t = Time(mean_mjd, format='mjd', location=subaru)\n \n if pos_ok:\n\n ############################################################\n # Precess from J2000 to the appropriate epoch\n ############################################################\n\n c = coord.SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg), frame='fk5')\n \n equinox = 'J%.5f' %(2000 + (mean_mjd - 51544.5)/365.25)\n c = c.transform_to(coord.FK5(equinox=equinox))\n\n ################################################################\n # Compute hour angle to get parallactic angle\n ################################################################\n\n ha = (t.sidereal_time('apparent') - c.ra).rad\n lat = lat*np.pi/180\n \n pa = -np.arctan2(-np.sin(ha), np.cos(c.dec.rad)*np.tan(lat)\n - np.sin(c.dec.rad)*np.cos(ha))\n pa = float(pa%(2*np.pi))\n else:\n pa = np.nan\n\n t.format = 'isot'\n utc_date = str(t).split('T')[0]\n utc_time = str(t).split('T')[1]\n else:\n pa = np.nan\n\n if not np.isfinite(mean_mjd):\n mean_mjd = utc_date = utc_time = 'unavailable'\n\n header['mjd'] = (mean_mjd, 'Mean MJD of exposure') \n header['utc-date'] 
= (utc_date, 'UTC date of exposure') \n header['utc-time'] = (utc_time, 'Mean UTC time of exposure')\n\n ####################################################################\n # Attempt to fetch useful/important keywords from the original\n # file's FITS header\n ####################################################################\n\n header.append(_fetch('ra', filename, comment='RA of telescope pointing'))\n header.append(_fetch('dec', filename, comment='DEC of telescope pointing'))\n\n if np.isfinite(pa):\n header['parang'] = (pa*180/np.pi, 'Mean parallactic angle (degrees)')\n else:\n header['parang'] = ('unavailable', 'Mean parallactic angle (degrees)')\n header.append(_fetch('d_imrpap', filename, comment='Image rotator pupil position angle (degrees)'))\n\n header.append(_fetch('HIERARCH CHARIS.FILTER.NAME', filename, \n comment='CHARIS filter name', newkey='filtname'))\n header.append(_fetch('HIERARCH CHARIS.FILTER.SLOT', filename, \n comment='CHARIS filter slot', newkey='filtpos'))\n header.append(_fetch('HIERARCH CHARIS.SHUTTER', filename, \n comment='CHARIS shutter position', newkey='shutter'))\n\n return header", "def find_meta(filename, source_directory):\n metafile = os.path.join(source_directory, filename + '_Metadata.csv')\n metadf = pd.read_csv(metafile)\n metadf = metadf.rename(str.lower, axis='columns')\n\n schfile = metadf['schedule_file_name'][0].split('\\\\')[-1].split('.sdu')[0].split('-')[1]\n param = schfile.replace('_', '.')\n\n return param", "def content_file_name(self, filename):\n ext = filename.split('.')[-1]\n filename = \"%s_%s.%s\" % (filename, self.id, ext)\n return os.path.join('pictures/static/pictures/', filename)", "def parse_metadata_file(self, file):\n \n file_keys = list(file.keys())\n \n if 'labelAnnotations' in file_keys:\n #file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.5)]\n file_annots = file['labelAnnotations'][:]\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n \n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\n \n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n \n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n \n return df_metadata", "def _extract_file_info(directory, root_path, name):\n file_path = join(directory, name)\n rel_path = relpath(file_path, root_path)\n return {\n \"name\": name,\n \"path\": file_path,\n \"dir_name\": dirname(file_path),\n \"is_file\": isfile(file_path),\n \"is_dir\": isdir(file_path),\n \"level\": len(rel_path.split('/')) - 1\n }", "def namer(self, image_url, page_url):\n title = page_url.rsplit('/', 2)[1]\n image_ext = image_url.rsplit('.', 1)[1]\n return 
'%s.%s' % (title, image_ext)", "def load_metadata(PATH, filename):\n with open(PATH + '/' + filename + '_croporg' + \".pkl\",\"rb\") as f:\n new_data = pickle.load(f)\n\n print(filename, \"opened\")\n return new_data", "def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))", "def lookup_by_filename (self, filename, metadata_required=True):\n image_base_name = utils.common.get_normalised_name(filename)\n # With 71 out of 24920 records in the database having the\n # Negative ID (in the negative field of Scanning) the same as\n # the non-extension part of output_file_name, and those 71\n # being always very close to the ID, it seems safe to assume\n # that those 71 are mistakes, and it is simplest to just\n # compare the Negative ID with the legacy_base_name.\n self._c.execute('''SELECT * FROM Scanning, Negative\n WHERE Scanning.negative = ? AND\n Scanning.negative = Negative.id''',\n (image_base_name,))\n rows = self._c.fetchall()\n data = None\n if len(rows) == 0:\n if metadata_required:\n print('Skipping image \"%s\"; no metadata' % filename)\n else:\n data = self._extract_metadata_from_map(image_base_name)\n elif len(rows) > 1:\n print('Skipping image \"%s\"; multiple matching metadata records' %\n filename)\n else:\n data = self._extract_metadata_from_row(rows[0])\n if data is None:\n print('Skipping image \"%s\"; no matching image file' % filename)\n else:\n # Perform the more expensive author/photographer lookup.\n self._c.execute(\n '''SELECT Person.forename, Person.surname\n FROM Person, AuthorCounty, Location, Negative\n WHERE Negative.id = ? AND\n Negative.location = Location.id AND\n Location.author_county = AuthorCounty.id AND\n AuthorCounty.person = Person.id''',\n (image_base_name,))\n row = self._c.fetchone()\n if row is not None:\n person_name = '%s %s' % (row['forename'], row['surname'])\n photographer = migration_utils.get_or_create_contributor(\n person_name)\n data['photographer'] = photographer\n return data", "def extract_show(filename):\n try:\n f = open(\"recap_data.csv\", mode='r', encoding=\"utf-8\")\n content = f.read()\n f.close()\n lines = content.split('\\n')\n for line in lines:\n cols = line.split(';')\n if cols[0] == filename:\n return cols[3]\n return None\n\n except Exception as e:\n print(\"Exception du try extract_show\")\n print(e)\n return None", "def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.outcar.msd.dat\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = filename.split('_')[1]\n acell = filename.split('.outcar')[0].split('_')[3].strip('a')\n return temperature, acell", "def test_get_original_file_name_without_duplication_marker(self):\n test_file_name = \"uploaded_file_name\"\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def creation_date_from_exif(filename):\n # Source: https://orthallelous.wordpress.com/2015/04/19/extracting-date-and-time-from-images-with-python/\n with Image.open(filename) as img:\n exif = img.getexif()\n\n if exif is None:\n raise ValueError(f\"No exif data for {filename}\")\n\n # for subsecond prec, see doi.org/10.3189/2013JoG12J126 , sect. 
2.2, 2.3\n tags = [\n (36867, 37521), # (DateTimeOriginal, SubsecTimeOriginal)\n (36868, 37522), # (DateTimeDigitized, SubsecTimeDigitized)\n (306, 37520), # (DateTime, SubsecTime)\n ]\n\n for tag in tags:\n dat = exif.get(tag[0])\n sub = exif.get(tag[1], 0)\n\n # PIL.PILLOW_VERSION >= 3.0 returns a tuple\n dat = dat[0] if isinstance(dat, tuple) else dat\n if not dat:\n continue\n\n sub = sub[0] if isinstance(sub, tuple) else sub\n if isinstance(sub, int):\n sub = f\"{sub:06d}\"\n\n date, time = dat.split(\" \")\n date = date.replace(\":\", \"-\")\n return fromisoformat(f\"{date}T{time}.{sub}\")\n\n raise ValueError(f\"No date found in the exif data for {filename}\")", "def _get_video_name(self, fname):\n csv_name_split = fname.split(\"_\")\n thirty_fps_loc = csv_name_split.index(\"30fps\")\n video_name = \"_\".join(csv_name_split[0:thirty_fps_loc+1])\n return video_name", "def get_metadata_file(self, file_in_cache):\n return re.sub(r'\\.tar$', '.json', file_in_cache)", "def iter_filename_info(dir_name):\n pattern = re.compile(r'^((.+)__(.+)__([^-]+))\\.png')\n for t in os.walk(dir_name):\n for filename in t[2]:\n if filename.endswith('.png'):\n m = pattern.match(filename)\n if m is None:\n yield {'error': 'png filename not following screenshot'\n ' pattern: {}'.format(filename)}\n else:\n d = m.group(2).replace('__', sep)\n yield {'dunder': m.group(1),\n 'dir': d,\n 'file': m.group(3),\n 'ext': m.group(4),\n 'source': slash(d, m.group(3) + '.' + m.group(4))\n }", "def description(self, fileToCheck):\n\n cmd = \"exiftool -b -description \" + \"\\\"\" + fileToCheck + \"\\\"\"\n proc = subprocess.Popen(cmd,\n shell=True, \n stdout=subprocess.PIPE,\n )\n results = proc.communicate()[0]\n if results == '':\n cmd = \"exiftool -b -caption-abstract \" + \"\\\"\" + fileToCheck + \"\\\"\"\n proc = subprocess.Popen(cmd,\n shell=True, \n stdout=subprocess.PIPE,\n )\n results = proc.communicate()[0]\n return results", "def GetOriginalFilename(name):\n if not name.endswith(\".py\"):\n name = name + \".py\"\n\n # Stop looking for views and widgets in the top folder, except for Main\n if name == \"Main.py\":\n if os.path.isfile(name):\n return name\n\n originalDir = os.getcwd()\n listDir = os.listdir(originalDir)\n # Loop over the content of the demo directory\n for item in listDir:\n if not os.path.isdir(item):\n # Not a directory, continue\n continue\n dirFile = os.listdir(item)\n # See if a file called \"name\" is there\n if name in dirFile:\n return os.path.join(item, name)\n\n # We must return a string...\n return \"\"", "def get_thumbnail_name(self, thumbnail_name, with_size=None):", "def get_name_from_file(filename):\n return filename.split(\".\")[0]", "def _restore_image_name(self, data: Dict[str, str]) -> ImageName:\n return ImageName.parse(data[\"str\"])", "def get_file_name(filepath): # need pytest\n filename, extension = os.path.splitext(filepath.split('/')[-1])\n return filename, extension", "def get_info(raw_filename, epochs_filename):\n trans, fiducials, info = get_head_correct_info(\n raw_filename, epochs_filename)\n return info", "def filename(self,imgurl):\n if imgurl.find('/'):\n return imgurl.rsplit('/', 1)[1]", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n 
hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def fn(self):\n if not self.meta.get(\"FileName\"):\n self.meta[\"FileName\"] = self.tags[\"AccessionNumber\"]\n return self.meta.get('FileName')", "def convertFilename (pattern, name):\n\tresult = \"\"\n\tj = 0\n\ti = 0\n\twhile j < len (pattern) or i < len(name):\n\t\t# If the format ended \n\t\tif j >= len (pattern):\n\t\t\tbreak\n\t\t# If one charactere must be ignored \n\t\telif pattern [j] == '?':\n\t\t\tif i < len(name):\n\t\t\t\tresult = result + name [i]\n\t\t\t\ti += 1\n\t\t\tif j < len(pattern):\n\t\t\t\tj += 1\n\t\t# If one or more characteres must be ignored \n\t\telif pattern [j] == '*':\n\t\t\tif i < len(name):\n\t\t\t\tresult = result + name [i]\n\t\t\t\ti += 1\n\t\t\telse :\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif i < len(name):\n\t\t\t\ti += 1\n\n\t\t\tif j < len(pattern):\n\t\t\t\tresult = result + pattern [j]\n\t\t\t\tj += 1\n\treturn result", "def get_file_name(self):\n return self.path.name[6:]", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def parse_metadata_file(self, file):\n\n file_keys = list(file.keys())\n\n if 'labelAnnotations' in file_keys:\n file_annots = file['labelAnnotations']\n file_top_score = np.asarray(\n [x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n\n file_colors = file['imagePropertiesAnnotation']['dominantColors'][\n 'colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray(\n [x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray(\n [x['confidence'] for x in file_crops]).mean()\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray(\n [x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n\n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = 
df_metadata.add_prefix('metadata_')\n\n return df_metadata", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def _extract_name(line: str) -> str:\n tokens = line[19:-2].split(\" {\")\n name = tokens[0]\n return name", "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def get_clf_from_file_name(file_name: str) -> str:\n matcher = pattern.match(file_name)\n if matcher:\n count = matcher.group('count')\n if count == '0':\n return None\n section = matcher.group('section')\n main_class = matcher.group('main_class')\n sub_class = matcher.group('sub_class')\n\n return '%s_%s_%s' % (section, main_class, sub_class)\n else:\n return None" ]
[ "0.7071573", "0.66357124", "0.64707863", "0.6266383", "0.61719465", "0.6033898", "0.600292", "0.59774214", "0.59133536", "0.5879702", "0.5877382", "0.5843988", "0.5837125", "0.5832479", "0.58029586", "0.57413375", "0.5720259", "0.57090443", "0.5669945", "0.5668011", "0.5648759", "0.5648031", "0.5572095", "0.5543046", "0.55327857", "0.55228084", "0.5515169", "0.5507461", "0.55067086", "0.55063075", "0.5501829", "0.5492195", "0.5491561", "0.54609567", "0.5454082", "0.5449096", "0.54403335", "0.5435868", "0.5428405", "0.54137886", "0.54111236", "0.5406512", "0.539326", "0.5387614", "0.5377684", "0.5369908", "0.5368458", "0.53660995", "0.53635114", "0.5344243", "0.5343383", "0.53416705", "0.53348935", "0.5322607", "0.5317754", "0.5313253", "0.5296839", "0.5293769", "0.52913", "0.5258499", "0.52390987", "0.52321035", "0.5225731", "0.5224236", "0.5217035", "0.52162004", "0.5209637", "0.52053374", "0.5204888", "0.5202971", "0.5195936", "0.5188966", "0.5182899", "0.51817805", "0.5172793", "0.5169477", "0.5162005", "0.5156133", "0.51498544", "0.5148386", "0.5147561", "0.51408494", "0.51385254", "0.51371455", "0.5136761", "0.5136604", "0.51270103", "0.5121103", "0.51187205", "0.5117731", "0.51159656", "0.51124245", "0.5110246", "0.5109451", "0.5109367", "0.5107307", "0.5101507", "0.5099667", "0.508421", "0.5083061" ]
0.6781023
1
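The document snippet in the record above derives a parallactic angle from the hour angle, declination, and site latitude. A minimal sketch reproducing just that formula so it can be checked numerically, with made-up hour angle and declination (only the Maunakea latitude comes from the snippet itself):

```python
import numpy as np

# Arbitrary example inputs -- not taken from any record in this dataset.
ha = np.radians(25.0)         # hour angle of the target (assumed)
dec = np.radians(7.46)        # declination of the target (assumed)
lat = np.radians(19.825504)   # site latitude used in the snippet (Maunakea)

# Same expression as in the preceding document snippet.
pa = -np.arctan2(-np.sin(ha),
                 np.cos(dec) * np.tan(lat) - np.sin(dec) * np.cos(ha))
pa = float(pa % (2 * np.pi))
print(f"parallactic angle: {np.degrees(pa):.2f} deg")
```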
Insert the crop represented by file_name into this image.
def insert(self, file_path: str, annot_type: str) -> None:
    if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:
        raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')
    x_pos = get_metadata_from_filename(file_path).x_pos
    if x_pos in self._x_positions:
        col = self._cols[x_pos]
    else:
        col = Column()
        self._x_positions.append(x_pos)
        self._x_positions.sort()
    col.insert(Crop(file_path, annot_type))
    self._cols[x_pos] = col
    self.n_cols = len(self._cols)
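The positive document above relies on collaborators that are not defined in this record: a `Column` container, a `Crop` value object, a `get_metadata_from_filename` helper, and the regex and attributes of the surrounding image class. A minimal sketch that lets the record's logic run end to end, assuming hypothetical stand-ins for all of those (the class names, filename convention, and annotation labels below are illustrative assumptions, not part of the dataset):

```python
import os
import re

# Hypothetical stand-ins; only insert() itself comes from the record above.

class Crop:
    def __init__(self, file_path, annot_type):
        self.file_path = file_path
        self.annot_type = annot_type

class Column:
    def __init__(self):
        self.crops = []

    def insert(self, crop):
        self.crops.append(crop)

class CropMeta:
    def __init__(self, x_pos):
        self.x_pos = x_pos

def get_metadata_from_filename(file_path):
    # Assumed filename convention: "crop_x<col>_y<row>.png".
    x_digits = re.search(r"x(\d+)", os.path.basename(file_path)).group(1)
    return CropMeta(x_pos=int(x_digits))

class Image:
    _valid_file_name_regex = re.compile(r"^crop_x\d+_y\d+\.png$")

    def __init__(self):
        self._cols = {}
        self._x_positions = []
        self.n_cols = 0

    def insert(self, file_path, annot_type):
        # Same logic as the document field above.
        if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:
            raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')
        x_pos = get_metadata_from_filename(file_path).x_pos
        if x_pos in self._x_positions:
            col = self._cols[x_pos]
        else:
            col = Column()
            self._x_positions.append(x_pos)
            self._x_positions.sort()
        col.insert(Crop(file_path, annot_type))
        self._cols[x_pos] = col
        self.n_cols = len(self._cols)

img = Image()
img.insert("crop_x0010_y0000.png", "bbox")
img.insert("crop_x0010_y0032.png", "bbox")
img.insert("crop_x0042_y0000.png", "polygon")
print(img.n_cols)  # two distinct x positions -> 2 columns
```

Crops that share an x position land in the same column, and `_x_positions` keeps the columns in sorted x order.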
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_current(self, image_name):\n # Sets the position of the crop\n self.j ,self.i = 0, 0\n\n # loads the image\n self.image = convert2int(tifffile.imread(image_name)).astype(numpy.float32)\n\n # Computes the number of crops in x and y\n self.ny = numpy.ceil(self.image.shape[0] / self.step)\n self.nx = numpy.ceil(self.image.shape[1] / self.step)\n\n # rescale the image\n self.image -= self.image_min\n self.image /= (0.8 * (self.image_max - self.image_min))\n self.image = numpy.clip(self.image, 0, 1)", "def generateMask(self, nameFile): \n imgPath = os.path.join(GG.utils.PATH_PHOTO_MASK, nameFile)\n imgMask = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\")))\n imgTemplate = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"template.png\")))\n imgUpload = Image.open(imgPath)\n size = MASK_SIZE[self.avatarConfiguration[\"headSize\"]]\n imgUploadResized = imgUpload.resize(size, Image.ANTIALIAS)\n imgMask.paste(imgUploadResized, MASK_COORD[self.avatarConfiguration[\"headSize\"]], imgTemplate)\n imgMask.save(MASK_UPLOAD)\n self.avatarConfiguration[\"mask\"] = \"imgUploadMask.png\"\n self.paintMask()", "def insert_in_tree(self, pic_name, pic_num, crop_num, is_crop=False):\n \n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n \n # insert the picture/crop name in column 0\n if (is_crop == False):\n myiter = self.tree_store.append(None, None)\n if crop.available == True:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n else:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + pic_name + '</b></span>')\n elif (is_crop == True):\n #determine iter that points to row containing pic_num\n # in column 1\n parent = None\n for i in range(0, len(self.tree_store)):\n if (pic_num == self.tree_store[i][1]):\n #found the parent, insert the child\n parent = self.tree_store[i].iter\n myiter = self.tree_store.append(parent, None)\n self.tree_store.set_value(myiter, 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n break\n # expand the row to show the crop\n self.image_tree.expand_row(self.tree_store.get_path(parent), True)\n\n # fill in the remaining columns\n self.tree_store.set_value(myiter, 1, pic_num)\n self.tree_store.set_value(myiter, 2, crop_num)\n self.tree_store.set_value(myiter, 3, \"0%\")\n \n return myiter", "def _crop(self, fieldname, scale, box):\n croputils = IImageCroppingUtils(self.context)\n data = croputils.get_image_data(fieldname)\n\n original_file = StringIO(data)\n image = PIL.Image.open(original_file)\n image_format = image.format or self.DEFAULT_FORMAT\n\n cropped_image = image.crop(box)\n cropped_image_file = StringIO()\n cropped_image.save(cropped_image_file, image_format, quality=100)\n cropped_image_file.seek(0)\n\n croputils.save_cropped(fieldname, scale, cropped_image_file)\n\n # store crop information in annotations\n self._store(fieldname, scale, box)\n\n # Purge caches if needed\n notify(Purge(self.context))", "def crop_image (filename):\n from PIL import Image\n image = Image.open(filename)\n for edge in 'NSWE':\n image = _crop(image, edge)\n image.save(filename)", "def set_crop(self, crop):\n self.crop = crop", "def crop_image(inputimage, folder, newimgname, xtop=0, ytop=64, xbottom=512, 
ybottom=448):\n\timg = Image.open(folder + os.sep + inputimage)\n\timg = img.crop((xtop, ytop, xbottom, ybottom))\n\timg.save(folder + os.sep + newimgname, 'PNG')", "def _copy_image(self, name):\n image = self._get_image(name)\n QtGui.QApplication.clipboard().setImage(image)", "def add_transect_file(self, file_name: str):\n # Create a transect dict\n #transect = {\n # 'Path': file_path,\n # 'File': file_name,\n # 'Number': index,\n #}\n\n # Add the transect to the file\n self.Files.append(file_name)", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label", "def crop_image(filename, n):\n image = SimpleImage(filename)\n width = image.width\n new_width = width - (2 * n)\n height = image.height\n new_height = height - (2 * n)\n image_crop_width = SimpleImage.blank(new_width, height)\n for y in range(height):\n for x in range(new_width):\n pixel = image.get_pixel((x + n), y)\n image_crop_width.set_pixel(x, y, pixel)\n image_crop_width.show()\n\n image_crop_height = SimpleImage.blank(width, new_height)\n for y in range(new_height):\n for x in range(width):\n pixel = image.get_pixel(x, y + n)\n image_crop_height.set_pixel(x, y, pixel)\n image_crop_height.show()\n\n image_crop_width_height = SimpleImage.blank(new_width, new_height)\n for y in range(new_height):\n for x in range(new_width):\n pixel = image.get_pixel(x + n, y + n)\n image_crop_width_height.set_pixel(x, y, pixel)\n image_crop_width_height.show()", "def append(self, filename):\n\n self.db.single_insert_camera(filename)\n self.db.batch_insert_camera(filename)", "def crop(image, size):\n size = size.split('x')\n (root, name, ext) = split_filepath(image.path)\n filename = scale_image(image.path, (int(size[0]), int(size[1])), 'crop')\n return '/%s/%s' % (os.path.abspath(root).replace('%s/' % os.path.abspath(settings.BASE_PATH), ''), filename)", "def clip(self):\n \n subprocess.call(['gdaltindex', self.extent, self.referenceImagePath])\n dataNames = sorted(glob.glob(self.fullPath + '/full*.tif'))\n splitAt = len(self.fullPath) + 1\n\n for i in range(len(dataNames)):\n x = dataNames[i]\n y = dataNames[i][:splitAt] + dataNames[i][splitAt+4:]\n subprocess.call(['gdalwarp', '-r', 'near', '-cutline', self.extent, '-crop_to_cutline', x, y, '-dstnodata', '9999'])\n \n for n in dataNames:\n os.remove(n)\n dataNames = sorted(glob.glob(self.fullPath + '/*.tif'))\n test = gdal.Open(dataNames[0]).ReadAsArray()\n logger.log('SUCCESS', 'Clipping complete! 
%d %s files were successfully clipped to the size of %s with dimensions %d rows by %d columns' % (len(dataNames), str(self.outformat), str(self.referenceImagePath), test.shape[0], test.shape[1]))", "def crop_and_save_single(img,crop_height,crop_width,image_save_dir,name,with_label=False):\n\n assert np.mod(img.shape[0], crop_height) == 0\n assert np.mod(img.shape[1], crop_width) == 0\n\n num_row = img.shape[0] #// crop_height\n num_col = img.shape[1] #// crop_width\n crop_img = np.zeros((crop_height, crop_width, 4))\n\n for row in range(0,num_row,crop_height):\n for col in range(0,num_col,crop_width):\n # print(\"row:{}, row+crop height:{}, j: {}, row+cropwidth:{}\".format(row,row+crop_height,col,col+crop_width))\n crop_img = img[row:row+crop_height, col:col+crop_width, :]\n\n # out_name = img_name[:-4] + '_' + \\\n out_name = name + '_' + \\\n str(num_col) + '_' + str(row).zfill(2) + \\\n '_' + str(col).zfill(2)+'.png'\n\n # if with_label:\n # label_name = \"/\"+str(index) + \"_\" + date_time + \"_label\"\n # crop_3_ch = crop_img[:,:,:3] # if cropping a labeled image\n # crop_label = crop_img[:,:,-1] # if cropping a labeled image\n # PIL_crop_label = Image.fromarray(crop_label.astype(np.uint8))\n # # PIL_crop_label.save(save_dir[1]+\"_label_\"+out_name) # if cropping a labeled image\n\n PIL_crop = Image.fromarray(crop_img[:,:,:3].astype(np.uint8))\n # if with_label:\n # # return PIL_crop,PIL_crop_label\n # # return PIL_crop\n PIL_crop.save(image_save_dir+\"/\"+out_name)", "def add_image(self, image_file_name):\n # check\n if os.path.exists(image_file_name) is False:\n raise NotImplementedError(\"Image file %s does not exist.\" % image_file_name)\n\n self._myCanvas.add_image_file(image_file_name)\n\n return", "def Save_Image_Crop(img, x, y, width, height, filename = None, path = 'Predictions'):\n img = img[y:y+height, x:x+width,:]\n\n if filename is not None:\n try: \n os.mkdir(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img)\n plt.tight_layout()\n plt.savefig(path + '/' + filename + '_Crop.png')", "def insert_file(wrd, doc, filename: str) -> None:\n doc.Content.Select()\n wrd.Selection.Collapse(0)\n wrd.Selection.InsertFile(filename)", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def crop_image(self, img):\n img.crop_image(self._center, 1.1 * self._radius)", "def add_file_to_clean(self, filename):\n self.files_to_clean.add(filename)", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n dest_image.paste(image, (pos_x, pos_y))", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def update_movie(self, file_name):\n try:\n pix = QPixmap(file_name)\n self.cur_imageRect['width'] = pix.width()\n self.cur_imageRect['height'] = pix.height()\n if self.isFullScreen():\n width = self.screen_width\n height = self.screen_height\n padding_left = 0\n padding_top = 0\n else:\n width = 1000\n height = 450\n padding_left = 40\n padding_top = 50\n scale = min(width / pix.width(), height / pix.height())\n self.video_label.setGeometry(padding_left, padding_top, pix.width() * scale, pix.height() * scale)\n self.video_label.clear()\n self.video_label.setPixmap(pix)\n except:\n pass\n os.remove(file_name)", "def _pasteFile(self) -> None:\n if not self._fileClipboard:\n return\n cut = self._fileClipboard.pop()\n filenames = [x.name for x in self._fileClipboard]\n destPaths = 
[self._currPath.joinpath(x) for x in filenames]\n try:\n duplicates = []\n for src, dest in zip(self._fileClipboard, destPaths):\n if src == dest:\n raise shutil.SameFileError\n if dest in self._currPath.glob('*'):\n duplicates.append(dest)\n if duplicates:\n if self._overwriteFileMsgBox(duplicates) == QMessageBox.Cancel:\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n return\n for src, dest in zip(self._fileClipboard, destPaths):\n if cut and src.is_file():\n shutil.move(str(src), str(dest))\n elif src.is_dir():\n dir_util.copy_tree(str(src), str(dest))\n if cut:\n shutil.rmtree(src)\n elif src.is_file():\n shutil.copy(str(src), str(dest))\n elif not src.exists():\n raise FileNotFoundError\n self._statusBar.showMessage('File pasted!', 3000)\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n except shutil.SameFileError:\n self._statusBar.showMessage('You cannot overwrite the same file!', 3000)\n self._fileClipboard.clear()\n except PermissionError:\n self._statusBar.showMessage('No permission to copy the file!', 3000)\n self._fileClipboard.clear()\n except FileNotFoundError:\n self._statusBar.showMessage('Cannot find the source file!', 3000)\n self._fileClipboard.clear()\n finally:\n self._listDirectories()", "def crop(self, coords):\n pass", "def crop_image(image_to_crop, year):\r\n\timg = Image.open(image_to_crop)\r\n\t#The dimensions of just the US in the image\r\n\timg = img.crop((80, 240, 800, 615))\r\n\r\n\tfile_destination = \"images/cropped_images/\" + str(year) + \".png\"\r\n\r\n\timage_file = open(file_destination, 'wb')\r\n\timg.save(image_file, 'png')\r\n\timage_file.close()", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image", "def save_image(self, file_name: str):\n if not file_name.endswith(\".png\"):\n file_name += \".png\"\n self.image.save(file_name)", "def crop_object_from_image(saving_folder,root_folder_path,root_folder_name,row_info):\n class_name=row_info['class']\n file_id=row_info['file_id']\n img_type=row_info['type']\n xmin=row_info['x_min']\n xmax=row_info['x_max']\n ymin=row_info['y_min']\n ymax=row_info['y_max']\n\n\n origin_img_path=os.path.join(root_folder_path,root_folder_name,img_type,file_id+\".png\")\n crop_img_path=os.path.join(saving_folder,file_id+\"_\"+class_name+\".png\")\n\n origin_img=cv2.imread(origin_img_path)\n crop_img=origin_img[ymin:ymax-1,xmin:xmax-1]\n\n # If width or height only contain 1 pixel, do not crop.\n if xmax-xmin<=2 or ymax-ymin<=2:\n print(\"Only one pixel, pass!\")\n return 0\n # print(origin_img.shape)\n # print(xmin,xmax,ymin,ymax)\n # print(crop_img.shape)\n # print(crop_img_path)\n cv2.imwrite(crop_img_path,crop_img)", "def crop_image(input_image, output_image, start_x, start_y, width, height):\n box = (start_x, start_y, start_x + width, start_y + height)\n output_img = img.crop(box)\n output_img.save(output_image +\".png\")", "def display_file(epd, file_name):\n\n image = Image.open(file_name)\n image = ImageOps.grayscale(image)\n\n # crop to the middle\n w,h = image.size\n x = w / 2 - epd.width / 2\n y = h / 2 - epd.height / 2\n\n cropped = image.crop((x, y, x + epd.width, y + epd.height))\n bw = cropped.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n epd.display(bw)\n epd.update()\n\n\n time.sleep(3) # delay in seconds\n\n rs = image.resize((epd.width, epd.height))\n bw = rs.convert(\"1\", dither=Image.FLOYDSTEINBERG)\n\n epd.display(bw)\n epd.update()\n\n 
time.sleep(3) # delay in seconds", "def upload_calibration_profile(self, filename: str) -> None:\n pass", "def rotate_image(self, file_name, num_rotations=3):\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH)\n destination_filename = self.parent_folder + os.path.basename(file_name)\n cv2.imwrite(destination_filename, np.rot90(image, num_rotations), [cv2.IMWRITE_PNG_COMPRESSION, 0])", "def add_to_queue(self, name, pic_num, crop_num):\n #if the picture is not already in the queue\n #and if it is not already downloaded\n if ((self.communicator.image_store.get_crop(pic_num, crop_num).inqueue == False) & \\\n (self.communicator.image_store.get_crop(pic_num, crop_num).completed == False)):\n #insert in queue\n myiter = self.list_store.append(None)\n #set the data in column 0\n #if the picture is ready for download set color to black\n if (self.communicator.image_store.get_crop(pic_num, crop_num).available == True):\n self.list_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + name + '</b></span>')\n #otherwise set to gray\n else:\n self.list_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + name + '</b></span>')\n #set the data in column 1 and 2\n self.list_store.set_value(myiter, 1, pic_num)\n self.list_store.set_value(myiter, 2, crop_num)\n #let model know picture is inqueue\n self.communicator.image_store.get_crop(pic_num, crop_num).inqueue = True\n #call queue_changed function\n self.queue_changed()\n elif self.communicator.image_store.get_crop(pic_num, crop_num).completed == True:\n print \"image has already been downloaded\"\n else:\n print \"image is currently in the queue\"", "def tool_gen_crop_clicked(self, widget, data=None):\n self._generate_crop()", "def corp_image(self):\n try:\n # Open image\n image_to_crop = Image.open(self.captcha_image_filename, 'r')\n # Crop image\n image = image_to_crop.crop((-1, 8, 65, 22))\n # Save image\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise(error)", "def repackFileName(parsedName):\n cropCoords = None\n if 'minX' in parsedName:\n cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])\n return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],\n cropCoords=cropCoords,\n diffMinutes=parsedName['diffMinutes'])", "def watermark_image(self, watermark_path, pos_name):\n image = Image.open(self.path)\n watermark = Image.open(watermark_path)\n self.watermark_width, self.watermark_height = watermark.size\n pos = self.watermark_position(pos_name)\n\n parent_dir = os.path.dirname(self.output_path)\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n\n watermark_ext = os.path.splitext(watermark_path)[-1]\n if watermark_ext in (\".png\", \".PNG\"):\n transparent = Image.new('RGBA', (self.width, self.height), (0, 0, 0, 0))\n transparent.paste(image, (0, 0))\n transparent.paste(watermark, pos, mask=watermark)\n self.output_path = \".\".join([os.path.splitext(self.output_path)[0], \"png\"])\n transparent.save(self.output_path)\n elif watermark_ext in (\".jpg\", \".JPG\", \".jpeg\", \".JPEG\"):\n image.paste(watermark, pos)\n image.save(self.output_path)\n\n return os.path.exists(self.output_path)", "def savecrop(image, fname, outpath, cropabsolute=None, cropfactors=[0.2, 0.2, 0.2, 0.2], preserve_name=False):\n if cropabsolute:\n if type(cropabsolute) != list:\n raise Exception(\"cropabsolute must be a list of length 4.\")\n else:\n if len(cropabsolute) !=4:\n raise Exception(\"Please provide 4 values corresponsindg to 
[left, upper, right, lower]\")\n cropbox = (int(cropabsolute[0]),\n int(cropabsolute[1]),\n int(image.width - cropabsolute[2]),\n int(image.height - cropabsolute[3])\n )\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"cropped{cropabsolute}\")\n else:\n fpath = genSavePath(outpath, fname)\n else:\n if type(cropfactors) != list:\n raise Exception(\"cropfactors must be a list of length 4.\")\n else:\n if len(cropfactors) !=4:\n raise Exception(\"Please provide 4 values corresponsindg to [left, upper, right, lower]\")\n cropbox = (int(image.width * cropfactors[0]),\n int(image.height * cropfactors[1]),\n int(image.width * (1-cropfactors[2])),\n int(image.height * (1-cropfactors[3]))\n )\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"cropped{cropfactors}\")\n else:\n fpath = genSavePath(outpath, fname)\n im = copy(image)\n im = im.crop(cropbox)\n try:\n im.save(fpath, subsample=\"keep\", qtables=image.quantization, optimize=True)\n\n except IOError as m:\n print( \"Crop({}) image creation failed for: {}. \\nReason:{}\".format(cropabsolute,cropfactors,fname,m))", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n raise NotImplementedError", "def CropFrames(yuv_file_name, output_file_name, width, height, crop_height):\n # Component sizes = [Y_sizes, U_sizes, V_sizes].\n component_sizes = [(width, height, crop_height),\n (width/2, height/2, crop_height/2),\n (width/2, height/2, crop_height/2)]\n\n yuv_file = open(yuv_file_name, 'rb')\n output_file = open(output_file_name, 'wb')\n\n data_left = True\n while data_left:\n data_left = _CropOneFrame(yuv_file, output_file, component_sizes)\n\n yuv_file.close()\n output_file.close()", "def display_image(self, pic_num, crop_num):\n if (self.communicator.image_store.get_crop(pic_num, crop_num).completed == True):\n self.cd_crop_num = crop_num\n self.cd_pic_num = pic_num\n try:\n path = self.communicator.image_store.get_crop(pic_num, crop_num).path\n self.pixbuf = gtk.gdk.pixbuf_new_from_file(path)\n w = self.pixbuf.get_width()\n h = self.pixbuf.get_height()\n \n # draw the image\n self.drawing_area.window.draw_pixbuf(self.gc, self.pixbuf, \\\n 0, 0, 0, 0, w, h)\n #self.drawing_area.window.resize(w, h)\n self.drawing_area.set_size_request(w, h)\n \n # render the target and compass\n self.draw_target(pic_num, crop_num)\n self.draw_compass(pic_num)\n \n except glib.GError as e:\n print \"picture \" + str(pic_num) + \" crop \" + str(crop_num) + \\\n \" is corrupt!\"\n self.pixbuf = gtk.gdk.pixbuf_new_from_file(\"images/corrupt.png\")\n self.drawing_area.window.draw_pixbuf(self.gc, self.pixbuf, \\\n 0, 0, 0, 0, 234, 320)\n self.drawing_area.set_size_request(234, 320)\n \n else:\n #draw \"incomplete\" image\n path = \"%s/images/incomplete.jpg\" % (os.path.dirname(__file__),)\n self.pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(path, 800, 600)\n w = self.pixbuf.get_width()\n h = self.pixbuf.get_height()\n #draw the image\n self.drawing_area.window.draw_pixbuf(self.gc, self.pixbuf, \\\n 0, 0, 0, 0, w, h)\n self.drawing_area.set_size_request(w, h)\n \n # show the picture info and update it\n self.update_target_info(pic_num, crop_num)", "def file_name(self, file_name):\n\n self._file_name = file_name", "def file_name(self, file_name):\n\n self._file_name = file_name", "def crop(input_file, output_file, crop_x0, crop_x1, \n crop_y0, crop_y1, crop_stop_sec=None, vcodec='mpeg4', quality=2, \n overwrite=True, verbose=False, very_verbose=False):\n # Overwrite avoid\n if os.path.exists(output_file) and not 
overwrite:\n raise ValueError(\"%s already exists\" % output_file)\n \n # Set up width, height and origin of crop zone\n if crop_x0 > crop_x1:\n crop_x0, crop_x1 = crop_x1, crop_x0\n if crop_y0 > crop_y1:\n crop_y0, crop_y1 = crop_y1, crop_y0\n width = crop_x1 - crop_x0\n height = crop_y1 - crop_y0\n \n # Form the syscall\n crop_string = '\"crop=%d:%d:%d:%d\"' % (width, height, crop_x0, crop_y0)\n syscall_l = ['ffmpeg', '-i', input_file, '-y',\n '-vcodec', vcodec,\n '-q', str(quality),\n '-vf', crop_string]\n if crop_stop_sec is not None:\n syscall_l += ['-t', str(crop_stop_sec)]\n syscall_l.append(output_file)\n\n # Call, redirecting to standard output so that we can catch it\n if verbose:\n print(' '.join(syscall_l))\n \n # I think when -t parameter is set, it raises CalledProcessError\n #~ syscall_result = subprocess.check_output(syscall_l, \n #~ stderr=subprocess.STDOUT)\n #~ if very_verbose:\n #~ print syscall_result\n os.system(' '.join(syscall_l))", "def next_crop(self):\n image_crop = self.image[self.j : self.j + self.size, self.i : self.i + self.size].astype(numpy.float32)\n label_crop = self.label[self.j : self.j + self.size, self.i : self.i + self.size].astype(numpy.float32)\n\n # Asserts the crops have the good shape\n if image_crop.size != self.size*self.size:\n image_crop = numpy.pad(image_crop, ((0, self.size - image_crop.shape[0]), (0, self.size - image_crop.shape[1])), \"constant\")\n label_crop = numpy.pad(label_crop, ((0, self.size - label_crop.shape[0]), (0, self.size - label_crop.shape[1])), \"constant\")\n\n # Update the position of the crop\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.pos += 1\n if self.pos > len(self.images_names) - 1:\n return self.pos, numpy.array([]), numpy.array([])\n self.image = convert2int(tifffile.imread(self.images_names[self.pos]))\n self.label = tools.read_poly(self.labels_names[self.pos], self.image.shape)\n self.i, self.j = 0, 0\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step\n\n return self.pos, image_crop, label_crop", "def add_file_ref(self, file_name: str) -> None:\n self.referenced_in.add(file_name)", "def paste(self, image, xy=(0,0)):\n # Parse xy location from any type of unit to pixels\n x,y = xy\n x = units.parse_dist(x,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n y = units.parse_dist(y,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n xy = (x,y)\n # Need more options, eg anchor point, and coordinate xy\n self.drawer.flush()\n if isinstance(image, Canvas): image = image.img\n if image.mode == \"RGBA\":\n self.img.paste(image, xy, image) # paste using self as transparency mask\n else: self.img.paste(image, xy)\n self.update_drawer_img()\n return self", "def write_cif_file(self, file_name):\n cif_writer = CifWriter(self.dna_structure)\n cif_writer.write(file_name, self.infile, self.informat )", "def crop_image(self):\n\n image_data = Image.open(self.img_path)\n return image_data.crop(self.data_type)", "def add_image_file(self, imagefilename):\n #import matplotlib.image as mpimg\n\n # set aspect to auto mode\n self.axes.set_aspect('auto')\n\n img = matplotlib.image.imread(str(imagefilename))\n # lum_img = img[:,:,0]\n # FUTURE : refactor for image size, interpolation and origin\n imgplot = self.axes.imshow(img, extent=[0, 1000, 800, 0], interpolation='none', origin='lower')\n\n # Set color bar. 
plt.colorbar() does not work!\n if self._colorBar is None:\n # set color map type\n imgplot.set_cmap('spectral')\n self._colorBar = self.fig.colorbar(imgplot)\n else:\n self._colorBar.update_bruteforce(imgplot)\n\n self._flush()\n\n return", "def draw(self, file_name=None):\n self._draw(self.seq,\n file_name=file_name,\n mutations=self.mutations,\n scores=self.scores,\n fold=self.fold)", "def crop_id(self):\n return self._crop_id", "def test_crop(self):\r\n u = Uploader()\r\n size = (100, 100)\r\n im = Image.new('RGB', size)\r\n folder = tempfile.mkdtemp()\r\n u.upload_folder = folder\r\n im.save(os.path.join(folder, 'image.png'))\r\n coordinates = (0, 0, 50, 50)\r\n file = FileStorage(filename=os.path.join(folder, 'image.png'))\r\n with patch('pybossa.uploader.Image', return_value=True):\r\n err_msg = \"It should crop the image\"\r\n assert u.crop(file, coordinates) is True, err_msg\r\n\r\n with patch('pybossa.uploader.Image.open', side_effect=IOError):\r\n err_msg = \"It should return false\"\r\n assert u.crop(file, coordinates) is False, err_msg", "def _crop_image_and_paste(self, image, center, size):\n center_y, center_x = center\n target_h, target_w = size\n img_h, img_w, img_c = image.shape\n\n x0 = max(0, center_x - target_w // 2)\n x1 = min(center_x + target_w // 2, img_w)\n y0 = max(0, center_y - target_h // 2)\n y1 = min(center_y + target_h // 2, img_h)\n patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n left, right = center_x - x0, x1 - center_x\n top, bottom = center_y - y0, y1 - center_y\n\n cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n for i in range(img_c):\n cropped_img[:, :, i] += self.mean[i]\n y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n border = np.array([\n cropped_center_y - top, cropped_center_y + bottom,\n cropped_center_x - left, cropped_center_x + right\n ],\n dtype=np.float32)\n\n return cropped_img, border, patch", "def add_picture(self, file, left, top, width=None, height=None):\n pkg = Package.containing(self.__slide)\n image = pkg._images.add_image(file)\n rel = self.__slide._add_relationship(RT_IMAGE, image)\n pic = self.__pic(rel._rId, file, left, top, width, height)\n self.__spTree.append(pic)\n picture = Picture(pic)\n self.__shapes.append(picture)\n return picture", "def execute_file(self, event=None):\n file_list = self.get_path_list()\n print(file_list)\n if not file_list:\n return\n # merge image\n # 修复内存泄露的bug,由于没有清除之前打开的图片,第二次打开的图片仍然为之前的图片\n try:\n self.photos.destroy()\n except:\n pass\n self.photos.imgs = file_list \n merged_photo = self.photos.merge_photos()\n\n # show image\n try:\n window.destroy()\n except:\n import traceback\n traceback.print_exc()\n window.build_img_canvas()\n window.show_img_in_canvas(merged_photo)", "def increment_filename(self, filename, path=\"\", insert=\"\"):\n path = path.strip(\"/\")\n basename, ext = os.path.splitext(filename)\n for i in itertools.count():\n if i:\n insert_i = \"{}{}\".format(insert, i)\n else:\n insert_i = \"\"\n name = \"{basename}{insert}{ext}\".format(\n basename=basename, insert=insert_i, ext=ext\n )\n if not self.exists(\"{}/{}\".format(path, name)):\n break\n return name", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = 
img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def add_image(self, image_name):\n if self.current_trip is None:\n print \"no trip to add image\"\n return\n self.current_trip.store_image(image_name)", "def Crop(self, path):\n global img, crop\n self.window_created = True\n self.path = path\n self.crop = tk.Toplevel(None)\n self.crop.protocol(\"WM_DELETE_WINDOW\", self.CloseCropWindow)\n \n# tk.Label(crop, text=self.path).grid()\n self.crop.title(\"Crop Window\")\n #load image specified in path var\n img = Image.open(self.path)\n img = ImageTk.PhotoImage(img)\n #print img.height()\n #create canvas to show image\n global crop_canvas \n crop_canvas = tk.Canvas(master=self.crop, bg='#000',\n width=img.width(), height=img.height())\n \n crop_canvas.bind('<Button-1>', self.Btn1Pressed)\n crop_canvas.bind('<ButtonRelease-1>', self.Btn1Released)\n crop_canvas.bind('<B1-Motion>', self.Btn1Motion)\n \n \n crop_canvas.create_image(0,0,anchor=tk.NW, image=img)\n crop_canvas.image = img #keep image reference\n crop_canvas.grid(sticky=tk.NW)\n self.crop.focus_set()\n \n #btns for zoom functionality\n \"\"\"\n zoom_in = tk.Button(master=self.crop_canvas,text='+', anchor=tk.NE,\n command=self.ZoomIn)\n zoom_out = tk.Button(master=self.crop_canvas,text='-',anchor=tk.NE, \n command=self.ZoomOut)\n \"\"\"\n #zoom_in.place(x=img.width()-14,y=0)\n #zoom_out.place(x=img.width()-14,y=30)", "def upload_file(name):\n subprocess.check_output(cmd_preamble + [\"cp\", name, f\"jot://{name}\"])", "def crop_center_img(self):\n # TODO Task 1.1\n img = self.data\n img_with_missing_crop = np.copy(img)\n dim =128\n crop = dim // 2\n start = crop - (crop // 2)\n #ground truth overlaps img_with_missing_crop by 7 pixels in all directions\n img_with_missing_crop[:,start+7:start + crop-7, start+7:start + crop-7,:] = 0\n #255\n #inpu = Image.fromarray((img_with_missing_crop[1,:,:,:]*255).astype('uint8'))\n #inpu.save(\"cropped.png\")\n groundtruth_crop = img[:,start:start + crop, start:start + crop,:]\n self.data = (img_with_missing_crop, groundtruth_crop)", "def read_from_filename(self, filename=''):\r\n self.raw_image = skimage.io.imread(filename)\r\n self.bk_image = np.copy( self.raw_image )", "def insert(self, file_token, *file_tokens, preprocessor):\n tokens = (file_token,) + file_tokens\n for token in tokens:\n preprocessor.insert_file(self._get_filename(token))", "def upload_new_photo(name, file, user_id=None):\n\t# Create photo entry\n\tphoto = create_photo(name)\n\n\t# Save photo\n\tupload_existing_photo(photo, file)\n\n\treturn photo", "def write_to_file(self, filename):\n\n loader = ImageLoader()\n loader.write(self, filename)", "def clip(name):\n global suffix, o, r\n try:\n if r:\n gscript.run_command('r.clip', flags='r', overwrite=o, input=name, output='%s_%s'%(name,suffix)) # With resampling\n else:\n gscript.run_command('r.clip', overwrite=o, input=name, output='%s_%s'%(name,suffix)) # Without resampling\n return \"'%s' has been cliped.\"%name\n except:\n return \"ERROR: '%s' has not been cliped. 
Please check for problem.\"%name", "def _add_profile_image(self):\r\n self.profile_image_is_set = True\r\n file_name = filedialog.askopenfilename(initialdir=\"/\", title=self.language.refactor(\"Select GIF file\"),\r\n filetypes=((\"GIF files\", \"*.gif\"),))\r\n if file_name == '':\r\n self.new_user_window.lift()\r\n return\r\n\r\n self.add_profile_gif_button.destroy()\r\n gif_canvas = Ctk.CCanvas(self.new_user_window, corners='angular', size=(180, 180),\r\n bg=self.new_user_window['background'])\r\n gif_canvas.create_gif(gif_path=file_name, corner='round', size=(175, 175), pos=(90, 90),\r\n transparent=True, speed='normal')\r\n gif_canvas.place(*(15, 50))\r\n\r\n self.gif_file_path = file_name\r\n\r\n self.new_user_window.lift()", "def add_photo(self):\n scroll_to_top()\n click_imageview_by_id('photo')\n # choose photo from gallery\n click_textview_by_index(0)\n camera.get_picture_by_camera()\n sleep(6)\n activityName = get_activity_name()\n if activityName == 'com.android.gallery3d.app.CropImage':\n click_textview_by_id('save')\n sleep(5)\n scroll_to_bottom()\n scroll_to_top()\n\n return", "def _crop_write_image(self, inroot, images, outroot):\n for image in images:\n inimage_path = osp.join(inroot, image)\n cvimg = cv2.imread(inimage_path)\n cvimg = cvimg[60:-30, 25:-25]\n h, w, _ = cvimg.shape\n assert h == w == 128\n outimage_path = osp.join(outroot, image)\n cv2.imwrite(outimage_path, cvimg)\n print(outimage_path)", "def create_clipping(request):\n if request.method == 'POST':\n image = request.POST.get('image')\n\n # check if the blanked image should be saved to the backend\n if request.POST.get('save_clipping') in ['false', False]:\n save = False\n else:\n save = True\n\n selection = {\n 'id': int(request.POST.get('selection[id]')),\n 'x': int(round(float(request.POST.get('selection[x]')))),\n 'y': int(round(float(request.POST.get('selection[y]')))),\n 'width': int(round(float(request.POST.get('selection[width]')))),\n 'height': int(round(float(request.POST.get('selection[height]')))),\n 'full_width': int(round(float(request.POST.get('selection[full_width]')))),\n 'full_height': int(round(float(request.POST.get('selection[full_height]'))))\n }\n\n # get the image id, the model object and the selection from the model\n edit_url = request.POST.get('edit_url')\n image_id = int(edit_url.split(\"/\")[-2])\n image_object = CustomImage.objects.get(id=image_id)\n original_selection = image_object.selections\n\n reseized_image = image_object.resize_url\n\n if selection['id'] == -1: # no selection -> use the whole image\n cropped_image = reseized_image\n else: # a specific selecion is used -> get the cropped image and selecion attributes\n image_object = CroppedImage.objects.get(id=selection['id'])\n cropped_image = original_selection[unicode(request.POST.get('selection[id]'))][\"url\"]\n\n # calculate the new blanking mask\n mask = get_mask_from_image(image, selection, cropped_image, save, image_object)\n\n # stream the new mask to the output\n stream = BytesIO()\n flat_mask = []\n for line in mask:\n flat_mask.extend(line)\n np.savetxt(stream, flat_mask, fmt=\"%u\", delimiter=', ', newline=', ')\n stream.seek(0)\n return HttpResponse(stream.read())\n\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )", "def set_imagefilename(self,imagefilename):\n self.imagefile = open(imagefilename,'r+')", "def dropEvent(self, event: QtGui.QDropEvent) -> None:\n if event.mimeData().hasImage:\n event.setDropAction(Qt.CopyAction)\n 
self.image = event.mimeData().urls()[0].toLocalFile()\n x = self.width()\n y = self.height()\n im = QPixmap(self.image).scaled(x, y) # , aspectRatioMode=Qt.KeepAspectRatio)\n im.save(os.getcwd() + \"/tmp.jpg\")\n self.image = (os.getcwd() + \"/tmp.jpg\")\n self.setPixmap(im)\n # self.setPixmap(QPixmap(self.image))\n self.setStyleSheet(\"\")\n event.accept()\n else:\n event.ignore()", "def crop_inference_bbox(image, boxes, file_name=\"cropped_inference_result\"):\n # create output folder if not present\n create_dir(\"output/\")\n # crop detections\n if len(boxes) > 0:\n for ind in range(len(boxes)):\n cropped_img = image[\n int(boxes[ind][0][1]) : int(boxes[ind][1][1]),\n int(boxes[ind][0][0]) : int(boxes[ind][1][0]),\n :,\n ]\n save_path = os.path.join(\"output/\", file_name + \"_\" + str(ind) + \".png\")\n cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))", "def crop_save( img_path_filename, lines_boxes, lines_texts, lines_probs, filename, basename, output_dir_name ):\n\t# Read the image\n\timage = Image.open( img_path_filename )\n\t# Get image's size\n\twidth, height = image.size\n\n\ti = 0\n\ttext_local = \"\"\n\ttext_global = \"\"\n\twhile i < len(lines_boxes):\n\t\t##################################################################################################\n\t\t# Left Upper Corner\n\t\tx1 = lines_boxes[i][0]\n\t\tx1 = x1 - 8\n\t\tif x1 < 0:\n\t\t\tx1 = 0\n\n\t\ty1 = lines_boxes[i][1]\n\t\ty1 = y1 - 1\n\t\tif y1 < 0:\n\t\t\ty1 = 0\n\n\t\t# Right Lower Corner\n\t\tx2 = lines_boxes[i][2]\n\t\tx2 = x2 + 10\n\t\tif x2 > (width - 1):\n\t\t\tx2 = width - 1\n\n\t\ty2 = lines_boxes[i][3]\n\t\ty2 = y2 + 1\n\t\tif y2 > (height - 1):\n\t\t\ty2 = height - 1\n\n\t\t# Crop the block and save it\n\t\tn_line = \"%03d\" % (i+1)\n\t\tline_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".jpg\"\t\t\n\n\t\timg_cropped = image.crop( (x1, y1, x2, y2) )\n\t\timg_cropped.save( line_filename, 'JPEG', quality = 100 )\n\n\t\t##################################################################################################\n\t\t# Create the information about the cropped line for the local and global text files\n\t\ttext_line = basename + \"_\" + n_line + \".jpg\\t\" + str(x1) + \"\\t\" + str(y1) + \"\\t\" + str(x2) + \"\\t\" + str(y2) + \"\\t\" + ''.join(lines_texts[i]) + \"\\n\"\n\t\ttext_local += text_line\n\t\ttext_global += filename + \"\\t\" + text_line\n\n\t\t##################################################################################################\n\t\t# Creation of the text and probability file for each line\n\t\tj = 0\n\t\tcontent_text_file = \"\"\n\t\tcontent_prob_file = \"\"\n\t\twhile j<len(lines_texts[i]):\n\t\t\tcontent_text_file += lines_texts[i][j]\n\t\t\tcontent_prob_file += lines_texts[i][j] + '\\t' + str(lines_probs[i][j]) + '\\n'\n\t\t\tj = j + 1\n\t\t# Write to disk the text file\n\t\ttext_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".txt\"\n\t\twith open( text_filename, \"w+\" ) as f_text:\n\t\t\tf_text.write( content_text_file )\n\t\t# Write to disk the probabilities file\n\t\tprob_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".prob\"\n\t\twith open( prob_filename, \"w+\" ) as f_prob:\n\t\t\tf_prob.write( content_prob_file )\n\n\t\ti = i + 1\n\n\treturn( text_local, text_global )", "def add(self, file):\n if file not in self._files:\n self._files.insert(0, file)\n if len(self._files) > self.nbmax:\n del(self._files[-1])\n else:\n self._files.remove(file)\n self._files.insert(0, file)\n try:\n with 
open(self._filename, 'w') as file:\n file.write('\\n'.join(self._files))\n except Exception:\n # avoid raising errors if location is read-only or invalid path\n pass", "def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)", "def newAvatarImage(self, imgPath, imgName): \n img = ocempgui.draw.Image.load_image(imgPath)\n if not self.images[imgName]: \n imgOcemp = guiobjects.OcempImageMapTransparent(img)\n imgOcemp.topleft = 528, 114\n self.window.add_child(imgOcemp)\n self.images[imgName] = imgOcemp\n else:\n self.images[imgName].picture = img", "def store_image(self, image_name):\n self.images.append(image_name)", "def _build_crop_fn(self, img_shape, crop_modes):\n h = img_shape[0]\n w = img_shape[1]\n\n w_crop = int(w * self.CROP_RATIO)\n h_crop = int(h * self.CROP_RATIO)\n\n top_pads = {\n Crop.TOP: 0,\n Crop.CENTER: int((h - h_crop) / 2),\n Crop.BOTTOM: h - h_crop\n }\n left_pads = {\n Crop.LEFT: 0,\n Crop.CENTER: int((w - self.CROP_RATIO) / 2),\n Crop.RIGHT: w - w_crop\n }\n\n def crop(image, directory):\n for crop_mode in crop_modes:\n top_pad = top_pads[crop_mode.vertical]\n left_pad = left_pads[crop_mode.horizontal]\n fname = self.name_generator.generate_aug_name(\n original=image.name,\n aug_name=\"{}_{}\".format(crop_mode.vertical, crop_mode.horizontal)\n )\n fpath = os.path.join(directory, fname)\n\n crop = image.x[top_pad:top_pad + h_crop, left_pad:left_pad + w_crop]\n crop = cv2.resize(crop, (w, h))\n cv2.imwrite(fpath, crop)\n\n return crop", "def save(self, **kwargs):\n self.remove_file()\n if not self.image:\n self.generate(save=False)\n else:\n self.image.name = self.file()\n super(FormatedPhoto, self).save(**kwargs)", "def write(self, filename):\n\n self.__image.save(filename)", "def crop(image, countours, num_img, path_des):\n i = 0\n if len(countours) == 0: # we d'ont fine a contour save the all image\n path = path_des\n cv2.imwrite(os.path.join(path, 'Barcod') + '_' + 'num' + str(i) + '_' + 'img' + num_img, image)\n i = i + 1\n for cntr in countours:\n # creates an approximate rectangle around contour\n x, y, w, h = cv2.boundingRect(cntr)\n of = 50\n # pulls crop out of the image based on dimensions\n # new_img = image[y-of:y + h+of, x-of:x + w+of]\n new_img = offset(image, y - of, y + h + of, x - of, x + w + of)\n\n path = path_des\n cv2.imwrite(os.path.join(path, 'Barcod') + '_' + 'num' + str(i) + '_' + 'img' + num_img, new_img)\n\n i = i + 1", "def crop_images(row, crop_path):\n def crop(im, box, square=True):\n \"\"\" box: list, [x_left, y_bottom, x_l + w, y_b + h]\n \"\"\"\n def pad_square(box):\n \"\"\" If box is a rectangle, expand it to a square.\n \"\"\"\n x, y, xw, yh = box\n w = xw-x\n h = yh-y\n if w < h:\n w = h\n elif h < w:\n h = w\n return [x, y, x+w, y+h]\n if square:\n box = pad_square(box)\n x, y, xw, yh = box\n return im[y:yh, x:xw]\n im = tiffread(row['Image'])\n im_crop = crop(im, row['Cropbox'], square=True)\n crop_file = os.path.join(crop_path, row['Name'], row['UID'])\n tiffwrite(crop_file, im_crop)", "def insert(self, item: Crop) -> None:\n self._content.append(item)\n self._file_counts[item.annot_type] = self._file_counts.get(item.annot_type, 0) + 1", "def insert_image(self, image_path: str) -> str:\n mm = self.anki.media.MediaManager(self.collection, None)\n name = mm.addFile(image_path)\n self.collection.save()\n return name", "def loadImage(self, file_name):\n self.surf = pygame.image.load(file_name)\n self.draw_angle = 0 # In degrees\n self.bullets = []", "def write_image(self, filename):\n 
cv2.imwrite(filename, self.image)", "def next_crop(self):\n image_crop = self.image[self.j : self.j + self.size, self.i : self.i + self.size]\n\n if image_crop.size != self.size*self.size:\n image_crop = numpy.pad(image_crop, ((0, self.size - image_crop.shape[0]), (0, self.size - image_crop.shape[1])), \"constant\")\n self.update()\n\n return image_crop, not self.no_more_crops", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def add_crop_center(self, shape):\n self.methods.append(self._crop_center)\n self.args.append([shape])", "def saveImageAs(self, name):\n\t\tself.image.save(name)", "def make_cover(filename, destination=None):\n _resize(filename, *COVER_SIZE, destination=destination)", "def insert(self, *args, **kwargs):\n return _image.image_insert(self, *args, **kwargs)" ]
[ "0.60004956", "0.5452988", "0.53952855", "0.53464556", "0.53207326", "0.52505255", "0.5239172", "0.518678", "0.5163597", "0.5147501", "0.5141308", "0.513236", "0.5121384", "0.5045127", "0.5008329", "0.50027025", "0.49967998", "0.4968934", "0.49625248", "0.4922145", "0.4885444", "0.48595783", "0.48428452", "0.48388302", "0.47980636", "0.4796322", "0.4778058", "0.47633728", "0.47617763", "0.4761282", "0.47368908", "0.4735016", "0.47344747", "0.47330216", "0.46778053", "0.46722198", "0.46711406", "0.46700698", "0.4647951", "0.46470326", "0.4634986", "0.46325117", "0.46259174", "0.4612462", "0.46046668", "0.46002153", "0.45971307", "0.45971307", "0.45956406", "0.45877498", "0.45774677", "0.45699137", "0.45648748", "0.45553747", "0.455101", "0.45489374", "0.45394033", "0.45363784", "0.45324555", "0.45305258", "0.4523327", "0.4521353", "0.45170367", "0.4511193", "0.4501939", "0.45018992", "0.44969803", "0.44954702", "0.44947258", "0.4488749", "0.4486349", "0.44856632", "0.4478888", "0.44698668", "0.44656417", "0.44628406", "0.44616655", "0.44605562", "0.44512978", "0.44507253", "0.44444552", "0.44430298", "0.44399783", "0.4439577", "0.4439202", "0.44374886", "0.44312748", "0.4417613", "0.44154245", "0.44146964", "0.44133037", "0.44125164", "0.44075012", "0.43993232", "0.43969542", "0.43969542", "0.4395951", "0.43799853", "0.43788058", "0.4376247" ]
0.56664383
1
Randomly divide this image into training, validation and test split. The image is divided into three randomly ordered consecutive patches. The fractions of the image going into each patch are given by val_split, test_split, and 1 - (val_split + test_split), respectively. Columns that would overlap more than one patch are removed.
def select_randomly(self, val_split: float, test_split: float) -> {str: int}:
    def _select(start, n, label) -> int:
        """ Label all columns in [start, start+n) with label. """
        n_selected = 0
        for i in range(start, int(start + n)):
            x = self._x_positions[i]
            n_selected += self._cols[x].mark_as(label)
        return n_selected

    def _remove_overlaps(start, end) -> int:
        """ Remove unlabelled columns in [start-col_width, end+col_width]. """
        start = self._x_positions[start % self.n_cols]
        end = self._x_positions[int(end) % self.n_cols]
        n_removed = 0
        for x, col in self._cols.items():
            if start - self.col_width <= x <= start or end <= x <= end + self.col_width:
                if col.label is None:
                    n_removed += col.mark_as('ignore')
        return n_removed

    def _next_unlabelled_col(x):
        """ Return index of first unlabelled column after x. """
        for i in range(self.n_cols):
            idx = (x + i) % self.n_cols
            x_current = self._x_positions[idx]
            if self._cols[x_current].label is None:
                return idx

    # When computing number of columns per split we must take into account
    # that some columns will be removed, i.e. we want to compute the split
    # sizes as fraction of the number of actual selected columns, not of
    # the total number of columns.
    delta_x = self._x_positions[1] - self._x_positions[0]
    n_to_remove_per_split = self.col_width / delta_x
    # * 2 because 2 gaps between 3 splits
    n_to_keep = self.n_cols - n_to_remove_per_split * 2
    n_val = round(n_to_keep * val_split)
    n_test = round(n_to_keep * test_split)
    n_train = n_to_keep - n_val - n_test

    n_selected_crops_per_split = dict.fromkeys(['training', 'validation', 'test', 'ignore'], 0)

    # Place patches in arbitrary order
    start = 0
    for n, label in random.sample(list(zip([n_train, n_val, n_test],
                                           ['training', 'validation', 'test'])), k=3):
        # Mark patch
        n_selected_crops_per_split[label] += _select(start, n, label)
        # Remove columns overlapping this patch
        n_selected_crops_per_split['ignore'] += _remove_overlaps(start, start + n - 1)
        # Next patch starts at next unlabelled column
        start = _next_unlabelled_col(start)

    return n_selected_crops_per_split
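The method above depends on a column container class (with `_cols`, `_x_positions`, `n_cols` and `col_width`) that is not part of this record. As a minimal, self-contained sketch of the same idea, the snippet below places the three patches in random order over a flat list of column labels and marks the columns at the two boundaries between patches as ignored; the names `split_columns` and `gap` are illustrative assumptions, not the original API.

```python
import random

def split_columns(n_cols: int, gap: int, val_split: float, test_split: float) -> dict:
    """Label n_cols crop columns as training/validation/test patches with a
    gap of ignored columns at each of the two boundaries between patches."""
    n_to_keep = n_cols - gap * 2
    n_val = round(n_to_keep * val_split)
    n_test = round(n_to_keep * test_split)
    n_train = n_to_keep - n_val - n_test

    labels = []
    # Shuffle the order of the three patches, as select_randomly() does.
    patches = random.sample(list(zip([n_train, n_val, n_test],
                                     ['training', 'validation', 'test'])), k=3)
    for i, (n, label) in enumerate(patches):
        labels += [label] * n
        if i < 2:                        # 2 gaps between 3 patches
            labels += ['ignore'] * gap   # crops here would straddle two splits
    return {key: labels.count(key) for key in ('training', 'validation', 'test', 'ignore')}

print(split_columns(n_cols=100, gap=5, val_split=0.2, test_split=0.1))
# e.g. {'training': 63, 'validation': 18, 'test': 9, 'ignore': 10}
```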
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def split_valid_sampling(inpath,\n patch_size, \n train_prop,\n val_prop,\n outpath,\n padding_mode='constant', \n padding_values=0, \n ignore_labels=[0]):\n outdir = outpath.joinpath('valid_sampling')\n outpath = outdir.joinpath(f'{patch_size}x{patch_size}_{padding_mode}_{train_prop:.2f}_{val_prop:.2f}.h5')\n \n if outpath.is_file():\n warnings.warn('Sampled data already exist, remove directory'\n ' \"{}\" to resample data!'.format(outpath))\n return outpath\n\n outdir.mkdir(parents=True, exist_ok=True)\n \n with h5py.File(inpath, 'r') as in_file, h5py.File(outpath, 'w') as out_file:\n out_file.attrs.update(in_file.attrs) # copy attributes\n patchgroup = out_file.create_group('patches')\n\n data, labels = in_file['data'], in_file['labels']\n\n\n # split image into subimages of size patch_size x patch_size\n h, w, _ = data.shape\n num_subimg_h = ceil(h/patch_size) # patches along vertical axis\n num_subimg_w = ceil(w/patch_size) # patches along horizontal axis\n\n subimgs = []\n subimg_labels = []\n\n for i in range(num_subimg_h):\n for j in range(num_subimg_w):\n start_idx_h = i*patch_size\n start_idx_w = j*patch_size\n end_idx_h = (i+1)*patch_size\n end_idx_w = (j+1)*patch_size\n\n # end_idx_h and end_idx_w may be greater than height and width of data array\n if end_idx_h > h:\n end_idx_h = h\n if end_idx_w > w:\n end_idx_w = w\n\n subimgs.append(data[start_idx_h:end_idx_h, start_idx_w:end_idx_w])\n subimg_labels.append(labels[start_idx_h:end_idx_h, start_idx_w:end_idx_w])\n\n # shuffle samples\n samples = list(zip(subimgs, subimg_labels))\n np.random.shuffle(samples)\n subimgs, subimg_labels = zip(*samples)\n\n # count how many pixels have non 'ignore_labels' and use result to assign approximately\n # train_prop share of non zero data to train set, val_prop of non zero data to validation set\n # and (1-(train_prop+val_prop)) to test set.\n if ignore_labels:\n cum_nonzero_labels = np.cumsum(\n [np.invert(np.isin(lbls, ignore_labels)).sum() for lbls in subimg_labels])\n split_idx_train = 0\n split_idx_val = 0\n if cum_nonzero_labels[-1] == 0:\n raise RuntimeError('Labelimage only contains ignored labels.')\n while(True):\n if (cum_nonzero_labels[split_idx_train]/cum_nonzero_labels[-1]) < train_prop:\n split_idx_train += 1\n if (cum_nonzero_labels[split_idx_val]/cum_nonzero_labels[-1]) < (train_prop + val_prop):\n split_idx_val += 1\n else:\n break\n print(f'{cum_nonzero_labels[split_idx_train]} / {cum_nonzero_labels[-1]}')\n print(f'{cum_nonzero_labels[split_idx_val]} / {cum_nonzero_labels[-1]}')\n else :\n split_idx_train = int(len(subimgs)*train_prop)\n split_idx_val = int(len(subimgs)*(train_prop + val_prop))\n\n # sample test and training data patches\n train_subimgs = subimgs[:split_idx_train]\n train_subimg_labels = subimg_labels[:split_idx_train]\n val_subimgs = subimgs[split_idx_train:split_idx_val]\n val_subimg_labels = subimg_labels[split_idx_train:split_idx_val]\n test_subimgs = subimgs[split_idx_val:]\n test_subimg_labels = subimg_labels[split_idx_val:]\n train_samplecount = _sample_patches(train_subimgs, train_subimg_labels, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels)\n val_samplecount = _sample_patches(val_subimgs, val_subimg_labels,\n patch_size,\n patchgroup,\n padding_mode,\n padding_values,\n ignore_labels,\n startidx=train_samplecount)\n test_samplecount = _sample_patches(test_subimgs, 
test_subimg_labels, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=(train_samplecount+val_samplecount))\n\n train_samples = np.arange(train_samplecount)\n val_samples = np.arange(train_samplecount, train_samplecount+val_samplecount)\n test_samples = np.arange((train_samplecount+val_samplecount), \n (train_samplecount+val_samplecount+test_samplecount))\n\n out_file.create_dataset('trainsample_list', data=train_samples)\n out_file.create_dataset('valsample_list', data=val_samples)\n out_file.create_dataset('testsample_list', data=test_samples)\n out_file.attrs['train_prop'] = train_prop\n out_file.attrs['val_prop'] = val_prop\n\n return outpath", "def split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def split_data_by_image(self, test_fraction=0.5):\n image_id = BaseModel.get_image_id(self.inputs)\n test_idx = np.random.random(image_id.max()+1) <= test_fraction\n\n # Low image count edge case (mostly just for testing purposes)\n if True not in test_idx:\n test_idx[0] = True\n elif False not in test_idx:\n test_idx[0] = False\n \n test_idx = test_idx[image_id]\n if BaseModel.is_laue(self.inputs):\n train, test = self.split_laue_data_by_mask(test_idx)\n else:\n train, test = self.split_mono_data_by_mask(test_idx)\n\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test", "def split_train_test_data(total_data_df, frac):\n test_data_df = total_data_df.sample(frac=frac, random_state=1)\n train_data_df = total_data_df.loc[total_data_df.index.difference(test_data_df.index)]\n return train_data_df, test_data_df", "def split_random_sampling(inpath,\n patch_size, \n train_prop,\n val_prop,\n outpath,\n padding_mode='constant', \n padding_values=0, \n ignore_labels=[0]):\n \n outdir = outpath.joinpath('random_sampling')\n outpath = outdir.joinpath(f'{patch_size}x{patch_size}_{padding_mode}_{train_prop:.2f}_{val_prop:.2f}.h5')\n \n if outpath.is_file():\n warnings.warn('Sampled data already exist, remove directory'\n ' \"{}\" to resample data!'.format(outpath))\n return outpath\n\n outdir.mkdir(parents=True, exist_ok=True)\n \n with h5py.File(inpath, 'r') as in_file, h5py.File(outpath, 'w') as out_file:\n out_file.attrs.update(in_file.attrs) # copy attributes\n patchgroup = out_file.create_group('patches')\n \n # sample patches\n samplecount = _sample_patches([in_file['data'][:]], [in_file['labels'][:]], \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels) \n\n # define test, train and validation split\n split_idx_train = int(train_prop * samplecount)\n split_idx_val = int((train_prop + val_prop) * samplecount)\n\n samples = np.arange(samplecount)\n np.random.shuffle(samples)\n train_samples = samples[:split_idx_train]\n val_samples = samples[split_idx_train:split_idx_val]\n test_samples = samples[split_idx_val:]\n\n out_file.create_dataset('trainsample_list', data=train_samples)\n 
out_file.create_dataset('valsample_list', data=val_samples)\n out_file.create_dataset('testsample_list', data=test_samples)\n out_file.attrs['train_prop'] = train_prop\n out_file.attrs['val_prop'] = val_prop\n \n return outpath", "def split_datasets(img_lst):\n num = len(img_lst)\n\n idx = np.random.permutation(num)\n train_lst = np.array(img_lst)[idx[:int(num * .8)]] # 80/20 split\n validation_lst = np.array(img_lst)[idx[int(num * .8):int(num * .9)]]\n test_lst = np.array(img_lst)[idx[int(num * .9):]]\n return train_lst, validation_lst, test_lst", "def gen_train_val_test_split(self, splits=[0.7, 0.2, 0.1], random_state=42):\n patient_dirs = os.listdir(self.out_dir)\n msk_list, img_list, patient_list, is_tumor = [], [], [], []\n\n for patient in patient_dirs:\n patient_folder = os.path.join(self.out_dir, patient)\n if not os.path.isdir(patient_folder):\n continue\n\n patient_root = os.path.join(self.out_dir, patient)\n for file in os.listdir(patient_root):\n if \"mask\" not in file:\n patient_list.append(patient)\n img_list.append(os.path.join(patient_root, file))\n msk_list.append(os.path.join(patient_root, file[:file.find(\".npy\")] + \"_mask.npy\"))\n \n img = np.load(msk_list[-1])\n if np.mean(img) > 0:\n is_tumor.append(1)\n else:\n is_tumor.append(0)\n \n data = pd.DataFrame(data={\"Patient\": patient_list, \"Image\": img_list, \"Mask\": msk_list, \"Tumor Present\": is_tumor})\n self.train_df, self.valid_df, self.test_df = self._split_by_patients(data, val_split=splits[1], test_split=splits[2], random_state=random_state)\n # print(len(train), len(val), len(test))\n # self.train_df = data[data[\"Patient\"].isin(train)]\n # self.valid_df = data[data[\"Patient\"].isin(val)]\n # self.test_df = data[data[\"Patient\"].isin(test)]", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def train_test_split_data(aligned_image_lists, raw_image_lists, test_ratio ,validation = True ):\n\n assert len(aligned_image_lists) == len(raw_image_lists), \"images have different size\"\n mask = list(range(len(aligned_image_lists)))\n mask_train, mask_test = train_test_split(mask, test_size= test_ratio, shuffle=True)\n\n aligned_lists_train = [aligned_image_lists[i] for i in mask_train]\n aligned_lists_test = [aligned_image_lists[i] for i in mask_test]\n\n raw_lists_train = [raw_image_lists[i] for i in mask_train] \n raw_lists_test = [raw_image_lists[i] for i in mask_test] \n\n return [aligned_lists_train, aligned_lists_test, raw_lists_train, raw_lists_test]", "def get_test_split(self, fraction=0.1):\n rng = np.random.default_rng(42)\n test_size = int(round(len(self.all_asset_ids) * fraction))\n test_ids = rng.choice(self.all_asset_ids, size=test_size, replace=False)\n train_ids = [i for i in self.all_asset_ids if i not in test_ids]\n return train_ids, test_ids", "def test_train_split_per_value():\n shape = (1000, 1000, 3)\n\n input1 = np.random.randint(10, size=shape, dtype=int)\n input2 = np.random.randint(10, size=shape, dtype=int)\n\n patch1 = EOPatch()\n patch1[INPUT_MASK_FEATURE] = input1\n\n patch2 = EOPatch()\n patch2[INPUT_MASK_FEATURE] = input2\n\n bins = [0.2, 0.6]\n\n split_task = TrainTestSplitTask((*INPUT_MASK_FEATURE, NEW_FEATURE_NAME), bins, split_type='per_value')\n\n # seeds should get ignored when 
splitting 'per_value'\n patch1 = split_task(patch1, seed=1)\n patch2 = split_task(patch2, seed=1)\n\n otuput1 = patch1[NEW_MASK_FEATURE]\n otuput2 = patch2[NEW_MASK_FEATURE]\n\n unique = set(np.unique(input1)) | set(np.unique(input2))\n\n for uniq in unique:\n folds1 = otuput1[input1 == uniq]\n folds2 = otuput2[input2 == uniq]\n assert_array_equal(np.unique(folds1), np.unique(folds2))", "def populate_train_test_val_dirs_nonrandomly(root_dir, val_ratio=0.15, test_ratio=0.05, preliminary_clahe=True,\n apply_masks=True):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredBlurryImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n if val_ratio == 0.0:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n if test_ratio == 0.0:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)]\n if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names\n if filename not in val_file_names and filename not in test_file_names]\n\n # Print the file distribution among the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n # Copy-Pasting images into train dataset\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/train/Masks')\n\n # Copy-Pasting images into val dataset\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/val/Masks')\n\n # Copy-Pasting images into test dataset\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/test/Masks')\n\n ''' Augment the images in each new folder '''\n # If we want to use preliminary adaptive equalization...\n if preliminary_clahe:\n pass\n # ... 
then first, apply Contrast Limited Adaptive Histogram Equalization to clear images in all folders\n CLAHE_image_folder(root_dir + '/train/ClearImages')\n CLAHE_image_folder(root_dir + '/val/ClearImages')\n CLAHE_image_folder(root_dir + '/test/ClearImages')\n\n # Then, apply histogram equalization to make the blurry images' histogram match that of the clear images\n hist_match_image_folder(root_dir=join(root_dir, 'train'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'val'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'test'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)", "def split_dataset(df, split_method, data_testing, random_seed, train_frac=0.8, test_frac=0.1):\n\n # Get data_type and data_value from split parameters\n # If no data_type is provided, data_type is the same as split_method\n data_type = data_testing['data_type'] if data_testing['data_type'] else split_method\n data_value = data_testing['data_value']\n\n if not split_method in df:\n raise KeyError(\"No split_method '{}' was not found in metadata\".format(split_method))\n if not data_type in df:\n logger.warning(\"No data_type named '{}' was found in metadata. Not taken into account \"\n \"to split the dataset.\".format(data_type))\n data_type = split_method\n\n # Filter dataframe with rows where split_method is not NAN\n df = df[df[split_method].notna()]\n\n # If no data_value list is provided, create a random data_value according to data_type and test_fraction\n # Split the TEST and remainder set using sklearn function\n if len(data_value) == 0 and test_frac != 0:\n data_value = sorted(df[data_type].unique().tolist())\n test_frac = test_frac if test_frac >= 1 / len(data_value) else 1 / len(data_value)\n data_value, _ = train_test_split(data_value, train_size=test_frac, random_state=random_seed)\n if len(data_value) != 0:\n for value in data_value:\n if value not in df[data_type].values:\n logger.warning(\"No data_value '{}' was found in '{}'. 
Not taken into account \"\n \"to split the dataset.\".format(value, data_type))\n X_test = df[df[data_type].isin(data_value)]['filename'].unique().tolist()\n X_remain = df[~df[data_type].isin(data_value)][split_method].unique().tolist()\n\n # List dataset unique values according to split_method\n # Update train fraction to apply to X_remain\n data = sorted(df[split_method].unique().tolist())\n train_frac_update = train_frac * len(data) / len(X_remain)\n if ((train_frac_update > (1 - 1 / len(X_remain)) and len(X_remain) < 2) or train_frac_update > 1):\n raise RuntimeError(\"{}/{} '{}' remaining for training and validation sets, train_fraction {} is too large, \"\n \"validation set would be empty.\".format(len(X_remain), len(data), split_method, train_frac))\n\n # Split remainder in TRAIN and VALID sets according to train_frac_update using sklearn function\n X_train, X_val = train_test_split(X_remain, train_size=train_frac_update, random_state=random_seed)\n\n # Print the real train, validation and test fractions after splitting\n real_train_frac = len(X_train)/len(data)\n real_valid_frac = len(X_val)/len(data)\n real_test_frac = 1 - real_train_frac - real_valid_frac\n logger.warning(\"After splitting: train, validation and test fractions are respectively {}, {} and {}\"\n \" of {}.\".format(round(real_train_frac, 3), round(real_valid_frac, 3),\n round(real_test_frac, 3), split_method))\n\n # Convert train and valid sets from list of \"split_method\" to list of \"filename\"\n X_train = df[df[split_method].isin(X_train)]['filename'].unique().tolist()\n X_val = df[df[split_method].isin(X_val)]['filename'].unique().tolist()\n\n # Make sure that test dataset is unseen during training\n # (in cases where there are multiple \"data_type\" for a same \"split_method\")\n X_train = list(set(X_train) - set(X_test))\n X_val = list(set(X_val) - set(X_test))\n\n return X_train, X_val, X_test", "def _split_train_test_per_group(x, y, frac, max_train, random_state):\n y_series = pd.Series(y)\n # split train test per group\n train_idx = []\n test_idx = []\n outlier_idx = []\n for cluster, sub_series in y_series.groupby(y_series):\n if (cluster == -1) or (sub_series.size < 3):\n outlier_idx += sub_series.index.tolist()\n else:\n n_train = max(1, min(max_train, int(sub_series.size * frac)))\n is_train = sub_series.index.isin(sub_series.sample(n_train, random_state=random_state).index)\n train_idx += sub_series.index[is_train].tolist()\n test_idx += sub_series.index[~is_train].tolist()\n x_train = x[train_idx]\n y_train = y[train_idx]\n x_test = x[test_idx]\n y_test = y[test_idx]\n return x_train, y_train, x_test, y_test", "def split_data(self, verbose=False):\n # group sample by patient and body part\n tmp = self.data_info.groupby(['patientID', 'body_part']).max()\n # get the index (i.e. patient and bodypart) where none of the body part XR of a given patient are abnormal\n idx_list_normal = tmp[tmp.body_part_abnormal == 0].index.to_list()\n # get the index (i.e. patient and bodypart) where at least one but not all of the body part XR of a given patient are abnormal\n idx_list_mixt = tmp[tmp.body_part_abnormal == 0.5].index.to_list()\n # get the index (i.e. 
patient and bodypart) where all one of the body part XR of a given patient are abnormal\n idx_list_abnormal = tmp[tmp.body_part_abnormal == 1].index.to_list()\n total = len(idx_list_normal)+len(idx_list_mixt)+len(idx_list_abnormal)\n train_size = self.train_frac*total\n assert train_size < len(idx_list_normal), f'There are not enough normal sample for the given train_frac : {self.train_frac}. \\\n There are {len(idx_list_normal)} normal sample over {total} total samples.'\n valid_size = (1-self.train_frac)*0.5*total\n test_size = (1-self.train_frac)*0.5*total\n # randomly pick (1-ratio_known_abnormal)*train_frac*total from the normal index for the train set\n train_idx_normal, remain = train_test_split(idx_list_normal, \\\n train_size=int((1-self.ratio_known_abnormal)*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_normal, test_idx_normal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # add ratio_known_abnormal*train_frac*total from the abnormal index\n if self.ratio_known_abnormal == 0.0:\n train_idx_abnormal, remain = [], idx_list_abnormal\n else:\n train_idx_abnormal, remain = train_test_split(idx_list_abnormal, \\\n train_size=int(self.ratio_known_abnormal*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_abnormal, test_idx_abnormal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # split the mixt between test and validation and consider them as abnormal patients bodypart\n valid_idx_mixt, test_idx_mixt = train_test_split(idx_list_mixt, test_size=0.5, random_state=self.random_state)\n valid_idx_abnormal += valid_idx_mixt\n test_idx_abnormal += test_idx_mixt\n # get the known and unknown index for each sets\n # get a fraction of normal known\n if self.ratio_known_normal == 0.0:\n train_idx_known, train_idx_unknown = [], train_idx_normal\n valid_idx_known, valid_idx_unknown = [], valid_idx_normal\n test_idx_known, test_idx_unknown = [], test_idx_normal\n else:\n train_idx_known, train_idx_unknown = train_test_split(train_idx_normal, \\\n train_size=int(self.ratio_known_normal*train_size),\\\n random_state=self.random_state)\n valid_idx_known, valid_idx_unknown = train_test_split(valid_idx_normal, \\\n train_size=int(self.ratio_known_normal*valid_size),\\\n random_state=self.random_state)\n test_idx_known, test_idx_unknown = train_test_split(test_idx_normal, \\\n train_size=int(self.ratio_known_normal*test_size), \\\n random_state=self.random_state)\n # get the abnormal known\n # all abnormal in train are known\n train_idx_known += train_idx_abnormal\n if self.ratio_known_abnormal == 0.0:\n valid_idx_unknown += valid_idx_abnormal\n test_idx_unknown += test_idx_abnormal\n else:\n valid_idx_known_abnormal, valid_idx_unknown_abnormal = train_test_split(valid_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*valid_size), \\\n random_state=self.random_state)\n valid_idx_known += valid_idx_known_abnormal\n valid_idx_unknown += valid_idx_unknown_abnormal\n test_idx_known_abnormal, test_idx_unknown_abnormal = train_test_split(test_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*test_size),\\\n random_state=self.random_state)\n test_idx_known += test_idx_known_abnormal\n test_idx_unknown += test_idx_unknown_abnormal\n\n # get the subsample dataframe with semi-label\n train_df = self.generate_semisupervized_label(train_idx_known, train_idx_unknown)\n valid_df = 
self.generate_semisupervized_label(valid_idx_known, valid_idx_unknown)\n test_df = self.generate_semisupervized_label(test_idx_known, test_idx_unknown)\n # shuffle the dataframes\n self.subsets['train'] = train_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['valid'] = valid_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['test'] = test_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n # Print summary\n if verbose:\n self.print_stat()", "def divide_data(n_reconstructions, ratio = [.8,.1,.1], test_set = True, random = False):\r\n\r\n\tassert np.sum(ratio) == 1.\r\n\r\n\tif test_set:\r\n\t\tassert len(ratio) == 3\r\n\r\n\telse:\r\n\t\tassert len(ratio) == 2\r\n\t\t\t\r\n\tif random:\r\n\t\tnp.random.seed(0)\r\n\r\n\t\tr = np.arange(n_reconstructions)\r\n\t\tnp.random.shuffle(r)\r\n\r\n\t\ti_train = r[:int(ratio[0]*n_reconstructions)]\r\n\t\ti_valid = r[int(ratio[0]*n_reconstructions):int((ratio[0]+ratio[1])*n_reconstructions)]\r\n\r\n\t\tif test_set:\r\n\t\t\ti_test = r[int((ratio[0]+ratio[1])*n_reconstructions):]\r\n\r\n\telse :\r\n\t\tr = np.arange(n_reconstructions)\r\n\t\ti_valid = r[(r+1) % int(ratio[1]*100) == 0]\r\n\r\n\t\tif test_set:\r\n\t\t\ti_test = r[(r+2) % int(ratio[2]*100) == 0]\r\n\t\t\ti_train = r[((r+1) % int(ratio[1]*100) != 0)*((r+2) % int(ratio[2]*100) != 0)]\r\n\t\t\r\n\t\telse:\r\n\t\t\ti_train = r[(r+1) % int(ratio[1]*100) != 0]\r\n\r\n\tif not(test_set):\r\n\t\ti_test = []\r\n\r\n\treturn i_train,i_valid,i_test", "def _train_validation_test_split(self, images, centroids):\n \n # Get test images from images list. Test images are images for which a centroid mask does NOT exist\n test = []\n filtered_images = []\n \n # split images in test and train/val set\n for image in images:\n if image not in centroids:\n test.append(image)\n else:\n filtered_images.append(image)\n \n # Generate train/validation split based on remaining images and centroids\n train, validation = train_test_split(filtered_images, train_size=self.split, random_state=self.seed)\n \n return train, validation, test", "def split(self,\n dataset,\n frac_train=.8,\n frac_valid=.1,\n frac_test=.1,\n log_every_n=1000):\n np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)\n scaffold_sets = self.generate_scaffolds(dataset)\n\n train_cutoff = frac_train * len(dataset)\n valid_cutoff = (frac_train + frac_valid) * len(dataset)\n train_inds, valid_inds, test_inds = [], [], []\n\n log(\"About to sort in scaffold sets\", self.verbose)\n for scaffold_set in scaffold_sets:\n if len(train_inds) + len(scaffold_set) > train_cutoff:\n if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:\n test_inds += scaffold_set\n else:\n valid_inds += scaffold_set\n else:\n train_inds += scaffold_set\n return train_inds, valid_inds, test_inds", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest", "def split_folder(data_dir, train_pct, val_pct):\n\n random.seed(1)\n\n IMG_SUFFIX = '*_sat.jpg'\n MASK_SUFFIX = '*_msk.png'\n\n 
glob_imgs = os.path.join(data_dir,IMG_SUFFIX)\n glob_masks = os.path.join(data_dir, MASK_SUFFIX)\n\n img_paths = np.array(sorted(glob.glob(glob_imgs)))\n mask_paths = np.array(sorted(glob.glob(glob_masks)))\n \n num_imgs = len(img_paths)\n index_lst = list(range(num_imgs))\n\n random.shuffle(index_lst)\n\n train_idx_bound = int(train_pct * num_imgs)\n train_imgs = img_paths[index_lst[:train_idx_bound]]\n train_masks = mask_paths[index_lst[:train_idx_bound]]\n\n val_idx_bound = int((train_pct + val_pct) * num_imgs)\n val_imgs = img_paths[index_lst[train_idx_bound: val_idx_bound]]\n val_masks = mask_paths[index_lst[train_idx_bound: val_idx_bound]]\n\n test_imgs = img_paths[index_lst[val_idx_bound:]]\n test_masks = mask_paths[index_lst[val_idx_bound:]]\n\n # Write the lists to their own directories\n copy_list_to_dir(train_imgs, \"train\")\n print(\"Moved images into: train\")\n copy_list_to_dir(train_masks, \"train\")\n print(\"Moved masks into: train\")\n copy_list_to_dir(val_imgs, \"val\")\n print(\"Moved images into: val\")\n copy_list_to_dir(val_masks, \"val\")\n print(\"Moved masks into: val\")\n copy_list_to_dir(test_imgs, \"test\")\n print(\"Moved images into: test\")\n copy_list_to_dir(test_masks, \"test\")\n print(\"Moved masks into: test\")", "def split_train_test_by_percentage(dataset, train_percentage=0.8):\n train_length = int(len(dataset) * train_percentage)\n return torch.utils.data.random_split(dataset, (train_length, len(dataset) - train_length))", "def random_valid_test_split(x_test, y_test, num_val_samples):\n rand_state = np.random.get_state()\n sampler = np.random.rand(len(y_test)) < PERCENT_VALIDATION\n x_val = x_test[:, sampler, :, :]\n x_test = x_test[:, ~sampler, :, :]\n np.random.set_state(rand_state)\n sampler = np.random.rand(len(y_test)) < PERCENT_VALIDATION\n y_val = y_test[sampler]\n y_test = y_test[~sampler]\n return np.array(x_val), np.array(y_val), np.array(x_test), np.array(y_test)", "def split_data_by_refl(self, test_fraction=0.5):\n if BaseModel.is_laue(self.inputs):\n harmonic_id = BaseModel.get_harmonic_id(self.inputs)\n test_idx = (np.random.random(harmonic_id.max()+1) <= test_fraction)[harmonic_id]\n train, test = self.split_laue_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test\n\n test_idx = np.random.random(len(self.inputs[0])) <= test_fraction\n train, test = self.split_mono_data_by_mask(test_idx)\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def split_test_from_training_data(network, ratio=0.1):\n test_edges = random.sample(list(network.edges()), int(len(network.edges()) * ratio))\n training_network = network.copy()\n training_network.remove_edges_from(test_edges)\n test_network = network.copy()\n test_network.remove_edges_from(training_network.edges())\n return training_network, test_network", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def split(self, train_mask, test_mask):\n train = self.dataset.loc[train_mask]\n test = 
self.dataset.loc[test_mask]\n return PandasTrainTestSplit.from_dfs(train, test, self.fguide)", "def train_test_split(df, frac):\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test", "def create_validation_split(train_data, fraction_per_class=0.1, shuffle=True):\n\n subset_train_data = []\n val_data = []\n val_label_counts = {}\n\n class_labels = [i['class']['label'] for i in train_data]\n images_per_class = Counter(class_labels)\n val_images_per_class = {label: 0 for label in images_per_class.keys()}\n\n # Sanity check to make sure each class has more than 1 label\n for label, image_count in images_per_class.items():\n if image_count <= 1:\n print(\"Warning: label %d has only %d images\" % (label, image_count))\n\n if shuffle:\n random.shuffle(train_data)\n\n for image_data in train_data:\n label = image_data['class']['label']\n\n if label not in val_label_counts:\n val_label_counts[label] = 0\n\n if val_images_per_class[label] < images_per_class[label] * fraction_per_class:\n val_data.append(image_data)\n val_images_per_class[label] += 1\n else:\n subset_train_data.append(image_data)\n\n return subset_train_data, val_data", "def __train_test_splits(self):\n # By default, our indices are just 0-n\n split_indices = list(range(len(self.data)))\n # If shuffling, use our shared Random instance to shuffle our indices before slicing\n if self.shuffle:\n np.random.shuffle(split_indices)\n # Regardless of shuffle, take the first self.train_proportion for training, and the last\n # 1 - self.train_proportion records as test\n train_n = int(self.train_proportion * len(self.data))\n training_indices = split_indices[:train_n]\n test_indices = split_indices[train_n:]\n return training_indices, test_indices", "def split(self, X=None, y=None, groups=None):\n\n for train_index in [0,1]:\n train_indices=np.where(self.test_fold==train_index)[0]\n test_indices=np.where(self.test_fold==(train_index+1)%2)[0]\n if self.shuffle:\n self.rng.shuffle(train_indices)\n self.rng.shuffle(test_indices)\n yield train_indices, test_indices", "def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. 
ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test", "def groupby_train_test_split(df, selected_features=None, test_ratio=0.2, seed=12345, groupby='user_id'):\n\n ############################################################\n # Train Test Split\n ############################################################\n\n grp = df[groupby]\n n_splits = int(1 / test_ratio)\n groupkfold = GroupKFold(n_splits=n_splits)\n random.seed(seed)\n folds = groupkfold.split(df, groups = grp)\n train_idx, test_idx = next(folds)\n df_train, df_test = df.iloc[train_idx], df.iloc[test_idx]\n \n return df_train, df_test", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + 
neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def split_train_test_classifier(self, split_method, method):\n \n # split data balance based on user and act (if provided)\n if method == 'window_based':\n data_train, data_val, label_user_train, label_user_val, id_window_train, id_window_val = self.split_train_val_classifier(\n self.classifier['data'], self.classifier['user_label'], self.classifier['act_label'], self.classifier['id'], 'standard', train_size=0.9) \n\n print(f'Train window before delete overlap sequence: {data_train.shape[0]}')\n\n # delete overlap sequence\n if self.overlap != 0:\n if self.overlap == 0.5:\n distance_to_delete = [1]\n elif self.overlap == 0.75:\n distance_to_delete = [1,2,3]\n invalid_idx = delete_overlap(id_window_train, id_window_val, distance_to_delete)\n data_train = np.delete(data_train, invalid_idx, axis=0)\n label_user_train = np.delete(label_user_train, invalid_idx, axis=0)\n\n print(f'Train window after delete overlap sequence: {data_train.shape[0]}')\n print(f'Validation set: {data_val.shape[0]}')\n \n elif method == 'cycle_based':\n data_train, data_val, label_user_train, label_user_val = self.split_train_val_classifier(\n self.classifier['data'], self.classifier['user_label'], self.classifier['act_label'], None, split_method, train_size=0.9) \n\n self.train = data_train\n self.train_user = label_user_train\n self.val = data_val\n self.val_user = label_user_val", "def train_test_split(X: list, y: list, test_frac: float = 0.2, seed: int = 42) -> list:\n return train_test_split(X, y, test_size=test_frac, random_state = seed)", "def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = 
round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )", "def mask_test_train(data, split): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # find index of values which are not empty\n nonzero_inds = training_set.nonzero()\n\n # create list of index pairs\n nonzero_pairs = list(zip(nonzero_inds[0], nonzero_inds[1]))\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(split*len(nonzero_pairs)))\n\n # get random samples\n samples = random.sample(nonzero_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)", "def populate_train_test_val_dirs_nonrandomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)] if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names if filename not in val_file_names and filename not in test_file_names]\n\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')", "def split_data(x, y, ratio, seed=1):\n np.random.seed(seed)\n\n N = len(y)\n rat = int(np.floor(ratio*N))\n idx = np.random.choice(np.arange(len(x)), N, replace=False)\n \n x_ = x[idx]\n y_ = y[idx]\n \n train_x = x_[:rat]\n test_x = x_[rat:]\n \n train_y = y_[:rat]\n test_y = y_[rat:]\n \n return train_x, train_y, test_x, test_y", "def train_dev_test_split(sequence, train_frac, dev_frac, random_seed=None):\n if random_seed:\n random.seed(random_seed)\n if train_frac > 1 or train_frac < 0 or dev_frac > 1 or dev_frac < 0:\n raise ValueError(\"train and 
dev fractions must be between 0 and 1!\")\n if train_frac + dev_frac > 1:\n raise ValueError(\"sum of train and dev fractions can't be greater than 1!\")\n desired_train_num = int(len(sequence) * train_frac)\n desired_dev_num = int((len(sequence) * dev_frac))\n train = random.sample(sequence, desired_train_num)\n devtest = [item for item in sequence if item not in train]\n dev = random.sample(devtest, desired_dev_num)\n test = [item for item in devtest if item not in dev]\n assert len(train) + len(dev) + len(test) == len(sequence)\n return train, dev, test", "def splitTrainValidate(df, perc_training = 0.8):\n train = df.sample(frac=perc_training)#, random_state=200)\n validate = df.drop(train.index)\n return (train, validate)", "def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def split_data(x, y, ratio, seed=1):\n # number of value\n num_points = len(y)\n # compute the index that split the datas\n split = int(np.floor(num_points * ratio))\n\n # set the seed to the given value\n np.random.seed(seed)\n # compute random indexes for training and testing\n rand_indexes = np.random.permutation(num_points)\n index_training = rand_indexes[:split]\n index_testing = rand_indexes[split:]\n\n return x[index_training], y[index_training], x[index_testing], y[index_testing]", "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def train_test_split(filename: str, split=0.5) -> tuple:\n training_set = []\n test_set = []\n content = load_from_csv(filename)\n for _, value in enumerate(content):\n if random.random() < split:\n training_set.append(value)\n else:\n test_set.append(value)\n return training_set, test_set", "def train_val_test_split(messages, labels, split_frac, random_seed=None):\n # make sure that number of messages and labels allign\n assert len(messages) == len(labels)\n # random shuffle data\n if random_seed:\n np.random.seed(random_seed)\n shuf_idx = np.random.permutation(len(messages))\n messages_shuf = np.array(messages)[shuf_idx] \n labels_shuf = np.array(labels)[shuf_idx]\n\n #make splits\n split_idx = int(len(messages_shuf)*split_frac)\n train_x, val_x = messages_shuf[:split_idx], messages_shuf[split_idx:]\n train_y, val_y = labels_shuf[:split_idx], labels_shuf[split_idx:]\n\n test_idx = int(len(val_x)*0.5)\n val_x, test_x = val_x[:test_idx], 
val_x[test_idx:]\n val_y, test_y = val_y[:test_idx], val_y[test_idx:]\n\n return train_x, val_x, test_x, train_y, val_y, test_y", "def split_image(image, label):\n # [left, right, top, botton]\n left_margin = label[0]\n right_margin = image_size[0] - (label[0] + breach_size[0])\n top_margin = label[1]\n bottom_margin = image_size[1] - (label[1] + breach_size[1])\n margin = [left_margin, right_margin, top_margin, bottom_margin]\n # Calculate probabilities about the margin for all directions\n probabilities = normalize(margin)\n # Pick directions and boundary\n direction, size = random_pick(margin, probabilities)\n boundary = random.randint(0, size)\n\n image_np = image2matrix(image)\n image_new_np, deviation = random_split_matrix(image_np, boundary, direction)\n image_new = matrix2image(image_new_np)\n label_new = [label[0] + deviation[0], label[1] + deviation[1]]\n\n return image_new, label_new", "def train_test_split(X, y, test_size=0.33, random_state=None, shuffle=True):\n\n copyX = copy.deepcopy(X)\n copyY = copy.deepcopy(y)\n if random_state is not None:\n # TODO: seed your random number generator\n #Seed random number generator\n np.random.seed(random_state)\n \n if shuffle: \n # TODO: shuffle the rows in X and y before splitting\n # be sure to maintain the parallel order of X and y!!\n # note: the unit test for train_test_split() does not test\n # your use of random_state or shuffle, but you should still \n # implement this and check your work yourself\n copyX, copyY = myutils.randomize_in_place(copyX,copyY)\n\n #Define Variables\n X_train = []\n X_test = []\n y_train = []\n y_test = []\n prop_sum = 0.0\n numTest = 0\n proportion = 1.0/float(len(X))\n\n #Determine how many values to put in test set\n while(prop_sum < test_size):\n numTest = numTest + 1\n prop_sum = prop_sum + proportion\n \n #Put values in train/test sets\n for i in range(len(X)):\n if(test_size>=1):\n if(i<=len(X)-1-test_size):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n else:\n if(i<=len(X)-1-numTest):\n X_train.append(copyX[i])\n y_train.append(copyY[i])\n else:\n X_test.append(copyX[i])\n y_test.append(copyY[i])\n\n return X_train, X_test, y_train, y_test", "def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test", "def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n 
indices=np.arange(num_samples)\n #indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def split_training_validation(classes, validation_size = 0.2, shuffle = False):\n num_samples=len(classes)\n classes=np.array(classes)\n classes_unique=np.unique(classes)\n num_classes=len(classes_unique)\n indices=np.arange(num_samples)\n #indices_folds=np.zeros([num_samples],dtype=int)\n training_indice = []\n training_label = []\n validation_indice = []\n validation_label = []\n for cl in classes_unique:\n indices_cl=indices[classes==cl]\n num_samples_cl=len(indices_cl)\n\n # split this class into k parts\n if shuffle:\n random.shuffle(indices_cl) # in-place shuffle\n \n # module and residual\n num_samples_each_split=int(num_samples_cl*validation_size)\n res=num_samples_cl - num_samples_each_split\n \n training_indice = training_indice + [val for val in indices_cl[num_samples_each_split:]]\n training_label = training_label + [cl] * res\n \n validation_indice = validation_indice + [val for val in indices_cl[:num_samples_each_split]]\n validation_label = validation_label + [cl]*num_samples_each_split\n\n training_index = np.arange(len(training_label))\n random.shuffle(training_index)\n training_indice = np.array(training_indice)[training_index]\n training_label = np.array(training_label)[training_index]\n \n validation_index = np.arange(len(validation_label))\n random.shuffle(validation_index)\n validation_indice = np.array(validation_indice)[validation_index]\n validation_label = np.array(validation_label)[validation_index] \n \n \n return training_indice, training_label, validation_indice, validation_label", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n 
np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def make_splits(input_pkl, test_split=0.1, val_split=0.1):\n if (test_split > 1) or (val_split > 1) or (test_split + val_split > 1) or (test_split <= 0) or (val_split <= 0):\n logging.warning('Check the input for make splits, quitting')\n exit()\n\n main_dict = load_pickle(input_pkl)\n data, labels = main_dict['data'], main_dict['labels']\n idx_arr = np.random.choice(len(data), len(data))\n data, labels = data[idx_arr], labels[idx_arr]\n print(len(data[0][-1]))\n # Find the split sizes\n val_split = int(len(data) * val_split)\n test_split = val_split + int(len(data) * test_split)\n\n # Make and save the splits\n save_pickle({'data': data[:val_split], 'labels': labels[:val_split]}, 'data/val.pkl')\n save_pickle({'data': data[val_split:test_split], 'labels': labels[val_split:test_split]}, 'data/test.pkl')\n save_pickle({'data': data[test_split:], 'labels': labels[test_split:]}, 'data/train.pkl')", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def train_test_split_eopatches(patch_array, test_ratio, features_dict, labels_dict):\n # define EOPatches for training and testing\n trainIDs = list(range(len(patch_array)))\n testIDs = trainIDs[0::test_ratio] # take every xth patch for testing\n\n for elem in trainIDs:\n if elem in testIDs:\n trainIDs.remove(elem)\n\n # get number of features\n t, w, h, f = patch_array[0].data[features_dict].shape\n timeframes_count = t\n features_count = f\n f_count = t * f\n\n\n # create training and test dataset\n features_train = np.zeros([0, f_count])\n for eopatch in patch_array[trainIDs]:\n addfeatures_train = np.array([eopatch.data[features_dict]])\n p, t, w, h, f = addfeatures_train.shape\n addfeatures_train = np.moveaxis(addfeatures_train, 1, 3).reshape(p * w * h, t * f)\n features_train = np.concatenate((features_train, addfeatures_train))\n\n features_test = np.zeros([0, f_count])\n for eopatch in patch_array[testIDs]:\n addfeatures_test = np.array([eopatch.data[features_dict]])\n p, t, w, h, f = addfeatures_test.shape\n addfeatures_test = np.moveaxis(addfeatures_test, 1, 3).reshape(p * w * h, t * f)\n features_test = np.concatenate((features_test, addfeatures_test))\n\n labels_train = np.zeros([0, ])\n for eopatch in patch_array[trainIDs]:\n addlabels_train = np.array([eopatch.mask_timeless[labels_dict]])\n p, w, h, f = addlabels_train.shape\n addlabels_train = np.moveaxis(addlabels_train, 1, 2).reshape(p * w * h, 1).squeeze()\n labels_train = np.concatenate((labels_train, addlabels_train))\n\n labels_test = np.zeros([0, ])\n for eopatch in patch_array[testIDs]:\n addlabels_test = np.array([eopatch.mask_timeless[labels_dict]])\n p, w, h, f = 
addlabels_test.shape\n addlabels_test = np.moveaxis(addlabels_test, 1, 2).reshape(p * w * h, 1).squeeze()\n labels_test = np.concatenate((labels_test, addlabels_test))\n\n return features_train, features_test, labels_train, labels_test, timeframes_count, features_count", "def split_train_test(data: DF, test_ratio: float, random_state: tp.Optional[int] = None):\n if random_state:\n np.random.seed(random_state)\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]", "def split_training_testing(X, Y, gnd, negative=10000, per=0.05):\n df_x = pd.DataFrame(X)\n df_x['y'] = Y\n df_x['gnd'] = gnd\n df_x.sort_values(by=['y'], inplace=True, ascending=False)\n frac_positive = (df_x[df_x['y'] == 1].shape[0])/float(df_x.shape[0])\n split = int(frac_positive * per * df_x.shape[0])\n df_x.reset_index(drop=True, inplace=True)\n fraud = df_x[df_x['y'] == 1]\n # Shuffle inplace\n fraud = fraud.sample(frac=1, random_state=0).reset_index(drop=True)\n test = fraud.iloc[:split]\n train_ = fraud.iloc[split:]\n train = pd.concat([train_, df_x.iloc[fraud.shape[0]:].sample(n = negative, random_state=0)], ignore_index=True)\n # Shuffle inplace\n train = train.sample(frac=1, random_state=0).reset_index(drop=True)\n #train = randomSample(train, negative)\n y_train = train['y'].as_matrix()\n y_train_gnd = train['gnd'].as_matrix()\n train = train.drop(['y'], axis=1)\n train = train.drop(['gnd'], axis=1)\n \n y_test = test['y'].as_matrix()\n y_test_gnd = test['gnd'].as_matrix()\n test = test.drop(['y'], axis=1)\n test = test.drop(['gnd'], axis=1)\n return train.as_matrix(), y_train, y_train_gnd, test.as_matrix(), y_test, y_test_gnd", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def split_test_training(data_path, sequence_length):\n\n # logic for loading the CSV, using 'result' (2nd) column as basis for prediction\n with open(data_path) as f:\n record = csv.reader(f, delimiter=\",\")\n next(record, None)\n spat = []\n nb_of_values = 0\n for line in record:\n spat.append(float(line[2]))\n nb_of_values += 1\n\n # break file into chunks based on sequence length\n result = []\n for index in range(len(spat) - sequence_length):\n result.append(spat[index: index + sequence_length])\n result = np.array(result)\n\n # divide set into 20% for test, 80% for training\n row = int(round(0.8 * result.shape[0]))\n train = result[:row, :]\n np.random.shuffle(train)\n X_train = train[:, :-1]\n y_train = train[:, -1]\n X_test = result[row:, :-1]\n y_test = result[row:, -1]\n X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n\n return [X_train, y_train, X_test, y_test]", "def split_dataset(dataset, eval_proportion, shuffle=False):\n split_sizes = [1. 
- eval_proportion, eval_proportion]\n split_frames = []\n split_demos = []\n num_demos = dataset.get_num_demos()\n split_num_demos = [int(fraction * num_demos) for fraction in split_sizes]\n split_num_demos[0] += num_demos - sum(split_num_demos)\n num_instances = len(dataset)\n demos = list(range(num_demos))\n if shuffle:\n np.random.shuffle(demos)\n start_idx = 0\n for split_idx in range(len(split_sizes)):\n if split_sizes[split_idx] == 0:\n split_frames.append(None)\n continue\n split_frames.append([])\n split_demos.append(range(start_idx, start_idx + split_num_demos[split_idx]))\n for demo_idx in split_demos[split_idx]:\n demo_slice = dataset.get_demo_frame_idxs(demos[demo_idx])\n split_frames[split_idx].extend(\n list(range(demo_slice.start, demo_slice.stop)))\n start_idx += split_num_demos[split_idx]\n # Check if the split indices are unique\n assert len(set(split_frames[split_idx])) == len(split_frames[split_idx])\n\n if eval_proportion > 0:\n # Check that splits do not intersect\n for split_idx in range(len(split_frames)):\n for split_idx2 in range(split_idx + 1, len(split_frames)):\n assert len(set(split_frames[split_idx]).intersection(split_frames[split_idx2])) == 0\n assert sum([len(s) for s in split_frames]) == num_instances\n\n split_datasets = [Subset(dataset, split) if split is not None else None for split in split_frames]\n return split_datasets", "def data_split(folder=CONFIG.data_folder, val_proportion=CONFIG.val_proportion):\r\n files = os.listdir(folder)\r\n train_files, val_files, test_files = list(), list(), list()\r\n period = int(np.round(1 / val_proportion))\r\n\r\n corruption = list()\r\n\r\n max_H, max_W = 0, 0\r\n\r\n for (i, file) in enumerate(files):\r\n\r\n for char in file[:-4]:\r\n if char not in 'abcdefghijklmnopqrstuvwxyz':\r\n print(file)\r\n\r\n if i % period == 0:\r\n val_files.append(file)\r\n elif i % period == 1:\r\n test_files.append(file)\r\n else:\r\n train_files.append(file)\r\n try:\r\n image = rgb2grey(mpimg.imread(os.path.join(folder, file)))\r\n except ValueError:\r\n corruption.append(file)\r\n print(file)\r\n max_H = np.max([np.shape(image)[0], max_H])\r\n try:\r\n max_W = np.max([np.shape(image)[1], max_W])\r\n except IndexError:\r\n corruption.append(file)\r\n print(file)\r\n max_label_length = np.max([len(file) - 4 for file in files])\r\n\r\n return train_files, val_files, test_files, max_H, max_W, max_label_length + 2", "def train_test_split_custom(df, fraud_ratio=.1):\n fraud = df[df['Class'] == 1]\n clean = df[df['Class'] == 0]\n\n test_size_clean = int(len(fraud) * 1 / fraud_ratio)\n clean_train, clean_test = train_test_split(clean,\n test_size=test_size_clean,\n random_state=RANDOM_STATE)\n\n # Only clean to train on (autoencoder trained on non-abnormal data)\n train = clean_train\n train = train.sample(frac=1, random_state=RANDOM_STATE)\\\n .reset_index(drop=True)\n\n # Test with both clean and fraud\n test = pd.concat([fraud, clean_test])\n test = test.sample(frac=1, random_state=RANDOM_STATE)\\\n .reset_index(drop=True)\n\n return train, test", "def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n 
train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]", "def _sample_patches(imgs, \n labelimgs, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=0):\n samplelist = []\n \n # number of bands should be constant, therefore the dimensionality can be read from any \n # sub img\n bands = imgs[0].shape[-1]\n\n # calculate remapping for labels when removing `ignore_labels`\n # flatten labelimgs and convert to numpy array to use np.unique function on it\n flattened_labelimgs = np.concatenate([labelimg.reshape(-1) for labelimg in labelimgs])\n max_label = np.unique(flattened_labelimgs).max()\n remaining_labels = np.setdiff1d(np.arange(max_label+1), ignore_labels)\n label_remap = np.full((max_label+1), -1)\n for i, val in enumerate(remaining_labels):\n label_remap[val] = i\n\n valid_sample_count = 0\n for labelimg in labelimgs:\n valid_sample_count += np.invert(np.isin(labelimg, ignore_labels)).sum()\n print(f'Extracting {valid_sample_count} valid samples...')\n \n if ('data' in patchgroup) and ('labels' in patchgroup):\n # resize existing dataset to append patches from test set\n patchgroup['data'].resize((patchgroup['data'].shape[0] + valid_sample_count), axis=0)\n patchgroup['labels'].resize((patchgroup['labels'].shape[0] + valid_sample_count), axis=0)\n else:\n patchgroup.create_dataset('data', (valid_sample_count, patch_size, patch_size, bands)\n , chunks=(1, patch_size, patch_size, bands)\n , maxshape=(None, patch_size, patch_size, bands)\n , dtype=imgs[0].dtype) # datatype should be the same for all imgs\n patchgroup.create_dataset('labels', (valid_sample_count,1)\n , chunks=True, maxshape=(None, 1)\n , dtype=labelimgs[0].dtype) # datatype should be the same for all labelimgs\n \n idx = startidx\n with tqdm(total=valid_sample_count) as pbar:\n for img, labelimg in zip(imgs, labelimgs):\n\n # pad along spatial axes\n margin = int((patch_size - 1) / 2)\n X = np.pad(img, ((margin, margin), (margin, margin), (0,0)), \n mode=padding_mode, constant_values=padding_values) \n\n # split patches\n for r in range(margin, X.shape[0] - margin):\n for c in range(margin, X.shape[1] - margin):\n patchlabel = labelimg[r-margin, c-margin]\n\n # do not create a sample for 'ignore_labels'\n if patchlabel in ignore_labels:\n continue\n else :\n # correct label\n patchlabel = label_remap[patchlabel]\n\n patch = X[r - margin:r + margin + 1, c - margin:c + margin + 1]\n # store sample in hdf file\n patchgroup['data'][idx] = patch\n patchgroup['labels'][idx] = patchlabel\n\n # update\n idx += 1\n pbar.update(1)\n\n patchgroup.attrs['patch_size'] = patch_size\n patchgroup.attrs['padding_mode'] = padding_mode\n patchgroup.attrs['padding_values'] = padding_values\n patchgroup.attrs['ignore_labels'] = ignore_labels\n\n return valid_sample_count", "def split_data_crossvalid(data):\n X_trainfolder = []\n X_testfolder = []\n y_trainfolder = []\n y_testfolder = []\n data = data[data[:, 0].argsort()]\n number_one = np.count_nonzero(data[:, :1])\n data_one = data[np.where(data[:, 0] == 1)]\n data_zero = data[np.where(data[:, 0] == 0)]\n one_ratio = round(number_one / len(data), 1)\n one_zero_ratio = 1 - one_ratio\n batch_one = int(70 * one_ratio)\n batch_zero = int(70 * one_zero_ratio)\n batchs = 
len(data) // 70\n for i in range(batchs):\n test_one = data_one[i * batch_one:(i + 1) * batch_one, :]\n train_one = np.delete(data_one, test_one, axis = 0)\n test_zero = data_zero[i * batch_zero:(i + 1) * batch_zero, :]\n train_zero = np.delete(data_zero, test_zero, axis = 0)\n train_sets = np.concatenate((train_one, train_zero), axis=0)\n test_sets = np.concatenate((test_one, test_zero), axis=0)\n np.random.shuffle(train_sets)\n np.random.shuffle(test_sets)\n X_trainfolder.append(train_sets[:, 1:])\n y_trainfolder.append(train_sets[:, 0])\n X_testfolder.append(test_sets[:, 1:])\n y_testfolder.append(test_sets[:, 0])\n return X_trainfolder, y_trainfolder, X_testfolder, y_testfolder", "def train_test_split(\n self,\n default_train_prop: float,\n *,\n train_props: dict[str | int, dict[str | int, float]] | None = None,\n seed: int | None = None,\n ) -> NICOTrainTestSplit:\n # Initialise the random-number generator\n rng = np.random.default_rng(seed)\n # List to store the indices of the samples apportioned to the train set\n # - those for the test set will be computed by complement\n train_inds: list[int] = []\n # Track which indices have been sampled for either split\n unvisited = np.ones(len(self), dtype=np.bool_)\n\n def _sample_train_inds(\n _mask: np.ndarray,\n *,\n _context: str | int | None = None,\n _concept: str | None = None,\n _train_prop: float = default_train_prop,\n ) -> list[int]:\n if _context is not None and _concept is None:\n raise ValueError(\"Concept must be specified if context is.\")\n if _context is not None:\n # Allow the context to be speicifed either by its name or its label-encoding\n _context = (\n self.context_label_decoder(_context) if isinstance(_context, int) else _context\n )\n if _context not in self.class_tree[_concept]:\n raise ValueError(\n f\"'{_context}' is not a valid context for concept '{_concept}'.\"\n )\n # Condition the mask on the context\n _mask = _mask & (self.metadata[\"context\"] == _context).to_numpy()\n # Compute the overall size of the concept/context subset\n _subset_size = np.count_nonzero(_mask)\n # Compute the size of the train split\n _train_subset_size = round(_train_prop * _subset_size)\n # Sample the train indices (without replacement)\n _train_inds = rng.choice(\n np.nonzero(_mask)[0], size=_train_subset_size, replace=False\n ).tolist()\n # Mark the sampled indices as 'visited'\n unvisited[_mask] = False\n\n return _train_inds\n\n if train_props is not None:\n for concept, value in train_props.items():\n # Allow the concept to be speicifed either by its name or its label-encoding\n concept = (\n self.concept_label_decoder[concept] if isinstance(concept, int) else concept\n )\n if concept not in self.class_tree.keys():\n raise ValueError(f\"'{concept}' is not a valid concept.\")\n concept_mask = (self.metadata[\"concept\"] == concept).to_numpy()\n # Specifying proportions at the context/concept level, rather than concept-wide\n if isinstance(value, dict):\n for context, train_prop in value.items():\n train_inds.extend(\n _sample_train_inds(\n _mask=concept_mask,\n _concept=concept,\n _context=context,\n _train_prop=train_prop,\n )\n )\n # Split at the class level (without conditioning on contexts)\n else:\n train_inds.extend(\n _sample_train_inds(_mask=concept_mask, _context=None, _train_prop=value)\n )\n # Apportion any remaining samples to the training set using default_train_prop\n train_inds.extend(_sample_train_inds(_mask=unvisited, _train_prop=default_train_prop))\n # Compute the test indices by complement of the train 
indices\n train_data = self.make_subset(indices=train_inds)\n test_inds = list(set(range(len(self))) - set(train_inds))\n test_data = self.make_subset(indices=test_inds)\n\n return NICOTrainTestSplit(train=train_data, test=test_data)", "def split_train_test_genes(length_wise_binned_genes, train_proportion=0.7):\n np.random.seed(42)\n training_genes = []\n testing_genes = []\n for bin_number, bin_genes in list(length_wise_binned_genes.items()):\n n_genes = len(bin_genes)\n np.random.shuffle(bin_genes)\n training_genes += bin_genes[:int(n_genes * train_proportion)]\n testing_genes += bin_genes[int(n_genes * train_proportion):]\n\n return training_genes, testing_genes", "def partition_train_valid_test2(data, classes, others, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n if others is not None:\n train_set_others=others[train_ind]\n else:\n train_set_others=None\n valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n if others is not None:\n valid_set_others=others[valid_ind]\n else:\n valid_set_others=None\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n if others is not None:\n test_set_others=others[test_ind]\n else:\n test_set_others=None\n \n return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others", "def splitData(filename, testing_set_percentage):\n matFile = sio.loadmat(filename)\n data = matFile['mydata']\n\n np.savetxt('test.out', data, fmt='%d', delimiter=',') # X is an array\n print('BEFORE SHUFFLE', data)\n\n np.random.shuffle(data)\n\n np.savetxt('test_after.out', data, fmt='%d', delimiter=',') # X is an array\n\n print('AFTER SHUFFLE', data)\n\n r, c = np.array(data).shape\n\n testing_set_size = int(r * testing_set_percentage)\n training_set_size = r - testing_set_size\n\n train_data = data[:training_set_size]\n test_data = data[training_set_size:]\n\n return train_data, test_data", "def make_testing_training(data, percent_training, random_split=False, seed=None):\n ## Making testing and training sets\n data['computed Case Date/Time Closed'] = pd.to_datetime(data['Case Date/Time Closed'])\n ordered_data = data.sort(columns=['computed Case Date/Time Closed'])\n np.random.seed(seed=seed) \n nrows, ncols = ordered_data.shape\n\n if random_split:\n training_indices = np.random.choice(ordered_data.index, size=int(nrows*percent_training), replace=False)\n training = ordered_data.ix[training_indices]\n testing = ordered_data[~data['case_id'].isin(training['case_id'])]\n else: # split by date\n training_stop_index = int(percent_training * nrows)\n training = ordered_data[:training_stop_index]\n testing = ordered_data[training_stop_index:]\n\n return training, testing", "def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data 
after shuffling '''\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')", "def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df", "def train_val_split(self):\n idx = np.arange(self.num_data)\n np.random.shuffle(idx)\n val_num = int(self.ratio * self.num_data)\n dev_num = int(self.dev_ratio * self.num_data)\n self.num_train = self.num_data - val_num\n\n self.val_data = self.data[idx[:val_num]]\n self.val_label = self.label[idx[:val_num]]\n \n self.train_data = self.data[idx[val_num:]]\n self.train_label = self.label[idx[val_num:]]\n\n self.dev_data = self.data[idx[:dev_num]]\n self.dev_label = self.label[idx[:dev_num]]", "def _split_by_patients(self, patients, val_split=0.2, test_split=0.1, random_state=42):\n train, test = train_test_split(patients, test_size=test_split, random_state=random_state)\n train, val = train_test_split(train, test_size=val_split, random_state=random_state)\n\n return train, val, test", "def train_test_split(X, y, test_portion):\n joint_list = list(zip(X, y))\n random.shuffle(joint_list)\n\n shuffled_X, shuffled_y = zip(*joint_list)\n pivot = int(len(X) * test_portion)\n\n return (\n shuffled_X[pivot:],\n shuffled_X[:pivot],\n shuffled_y[pivot:],\n shuffled_y[:pivot],\n )", "def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n # Creating partitions of the data after shuffling\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n # Print the file distribution amongst the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n print(train_file_names)\n\n # Copy-Pasting Images\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n 
shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')", "def train_val_test_split(df):\n from sklearn.model_selction import train_test_split\n train, test = train_test_split(df, train_size = 0.80, test_size=0.20,\n random_state = 42)\n train, val = train_test_split(train, train_size = 0.70, val_size=0.30)\n print(train.shape, val.shape, test.shape)\n\n return train, val, test", "def get_random(everything_path, split_ratio=0.5, seed=0):\n # Loading data\n with h5py.File(everything_path, 'r') as data:\n labels = data['labels'].value\n \n # Splitting classes\n fri_i = np.where(labels==1)[0]\n frii_i = np.where(labels==2)[0]\n rand_i = np.where(labels==0)[0]\n\n # Shuffling\n fri_i = shuffle(fri_i, random_state=seed)\n frii_i = shuffle(frii_i, random_state=seed)\n rand_i = shuffle(rand_i, random_state=seed)\n\n # Splitting into training and testing sets\n cut = int(np.round(split_ratio * fri_i.shape[0]))\n train_fri = fri_i[cut:]\n test_fri = fri_i[:cut]\n\n cut = int(np.round(split_ratio * frii_i.shape[0]))\n train_frii = frii_i[cut:]\n test_frii = frii_i[:cut]\n\n cut = int(np.round(split_ratio * rand_i.shape[0]))\n train_rand = rand_i[cut:]\n test_rand = rand_i[:cut]\n\n train_i = np.concatenate((train_fri, train_frii, train_rand), axis=0)\n test_i = np.concatenate((test_fri, test_frii, test_rand), axis=0)\n\n return train_i, test_i", "def split_train_val(self,ratio=.1):\n lim = int(np.ceil(len(self.train) * ratio))\n order = list(range(len(self.train)))\n np.random.shuffle(order)\n self.train_train = self.train.ix[order[lim:]]\n self.train_val = self.train.ix[order[:lim]]\n log(\"Split data into training/val: {} -> {} {}\".format(\n len(self.train),len(self.train_train),lim))", "def train_test_split(X, y, test_size=0.33, random_state=None, shuffle=True):\r\n if random_state is not None:\r\n random.seed(random_state)\r\n \r\n if shuffle: \r\n myutils.randomize_in_place(X, parallel_list=y)\r\n\r\n num_instances = len(X)\r\n if isinstance(test_size, float):\r\n test_size = math.ceil(num_instances * test_size)\r\n split_index = num_instances - test_size\r\n \r\n return X[:split_index], X[split_index:], y[:split_index], y[split_index:]", "def split_dataset(frames, frames_json, patch_dir, test_size = 0.2, val_size = 0.2):\n if os.path.isfile(frames_json):\n print(\"Reading train-test split from file\")\n with open(frames_json, 'r') as file:\n fjson = json.load(file)\n training_frames = fjson['training_frames']\n testing_frames = fjson['testing_frames']\n validation_frames = fjson['validation_frames']\n else:\n print(\"Creating and writing train-test split from file\")\n frames_list = list(range(len(frames)))\n # Divide into training and test set\n training_frames, testing_frames = train_test_split(frames_list, test_size=test_size)\n\n # Further divide into training set into training and validataion set\n training_frames, validation_frames = train_test_split(training_frames, test_size=val_size)\n frame_split = {\n 'training_frames': training_frames,\n 'testing_frames': testing_frames,\n 'validation_frames': validation_frames\n }\n if 
not os.path.exists(patch_dir):\n os.makedirs(patch_dir)\n with open(frames_json, 'w') as f:\n json.dump(frame_split, f)\n\n print('training_frames', training_frames)\n print('validation_frames', validation_frames)\n print('testing_frames', testing_frames)\n return (training_frames,validation_frames, testing_frames )", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def split_patch_train_val(data_dir, output_dir, label_file, stride, patch_size,\n slice_steps=1, per_val=0.2, log_config=None):\n\n if log_config is not None:\n logging.config.fileConfig(log_config)\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Splitting data into patches .... \")\n logger.info(f\"Reading data from {data_dir}\")\n\n logger.info(f\"Loading {label_file}\")\n labels = np.load(label_file)\n logger.debug(f\"Data shape [iline|xline|depth] {labels.shape}\")\n\n iline, xline, depth = labels.shape\n # Inline sections\n train_iline_range, val_iline_range = _get_aline_range(iline,\n per_val,\n slice_steps)\n\n # Xline sections\n train_xline_range, val_xline_range = _get_aline_range(xline,\n per_val,\n slice_steps)\n\n # Generate patches from sections\n # Vertical locations is common to all patches processed\n vert_locations = range(0, depth - patch_size, patch_size)\n logger.debug(vert_locations)\n\n # Process inlines\n def _i_extract_patches(iline_range, horz_locations, vert_locations):\n for i in iline_range:\n locations = ([j, k] for j in horz_locations\n for k in vert_locations)\n for j, k in locations:\n yield \"i_\" + str(i) + \"_\" + str(j) + \"_\" + str(k)\n\n # Process inlines - train\n logger.debug(\"Generating Inline patches\")\n logger.debug(\"Generating Inline patches - Train\")\n # iline = xline x depth\n val_iline = math.floor(xline * per_val / 2)\n logger.debug(val_iline)\n\n # Process ilines - train\n horz_locations_train = range(val_iline, xline - val_iline, max(1,patch_size))\n logger.debug(horz_locations_train)\n train_i_list = list(_i_extract_patches(train_iline_range,\n horz_locations_train,\n vert_locations))\n\n # val_iline - define size of the validation set for the fist part\n val_iline_range = list(val_iline_range)\n\n # Process inlines - validation\n horz_locations_val = itertools.chain(range(0, val_iline, max(1,patch_size)),\n range(xline - val_iline, xline, max(1,patch_size)))\n val_iline_range = list(val_iline_range)\n val_i_list = list(_i_extract_patches(val_iline_range,\n horz_locations_val,\n vert_locations))\n\n logger.debug(train_iline_range)\n logger.debug(val_iline_range)\n\n # Process crosslines\n def _x_extract_patches(xline_range, horz_locations, vert_locations):\n for j in xline_range:\n locations = ([i, k] for i in horz_locations\n for k in vert_locations)\n for i, k in locations:\n yield \"x_\" + str(i) + \"_\" + str(j) + \"_\" + str(k)\n\n logger.debug(\"Generating Crossline patches\")\n logger.debug(\"Generating Crossline patches - Train\")\n # xline = iline x depth\n val_xline = math.floor(iline * per_val / 2)\n logger.debug(val_xline)\n\n # Process xlines - train\n horz_locations_train = range(val_xline, iline - val_xline, max(1,patch_size))\n logger.debug(horz_locations_train)\n train_x_list = list(_x_extract_patches(train_xline_range,\n horz_locations_train,\n vert_locations))\n\n # val_xline 
- define size of the validation set for the fist part\n val_xline_range = list(val_xline_range)\n\n # Process xlines - validation\n horz_locations_val = itertools.chain(range(0, val_xline, max(1,patch_size)),\n range(iline - val_xline, iline, max(1,patch_size)))\n val_xline_range = list(val_xline_range)\n val_x_list = list(_x_extract_patches(val_xline_range,\n horz_locations_val,\n vert_locations))\n\n logger.debug(train_xline_range)\n logger.debug(val_xline_range)\n\n train_list = train_x_list + train_i_list\n val_list = val_x_list + val_i_list\n\n logger.debug(train_list)\n logger.debug(val_list)\n\n\n # write to files to disk:\n # NOTE: This isn't quite right we should calculate the patches\n # again for the whole volume\n logger.info(f\"Writing {output_dir}\")\n _write_split_files(output_dir, train_list, val_list, \"patch\")", "def train_test_split(X_df, y_df, frac):\n \n X = np.array(X_df)\n y = np.array(y_df)\n skf = StratifiedShuffleSplit(n_splits=2, test_size=(1-frac))\n for train_index, test_index in skf.split(X, y):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n return X_train, X_test, y_train, y_test", "def split_dataset(dataset, Ntotal, val_frac,\n batch_size, num_workers,\n random_seed=0, shuffle=True, balance=False):\n \n Nval = math.floor(Ntotal*val_frac)\n train_ds, val_ds = ch.utils.data.random_split(dataset, \n [Ntotal - Nval, Nval], \n generator=ch.Generator().manual_seed(random_seed))\n if balance: \n val_ds = balance_dataset(val_ds)\n split_datasets = [train_ds, val_ds]\n \n split_loaders = []\n for ds in split_datasets:\n split_loaders.append(ch.utils.data.DataLoader(ds, \n num_workers=num_workers, \n batch_size=batch_size, \n shuffle=shuffle))\n return split_datasets, split_loaders", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def partition_train_valid_test(data, classes, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y", "def split_train_test(data, test_ratio):\n shuffled_index = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n\n train_indices = shuffled_index[test_set_size:]\n test_indices = shuffled_index[:test_set_size]\n\n return data.iloc[train_indices], data.iloc[test_indices]", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split_data(x, y, ratio, seed=1):\n # set 
seed\n np.random.seed(seed)\n # source: https://stackoverflow.com/a/3677283\n indices = np.random.permutation(x.shape[0])\n rate = int(np.floor(indices.shape[0] * ratio))\n training_idx, test_idx = indices[:rate], indices[rate:]\n training = (x[training_idx], y[training_idx])\n test = (x[test_idx], y[test_idx])\n return training, test", "def train_test_split(y, tx, ratio, seed=1):\n np.random.seed(seed)\n permutation = np.random.permutation(len(y))\n shuffled_tx = tx[permutation]\n shuffled_y = y[permutation]\n split_position = int(len(y) * ratio)\n tx_training, tx_test = shuffled_tx[: split_position], shuffled_tx[split_position:]\n y_training, y_test = shuffled_y[: split_position], shuffled_y[split_position:]\n return y_training, tx_training, y_test, tx_test", "def train_val_test_split(data):\n raise NotImplementedError", "def validation_split(D_exp, val_fraction):\n n = D_exp['x'].shape[0]\n\n if val_fraction > 0:\n n_valid = int(val_fraction*n)\n n_train = n-n_valid\n I = np.random.permutation(range(0,n))\n I_train = I[:n_train]\n I_valid = I[n_train:]\n else:\n I_train = range(n)\n I_valid = []\n\n return I_train, I_valid", "def validation_split(D_exp, val_fraction):\n n = D_exp['x'].shape[0]\n\n if val_fraction > 0:\n n_valid = int(val_fraction * n)\n n_train = n - n_valid\n I = np.random.permutation(range(0, n))\n I_train = I[:n_train]\n I_valid = I[n_train:]\n else:\n I_train = range(n)\n I_valid = []\n\n return I_train, I_valid" ]
[ "0.6673594", "0.6663889", "0.6621361", "0.65526426", "0.6512163", "0.6374599", "0.63245976", "0.6266016", "0.62305325", "0.62242997", "0.6173855", "0.61674833", "0.61449915", "0.612918", "0.610995", "0.6105582", "0.6080789", "0.6075082", "0.6059651", "0.6052234", "0.6046871", "0.6004282", "0.59943306", "0.5974477", "0.59685", "0.59511316", "0.5950437", "0.5943239", "0.5943087", "0.5938203", "0.5934355", "0.592911", "0.5928251", "0.5916622", "0.5903908", "0.5887399", "0.58778197", "0.5873128", "0.5869712", "0.5850146", "0.5843678", "0.58417696", "0.58359635", "0.5811954", "0.5807129", "0.5806719", "0.58029705", "0.57982713", "0.5788608", "0.57824725", "0.5771795", "0.5771784", "0.5766875", "0.57635975", "0.57606757", "0.57595557", "0.5756374", "0.5756374", "0.57545984", "0.57430404", "0.57384145", "0.57375497", "0.5732483", "0.5725433", "0.57208866", "0.5709349", "0.5705076", "0.5704674", "0.57035214", "0.56958866", "0.5693875", "0.56882685", "0.5681423", "0.5677424", "0.5676958", "0.56763405", "0.5674828", "0.56695366", "0.566757", "0.56663865", "0.56577486", "0.565152", "0.56456476", "0.563895", "0.5638799", "0.5634619", "0.5631268", "0.56285465", "0.5627288", "0.5615079", "0.5614688", "0.5612523", "0.5610941", "0.56101143", "0.56097025", "0.56073606", "0.56023836", "0.56019074", "0.56005704", "0.56004924", "0.5593267" ]
0.0
-1
Label all columns in [start, start+n) with label.
def _select(start, n, label) -> int:
    n_selected = 0
    for i in range(start, int(start + n)):
        x = self._x_positions[i]
        n_selected += self._cols[x].mark_as(label)
    return n_selected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_excel_column_labels(start: str, end: str):\n if not start or not end:\n raise ValueError(\"{0} missing\".format(\"start\" if start is None else \"end\"))\n\n end = end.upper()\n start = start.upper()\n\n _check_start_end_acceptable(start, end)\n\n range_builder = [start]\n\n start_list = _convert_str_to_base_26(start)\n end_list = _convert_str_to_base_26(end)\n\n # we always want to add start_list value, we're fairly sure that end >= start\n while start_list != end_list:\n start_list = _base_26_number_plus_1(start_list)\n # convert this funny base 26 list to a string\n range_builder.append(\"\".join([chr(x + 64) for x in start_list]))\n return range_builder", "def label_columns(df,feats,prefix):\n feats_new=[prefix+x for x in feats]\n df=df.rename(columns=dict(zip(feats,feats_new)))\n return df", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def create_labels_base(df, col_name, window_size=11):\n\n #self.log(\"creating label with original paper strategy\")\n row_counter = 0\n total_rows = len(df)\n labels = np.zeros(total_rows)\n labels[:] = np.nan\n print(\"Calculating labels\")\n\n while row_counter < total_rows:\n if row_counter >= window_size - 1:\n window_begin = row_counter - (window_size - 1)\n window_end = row_counter\n window_middle = int((window_begin + window_end) / 2)\n\n min_ = np.inf\n min_index = -1\n max_ = -np.inf\n max_index = -1\n for i in range(window_begin, window_end + 1):\n price = df.iloc[i][df.columns.get_loc(col_name)]\n if price < min_:\n min_ = price\n min_index = i\n if price > max_:\n max_ = price\n max_index = i\n\n if max_index == window_middle:\n labels[window_middle] = 0\n elif min_index == window_middle:\n labels[window_middle] = 1\n else:\n labels[window_middle] = 2\n\n row_counter = row_counter + 1\n\n return labels", "def _set_labels(loop_data):\n \n # Ensure the index is linear, but set it up so the original\n # index can be replaced\n old_columns = loop_data.columns\n loop_data = loop_data.reset_index(drop=False)\n index_column = list(set(loop_data.columns) - set(old_columns))\n \n # The non-zero values of the loops and their new index\n tag_values = loop_data.ix[loop_data.loop !=0,u'loop'].tolist()\n tag_index = loop_data.ix[loop_data.loop !=0].index + 1\n\n loop_data[u'tag'] = 0\n loop_data.ix[tag_index, u'tag'] = tag_values\n \n # Reset the index\n loop_data.set_index(index_column, drop=True)\n if u'index' in loop_data.columns:\n loop_data = loop_data.drop([u'index'], axis = 1)\n \n return loop_data", "def labels(self, start, end, numlabels=None, char_width=None):\n ticks = self.ticks(start, end, numlabels)\n labels = self.formatter.format(ticks, numlabels, char_width)\n return zip(ticks, labels)", "def labeling_func(df_clus):\n\n df_all_labeled = df_all_columns.copy()\n df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()\n df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)\n for i in range(0, clus_params['n_components']):\n df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()\n\n return df_all_labeled", "def range_to_label(arange):\r\n # pass\r\n C = arange.size - 1\r\n label = np.ones((arange[-1], ), dtype=np.int)\r\n for i in xrange(1, C):\r\n label[arange[i]: arange[i+1]] *= (i+1)\r\n return label", "def label_columns(mapping):\n columns = []\n for name, column in 
mapping.items():\n columns.append(column.label(name))\n return columns", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def populateWithComplexLabels(self):\n # setting righe e colonne\n self.table.setRowCount(0)\n self.table.setColumnCount(0)\n rows = self.table.rowCount()\n cols = self.table.columnCount()\n if rows != 6:\n self.table.setRowCount(6)\n if cols != 7:\n self.table.setColumnCount(7)\n for row in range(6):\n for col in range(7):\n itemWidget = ComplexLabel(self.table)\n self.table.setCellWidget(row, col, itemWidget)\n self.setComplexLabels(self.indexMonth)\n self.formatHeaderNames()", "def _starts(self, column_labels):\n val = [self[c][0] for c in column_labels]\n starts = [0]\n values = [val]\n for i in range(1,self.num_rows):\n ival = [self[c][i] for c in column_labels ]\n if ival != val:\n starts.append(i)\n values.append(ival)\n val = ival\n return values, starts", "def auto_labels(self,top=True,bottom=True,top_label='',bottom_label='',\\\n col_index=0,row_index=0):\n param=self.x_param\n\n top_label=[top_label+\" \"+ x for x in param.labels]\n\n bottom_label=[bottom_label+\"{:02d} x {:02d}\".format(col_index,y) for y in range(row_index,row_index+len(param))]\n\n if top==True :\n\n self.labels_top=top_label\n\n else:\n\n self.labels_top=None\n\n if bottom==True :\n\n self.labels_bottom=bottom_label\n\n else:\n\n self.labels_bottom=None", "def repair_labels(labels):\n ret = np.copy(labels)\n ret[:, 0] = 10 # overwrite length to be stop seq\n ret = np.roll(ret, -1, axis=1) # move first to last\n return ret", "def setMyLabels(labels, axes='XYZ', step=1):\n resetParameter('mylab')\n for index in range(0, len(labels), step):\n dislin.mylab(labels[index], index/step+1, axes)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, v):\n return self._set(labelColumn=v)", "def _get_padded_column_labels(self):\n rmax = max(len(s) for s in self.row_labels)\n cmax = max(len(s) for s in self.column_labels)\n column_labels = [''] * rmax + self.column_labels\n return [Monospace.left_justify(x, cmax, '.') for x in column_labels]", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def fitCols(self, col_start, col_end, sheet):\r\n col_n = col_start\r\n while col_n <= col_end:\r\n self.fitCol(col_n, sheet)\r\n col_n = col_n + 1", "def assign_labels(dataset: pd.DataFrame, hypershapes: Dict[int, Dict], n_classes: int) -> pd.DataFrame:\n\n labels = np.zeros(shape=(len(dataset), n_classes))\n labels = pd.DataFrame(labels, columns=[\"l{}\".format(i) for i in range(n_classes)])\n for point, label in zip(dataset.values, labels.values):\n for cla in hypershapes:\n for shape in hypershapes[cla].values():\n if shape[\"shape\"] == \"cubes\":\n if is_point_inside_hypercube(point, shape[\"center\"], shape[\"radius\"]):\n label[int(cla)] = 1\n elif 
shape[\"shape\"] == \"spheres\":\n if is_point_inside_hypersphere(point, shape[\"center\"], shape[\"radius\"]):\n label[int(cla)] = 1\n elif shape[\"shape\"] == \"moons\":\n if is_point_inside_hypermoon(point, (shape[\"center_big\"], shape[\"center_small\"]),\n (shape[\"radius_big\"], shape[\"radius_small\"])):\n label[int(cla)] = 1\n\n return labels", "def assign_random_labels(dataset: pd.DataFrame, n_classes: int) -> pd.DataFrame:\n labels = np.zeros(shape=(len(dataset), n_classes))\n labels = pd.DataFrame(labels, columns=[\"l{}\".format(i) for i in range(n_classes)])\n for label in labels.values:\n for cla in range(n_classes):\n label[cla] = random.randint(0, 1)\n\n return labels", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def _get_labels(self, ind):\n pass", "def add_labels(self, labels):\n for i, axis in enumerate(self.bottom):\n self.grid[axis].set_xlabel(labels[i])\n\n for i, axis in enumerate(np.array(self.left)[-1::-1]):\n if axis == self.upperleft:\n continue\n\n self.grid[axis].set_ylabel(labels[i]) \n\n pl.draw()", "def digit_indices_to_labels(digits_run1, digits_run2):\n labels_run1, labels_run2 = np.zeros(shape=256), np.zeros(shape=256)\n for finger_i in range(1, 6):\n labels_run1[digits_run1[finger_i - 1]] = finger_i\n labels_run2[digits_run2[finger_i - 1]] = finger_i\n return labels_run1, labels_run2", "def label(filenames, train_path='../data/train_molecules_30.mat'):\n unlabeled = [scipy.io.loadmat(fname) for fname in filenames]\n unlabeled_X = np.vstack([data['X'] for data in unlabeled])\n X, Y = load_data(train_path, shape=(-1, 30, 30, 30))\n\n num_unlabeled = unlabeled_X.shape[0]\n unlabeled_Y = np.zeros(num_unlabeled) - 1\n unlabeled_Y = unlabeled_Y.reshape((-1, 1))\n Y = Y.reshape((-1, 1))\n Y_all = np.vstack((Y, unlabeled_Y))\n\n X_all = np.vstack((X, unlabeled_X))\n X_all = X_all.reshape((-1, 27000))\n\n label_prop_model = LabelSpreading()\n label_prop_model.fit(X_all, Y_all)\n Y_all = label_prop_model.transduction_\n unlabeled_Y = Y_all[num_unlabeled:]\n return (unlabeled_X, unlabeled_Y), (X_all, Y_all)", "def make_labels(self, ilines):\n\n llist = []\n for lind, lstr in enumerate(ilines):\n # get label and value list\n rv, label, vals = self.get_label_vals(lstr)\n if rv < 1: continue\n\n nvals = len(vals)\n\n # label = self.find_parent_label(label)\n\n if self.verb > 2: print('++ label: %s, %d val(s)' % (label, nvals))\n\n llist.append(label)\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0\n\n if not UTIL.vals_are_unique(llist):\n print('** warning: labels are not unique, will use only last values')\n llist = UTIL.get_unique_sublist(llist)\n\n return 0, llist", "def createlabel(q, n):\n # When using dec2base function make sure to pad the string with the right number of zeros e.g for base 3 dec2base\n # gives 1 rather than 01 if we were dealing with 2 qubits.\n # The number of kraus matrices or labels is n^q\n\n label = []\n for i in range(pow(n, q)):\n label.append(dec2base(i, n))\n\n # Next we make sure that each element in the label list has length the number of qubits if not add a zero\n for x in range(len(label)):\n if len(label[x]) < q:\n label[x] = label[x].zfill(q)\n else:\n break\n return label", "def labeledfeatures(eqdata, featurefunc, labelfunc):\n _size = len(eqdata.index)\n _labels, _skipatend = labelfunc(eqdata)\n _features, _skipatstart = featurefunc(eqdata.iloc[:(_size - _skipatend), :])\n return _features, _labels.iloc[_skipatstart:, :]", "def label_extraction(self) -> None:\n 
self.df[\"label\"] = self.df[\"y\"]", "def _set_columns(self, start, end):\n if start <= end <= self.width:\n self._write(ST7789_CASET, _encode_pos(\n start+self.xstart, end + self.xstart))", "def relabel_partial(df):\n df = df.reset_index()\n\n df['label_shifted'] = df['label'].shift(-1)\n df['label'] = np.where(df['label'] < df['label_shifted'],\n df['label_shifted'],\n df['label'])\n df = df.drop(['label_shifted'], axis=1)\n\n # Make it multiindex\n df['event'] = df.index\n df = df.set_index(['sample_nr', 'event'])\n df = df.reset_index('event', drop=True)\n df = df.set_index(df.groupby(level=0).cumcount().rename('event'), append=True)\n df = df.sort_index()\n\n return df", "def display_labels(self):\n\n nsubj = len(self.infiles)\n\n print('-- final label table (length %d):' % len(self.labels))\n for label in self.labels:\n nv = self.maxcounts[label]\n if nv == 1: cstr = '%3d val' % nv\n else: cstr = '%3d vals' % nv\n nv = self.subjcounts[label]\n if nv == 1: sstr = '%3d file' % nv\n else: sstr = '%3d files' % nv\n\n if nv < nsubj: short = ' (short)'\n else: short = ''\n print('%-30s : %-10s : %-10s%s' % (label, cstr, sstr, short))", "def matlabels(df, rowlabel_fn):\n return df.index.to_frame().apply(rowlabel_fn, axis=1)", "def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg), dtype=np.int8)\n labels[:len(pos)] = 1\n labels[len(pos):] = 0\n return labels", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. (default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def _label(self, column):\n # XXX\n return column", "def addlabels(x, y):\n\n for i in range(len(x)):\n plt.text(i, y[i], y[i], ha='center')", "def appendFromBounds(self, label=None, p1=None, p2=None, n=None):\n\n di = (p2 - p1) / n\n\n p = p1\n self.appendUpper(label=label, p=p)\n for i in range(1, n):\n p += di\n self.appendUpper(label=label, p=p)\n self.appendUpper(label=label, p=p2)", "def labelTable(self,table):\n \n for sslice,_, lFields in self._lLabellingInstruction:\n for field in lFields:\n if field is not None:\n try:\n for cell in np.nditer(table.getNPArray()[sslice],['refs_ok'],op_dtypes=np.dtype(object)):\n cell[()].addField(field.cloneMe())\n except: pass", "def create_board_binary_labels(sheet, columns):\r\n\r\n use_column = 0\r\n row = 1\r\n for i in range(64): # cycles through 64 cells\r\n column = columns[use_column]\r\n row = row\r\n\r\n sheet[column + str(row)] = format(i, '06b') # input sequenced 6-bit binary number\r\n\r\n if use_column == 7: # stops and resets column count\r\n use_column = 0\r\n row += 1\r\n else:\r\n use_column += 1\r\n\r\n print('The board has been labeled in 6-bit binary.')", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def relabel_consecutive(lab, start_from=0):\n\n new_lab = np.empty_like(lab)\n new_lab[:] = np.unique(lab, return_inverse=True)[1]\n new_lab += start_from\n return new_lab", "def makeTableNamesList(n, ):", "def FixColumnLabels(cv):\n l = []\n for label in cv[0].columns:\n if \"-\" not in label and label != \"Elapsed\":\n l.append(label + \"-UT\")\n if \"-\" in label or label == \"Elapsed\":\n l.append(label)\n\n for d in cv:\n d.columns = l\n\n return cv", "def mask(n, start, end):\n columns = []\n value = 1\n for i in 
range(n):\n if start <= end:\n columns.append(value if (start <= i < end) else 0)\n else:\n columns.append(value if (start <= i or i < end) else 0)\n value <<= 1\n return BitColumnMatrix(columns)", "def setIntegerLabels():\n dislin.intax()", "def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg))\n labels[:len(pos)] = 1.0\n labels[len(pos):] = 0.0\n return labels", "def labels(self, labels):\n self._instructions_setter('LABEL', labels)", "def _next_unlabelled_col(x):\n for i in range(self.n_cols):\n idx = (x + i) % self.n_cols\n x_current = self._x_positions[idx]\n if self._cols[x_current].label is None:\n return idx", "def getLabels(df, eps=3, min_samples=100):\n #instantiate dbscan\n db = DBSCAN(eps=eps, \n min_samples=min_samples, \n metric='euclidean', \n n_jobs=-1\n )\n \n #fit and predict to data\n db.fit_predict(df[['x', 'y']])\n \n #Returns the sorted unique elements of an array\n labels_unique = np.unique(db.labels_)\n #drop the -1 labels which are unlabeled\n labels_unique = labels_unique[labels_unique != -1]\n \n \n return db.labels_, labels_unique", "def set_label(self, labels_set=None):\n for pos in labels_set:\n self._q_bnn_circ.x(self.outputs[int(pos)])", "def _compute_columns(log: EventLog, prefix_length: int, padding: bool) -> list:\n return [\"trace_id\"] + \\\n sorted(list({\n event['concept:name']\n for trace in log\n for event in trace[:prefix_length]\n })) + \\\n ['0'] if padding else [] + \\\n ['label']", "def labelingLVQ(self):\n numLabels = len(np.unique(self.y))\n for i, x in enumerate(self.x):\n w = self.find_closest(x)[0]\n for nl in range(numLabels):\n if self.y[i] == nl:\n self.labels[nl, w[0], w[1]] += 1\n return self.labels", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def fix_label(df):\n df_label = df['OVERALL_DIAGNOSIS']\n\n df_label.replace({0: -1, 1: 1}, inplace=True)\n df = df.drop(['OVERALL_DIAGNOSIS'], axis=1)\n df = pd.concat([df_label, df], axis=1)\n df.columns.values[0] = \"label\"\n return df", "def get_labels(self) -> {int: str}:\n return {x: col.label for x, col in self._cols.items()}", "def get_labels(df):\n labels = []\n for i in df.index:\n label = sample_label_from_sample_name(i)\n labels.append(label)\n return labels", "def encode_labels(labels, nclass=5):\n y = np.zeros((len(labels), nclass)).astype('float32')\n for j, yj in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(yj) + 1:\n y[j, i] = yj - np.floor(yj)\n if i+1 == np.floor(yj):\n y[j, i] = np.floor(yj) - yj + 1\n return y", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def column(self, label):\n dis = []\n for x in self.rows:\n dis = dis + [x[self.column_labels.index(label)]]\n return dis\n # return self.rows[self.column_labels.index(label)]", "def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i 
in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)", "def labels(self):\n \n return self.column_labels", "def create_uci_labels():\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\n promoted_to = ['q', 'r', 'b', 'n']\n\n for l1 in range(8):\n for n1 in range(8):\n destinations = [(t, n1) for t in range(8)] + \\\n [(l1, t) for t in range(8)] + \\\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\n [(l1 + a, n1 + b) for (a, b) in\n [(-2, -1), (-1, -2), (-2, 1), (1, -2), (2, -1), (-1, 2), (2, 1), (1, 2)]]\n for (l2, n2) in destinations:\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8):\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2]\n labels_array.append(move)\n for l1 in range(8):\n l = letters[l1]\n for p in promoted_to:\n labels_array.append(l + '2' + l + '1' + p)\n labels_array.append(l + '7' + l + '8' + p)\n if l1 > 0:\n l_l = letters[l1 - 1]\n labels_array.append(l + '2' + l_l + '1' + p)\n labels_array.append(l + '7' + l_l + '8' + p)\n if l1 < 7:\n l_r = letters[l1 + 1]\n labels_array.append(l + '2' + l_r + '1' + p)\n labels_array.append(l + '7' + l_r + '8' + p)\n return labels_array", "def segments_to_labels(start_times, end_times, labels, window):\n flags = []\n class_names = list(set(labels))\n index = window / 2.0\n while index < end_times[-1]:\n for i in range(len(start_times)):\n if start_times[i] < index <= end_times[i]:\n break\n flags.append(class_names.index(labels[i]))\n index += window\n return np.array(flags), class_names", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def add_to_label_index_dict(label, starting_index, ending_index, label_index_dict):\r\n\tlabel = label.upper()\r\n\tif label in label_index_dict.keys():\r\n\t\tlabel_index_dict[label] = label_index_dict[label] + ' ' + str(starting_index) + '-' + str(ending_index)\r\n\telse:\r\n\t\tlabel_index_dict[label] = str(starting_index) + '-' + str(ending_index)\r\n\treturn label_index_dict", "def get_uci_labels():\n labels_array = []\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n numbers = ['1', '2', '3', '4', '5', '6', '7', '8']\n promoted_to = ['q', 'r', 'b', 'n']\n\n for l1 in range(8):\n for n1 in range(8):\n destinations = [(t, n1) for t in 
range(8)] + \\\n [(l1, t) for t in range(8)] + \\\n [(l1 + t, n1 + t) for t in range(-7, 8)] + \\\n [(l1 + t, n1 - t) for t in range(-7, 8)] + \\\n [(l1 + a, n1 + b) for (a, b) in\n [(-2, -1), (-1, -2), (-2, 1), (1, -2),\n (2, -1), (-1, 2), (2, 1), (1, 2)]]\n\n for (l2, n2) in destinations:\n if (l1, n1) != (l2, n2) and l2 in range(8) and n2 in range(8): # noqa: E501\n move = letters[l1] + numbers[n1] + letters[l2] + numbers[n2] # noqa: E501\n labels_array.append(move)\n\n for l1 in range(8):\n letter = letters[l1]\n for p in promoted_to:\n labels_array.append(letter + '2' + letter + '1' + p)\n labels_array.append(letter + '7' + letter + '8' + p)\n if l1 > 0:\n l_l = letters[l1 - 1]\n labels_array.append(letter + '2' + l_l + '1' + p)\n labels_array.append(letter + '7' + l_l + '8' + p)\n if l1 < 7:\n l_r = letters[l1 + 1]\n labels_array.append(letter + '2' + l_r + '1' + p)\n labels_array.append(letter + '7' + l_r + '8' + p)\n return labels_array", "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def label_n_elements(self, n_elements: int, **kwargs) -> int:\n # labels\n assert Exception(\"not implemented\")", "def getLabels(self):\n return self.numToLabel", "def rank_labels(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n vals = self.clf.decision_function(vec)\n # print vals.shape\n # print len(self.labelmap)\n labelvals = {}\n for idx in range(len(self.labelmap)):\n labelvals[self.labelmap[idx]] = vals[0,idx]\n sortedlabels = sorted(labelvals.items(), key=itemgetter(1),\n reverse=True)\n labels = [item[0] for item in sortedlabels]\n return labels", "def add_labels(self):\n counter = 0\n labels_list = []\n for i in range(len(self.commands)):\n command = self.commands[i]\n if command.startswith('('):\n raw_value = command.replace('(', '').replace(')', '')\n self.symbol_table[raw_value] = str(counter)\n labels_list.append(command)\n else:\n counter += 1\n for label in labels_list: # remove labels with parentheses we don't need them.\n self.commands.remove(label)", "def assign_labels(self, data):\n data[self.label] = self.labeler(data.index.values)", "def label_from_index(self, index):\n raise NotImplementedError", "def label_grid(self):\n\n self.pc_label.grid(row=0, sticky=\"nw\", pady=2, padx=3)\n self.sc_label.grid(row=1, sticky=\"nw\", pady=2, padx=3)\n self.avg_t_label.grid(row=2, sticky=\"nw\", pady=2, padx=3)\n self.nwt_label.grid(row=4, sticky=\"nw\", pady=2, padx=3)\n self.nw_ip_label.grid(row=5, sticky=\"nw\", pady=2, padx=3)\n self.nw_gw_label.grid(row=6, sticky=\"nw\", pady=2, padx=3)\n self.nw_sm_label.grid(row=7, sticky=\"nw\", pady=2, padx=3)\n self.nw_mca_label.grid(row=8, sticky=\"nw\", pady=2, padx=3)", "def _AccumulateLabelValues(\n labels, columns, label_values, non_col_labels, is_derived=False):\n for label_name in labels:\n if '-' in label_name:\n parts = label_name.split('-')\n for pivot in range(1, len(parts)):\n column_name = '-'.join(parts[:pivot])\n value = '-'.join(parts[pivot:])\n column_name = column_name.lower()\n if column_name in columns:\n label_values[column_name].append((value, is_derived))\n else:\n non_col_labels.append((label_name, is_derived))", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n 
yield features[start:end], labels[start:end]", "def align_labels(labels):\n # get longest label width\n max_width = -1\n for label in labels:\n width = label.GetSize().width\n max_width = max(max_width, width)\n \n # resize all labels to the longest width\n for label in labels:\n label.SetSize((max_width,-1))", "def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels", "def plot_labels(_segments, _label_to_color, _ax=None, _alpha=0.2):\n\t_ax = _ax or plt\n\tfor idx, interval in _segments.iterrows():\n\t\t_ax.axvspan(interval['Begin'], interval['End'], facecolor=_label_to_color[interval['ID']], alpha=_alpha)", "def encode_labels(labels, nclass=5):\n Y = np.zeros((len(labels), nclass)).astype('float32')\n for j, y in enumerate(labels):\n for i in range(nclass):\n if i+1 == np.floor(y) + 1:\n Y[j,i] = y - np.floor(y)\n if i+1 == np.floor(y):\n Y[j,i] = np.floor(y) - y + 1\n return Y", "def handle_labels(ls):\r\n\r\n # assign each line a number\r\n line_num = {}\r\n counter = 0\r\n for i in ls:\r\n if not i.startswith('('):\r\n line_num[i] = counter\r\n counter += 1\r\n else:\r\n sb = i[1:-1]\r\n line_num[sb] = counter\r\n\r\n # replace @XXX with number\r\n var_address = 16\r\n mem = {}\r\n for i in range(len(ls)):\r\n if ls[i].startswith('@'):\r\n # if @XXX is already in numeral form, do nothing\r\n if ls[i][1:].isdigit():\r\n pass\r\n\r\n # replace with pre-defined symbols if found\r\n elif pre_defined_sb.get(ls[i][1:]) is not None:\r\n ls[i] = '@' + pre_defined_sb[ls[i][1:]]\r\n\r\n # replace by (XXX) line number if search failed\r\n elif line_num.get(ls[i][1:]) is not None:\r\n ls[i] = '@' + str(line_num[ls[i][1:]])\r\n\r\n # else must be user defined variable\r\n # assign same address for same variable\r\n else:\r\n if ls[i] not in mem:\r\n mem[ls[i]] = '@' + str(var_address)\r\n ls[i] = '@' + str(var_address)\r\n var_address += 1\r\n else:\r\n ls[i] = mem[ls[i]]\r\n\r\n # remove (XXX)'s\r\n ls = list(filter(lambda x: not x.startswith('('), ls))\r\n\r\n return ls", "def get_split_col_names():\n return ['dna_%d' % (idx+1) for idx in range(60)]", "def _setup_labels(self):\n self._labels = self.get_labels()\n self._labels = self.get_predefined_labels() + list(self._labels)\n self._labels = sorted(self._labels)\n\n self._labels_2_index = {label.lower(): i for i, label in enumerate([self._unknown_label] + self._labels)}\n self._index_2_labels = {i: label for label, i in self._labels_2_index.items()}\n\n self._labels_dim = len(self._labels_2_index)\n return None", "def relabel_particles(df, col1='raw_data', col2='particle'):\n\n\tdf.sort_values(by=[col1, col2])\n\tfile_names = df[col1].unique()\n\ti = 0\n\n\tind = 1\n\ttot = len(file_names)\n\tfor file_name in file_names:\n\t\tprint(\"Relabeling (%d/%d): %s\" % (ind, tot, file_name))\n\t\tind = ind + 1\n\n\t\tsub_df = df.loc[df[col1] == file_name]\n\t\tparticles = sub_df[col2].unique()\n\n\t\tfor particle in particles:\n\t\t\tdf.loc[(df[col1] == file_name) & \\\n\t\t\t(df[col2] == particle), 'tmp'] = i\n\t\t\ti+=1\n\n\tdf['tmp'] = df['tmp'].astype('int')\n\tdf[col2] = df['tmp']; del df['tmp']\n\n\treturn df", "def select(self, labels):\n indexs = []\n \n for i in range(len(labels)):\n indexs.append(self.column_labels.index(labels[i]))\n new_rows = []\n for x in self.rows:\n new_row = []\n for index in indexs:\n new_row.append(x[index])\n 
new_rows.append(new_row)\n\n\n\n new_Table = T88ble(new_rows, labels)\n\n return new_Table", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def label_consecutive_lines():\n offset = 0.1\n\n def get_points():\n \"\"\"Prompts for a point triple. Returns a list of the points:\n [<iter>, ...]\n \"\"\"\n points = rs.GetPoints(\n draw_lines=False, in_plane=False, \n message1='Select first tail', message2='Select heads', \n max_points=None, base_point=None)\n return points\n\n def draw_lpoint_triple(text, tail, head):\n \"\"\"Receives label text and a list of point triples:\n str\n [<iter>, ...]\n Draws text dots with <text>-a, -b, -c\n \"\"\"\n line_vector = rs.PointSubtract(head, tail)\n offset_vector = line_vector * offset\n offset_tail = rs.VectorAdd(tail, offset_vector)\n offset_head = rs.VectorSubtract(head, offset_vector)\n axis = [0, 0, 1]\n angle = 90\n rotated_offset_vector = rs.VectorRotate(offset_vector, angle, axis)\n offset_side = rs.VectorAdd(offset_tail, rotated_offset_vector)\n rs.AddTextDot(('%s-a' % text), offset_tail)\n rs.AddTextDot(('%s-b' % text), offset_head)\n rs.AddTextDot(('%s-c' % text), offset_side)\n\n def side_is_same_as_rule(point):\n \"\"\"Receives a point (i.e., a list):\n [num, num, num]\n Returns whether the point is on the same side as the side label in the\n rule\n \"\"\"\n return False\n \n points = get_points()\n text = rs.StringBox('Enter label text')\n for i in range(len(points) - 1):\n # for point in points:\n tail = points[i]\n head = points[i + 1]\n draw_lpoint_triple(text, tail, head)", "def loadLabels(start, stop, csvFile):\n return csvFile[start:stop]", "def add_column(self, pos, char='-', new_label=None):\n MutableAlignment.add_column(self, pos, char)\n if new_label == \"MAX\":\n self._col_labels.insert(pos, max(self._col_labels) + 1)\n elif new_label == \"INC_LAST\":\n self._col_labels.append(max(self._col_labels) + 1)\n elif new_label == \"RESET\":\n self._reset_col_names()\n else:\n self._col_labels.insert(pos, new_label)", "def generate_true_labels(int_limit, n_obs):\n if int_limit > 0:\n if int_limit > n_obs:\n raise ValueError(f\"\"\"Invalid value of int_limit {int_limit}:\n greater than the number of sequences\"\"\")\n else:\n true_labels = [1 if idx <=\n int_limit else 0 for idx in range(n_obs)]\n else: # Allows test cases where all sequence pairs are non-interacting\n true_labels = [0 for item in range(n_obs)]\n return true_labels", "def with_column(self, label, values):\n \n \n \n # self.column_labels.append(label)\n # for i in range(len(self.rows)):\n # self.rows[i].append(values[i]) \n \n new_label = []\n new_rows = []\n for x in self.column_labels:\n new_label.append(x)\n new_label.append(label)\n \n for i in range(len(self.rows)):\n new_row = []\n new_row += self.rows[i]\n # for i in range(len(b)): \n new_row.append(values[i])\n new_rows.append(new_row)\n \n \n new_Table = T88ble(new_rows, new_label)\n\n return new_Table", "def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]", "def label_mat(mat):\n # Index and range 
of average used for labeling.\n gather_avg_25_i = 2\n avg_range = 25\n # Labels for rising and falling price.\n rising_i = 1\n falling_i = 0\n num_classes = 2\n labels = np.zeros([mat.shape[0] - avg_range + 1, num_classes])\n for i in range(mat.shape[0] - avg_range + 1):\n # If average 25 day price rises after 24 days assign rising label, else\n # assign falling label.\n if mat[i, gather_avg_25_i] < mat[i + avg_range - 1, gather_avg_25_i]:\n labels[i, rising_i] = 1.0\n else:\n labels[i, falling_i] = 1.0\n return labels", "def put_label(i):\n i = min(i, len(x) - 2)\n dx = sx[i + 1] - sx[i]\n dy = sy[i + 1] - sy[i]\n rotation = np.rad2deg(math.atan2(dy, dx)) + rotation_offset\n pos = [(x[i] + x[i + 1]) / 2. + offset[0],\n (y[i] + y[i + 1]) / 2 + offset[1]]\n plt.text(pos[0],\n pos[1],\n label_text,\n size=9,\n rotation=rotation,\n color=line.get_color(),\n ha=\"center\",\n va=\"center\",\n bbox=dict(ec='1', fc='1', alpha=0.8))" ]
[ "0.6146362", "0.60743713", "0.6023064", "0.5982423", "0.5977497", "0.5878763", "0.5794926", "0.56516623", "0.5605927", "0.55946416", "0.55929387", "0.5586301", "0.54550844", "0.5445182", "0.5400012", "0.5359969", "0.5359969", "0.5359969", "0.5359969", "0.5342608", "0.53189415", "0.5300684", "0.5282022", "0.52459097", "0.52300346", "0.5225489", "0.5199985", "0.518993", "0.5179973", "0.5174585", "0.516727", "0.51619613", "0.516053", "0.5157546", "0.5156891", "0.51523733", "0.51496315", "0.5100276", "0.50967723", "0.5096449", "0.50963676", "0.50870067", "0.50863004", "0.50633156", "0.5062996", "0.50560915", "0.50507593", "0.5043922", "0.50353247", "0.50335205", "0.50324225", "0.5002867", "0.49964648", "0.49873534", "0.49809343", "0.49808538", "0.4968706", "0.49676353", "0.4959274", "0.49531052", "0.4952078", "0.492882", "0.49085653", "0.49074134", "0.49064547", "0.49030706", "0.49022686", "0.48940352", "0.4885425", "0.48727554", "0.48642275", "0.4863405", "0.48559633", "0.48546496", "0.48540604", "0.48531407", "0.48530933", "0.48530334", "0.48475444", "0.48359388", "0.48331764", "0.48320696", "0.48310196", "0.4829873", "0.4825935", "0.48235133", "0.48217618", "0.4820439", "0.48201048", "0.48132393", "0.48062629", "0.48055482", "0.47982174", "0.47976157", "0.47972155", "0.47966376", "0.47955263", "0.47902137", "0.47890738", "0.47846207" ]
0.6012854
3
Remove unlabelled columns in [start-col_width, end+col_width].
def _remove_overlaps(start, end) -> int:
    start = self._x_positions[start % self.n_cols]
    end = self._x_positions[int(end) % self.n_cols]
    n_removed = 0
    for x, col in self._cols.items():
        if start - self.col_width <= x <= start or end <= x <= end + self.col_width:
            if col.label is None:
                n_removed += col.mark_as('ignore')
    return n_removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cols_drop():", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def strip_left_cols(df, cols_to_strip):\n columnss = df.columns\n return df[columns[cols_to_strip:]]", "def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def clean(df):", "def get_cols_dummy():", "def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset", "def truncate_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = -1\n else:\n if row[i] == 10:\n erase = True\n return row\n\n ret = np.copy(labels)\n ret = repair_labels(ret)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def delete_padded_rows(data, labels, n_dimensions):\n labels = np.repeat(labels, data.shape[1])\n data = data.reshape(-1, n_dimensions)\n added_rows = np.where(np.all(data == 0, axis=1))\n data = data[~added_rows[0]]\n labels = labels[~added_rows[0]]\n\n return data, labels", "def remove_empty_columns(aln, enforce_codon=False):\n\n ind = []\n seqs = aln.values()\n alnlen = aln.alignlen()\n\n if not enforce_codon:\n for i in range(alnlen):\n for seq in seqs:\n if seq[i] != \"-\":\n ind.append(i)\n break\n else:\n if alnlen % 3 != 0:\n raise Exception(\n \"cannot set enforce_codon if alignment length \"\n \"is not a multiple of three\")\n\n for i in range(0, alnlen, 3):\n for seq in seqs:\n if seq[i:i+3] != \"---\":\n ind.extend([i, i+1, i+2])\n break\n\n return subalign(aln, ind)", 
"def clear_columns(prefixlist,datas):\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n ccc=[c.lower() for c in ccc]\n \n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas.rename(columns=d,inplace=True)\n\n u, i = np.unique(datas.columns, return_index=True)\n y=u[np.argsort(i)] \n \n r=[datas.columns.tolist().index(rr)for rr in y]\n\n return datas.iloc[:, r]", "def remove_colspan(self, ):\n if self.AttributeNames.COLSPAN in self.attrs:\n del self.attrs[self.AttributeNames.COLSPAN]\n return self", "def _set_columns(self, start, end):\n if start <= end <= self.width:\n self._write(ST7789_CASET, _encode_pos(\n start+self.xstart, end + self.xstart))", "def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)", "def fitCols(self, col_start, col_end, sheet):\r\n col_n = col_start\r\n while col_n <= col_end:\r\n self.fitCol(col_n, sheet)\r\n col_n = col_n + 1", "def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states", "def get_empty_columns(\n dc_input: deepconsensus_pb2.DeepConsensusInput) -> List[int]:\n columns_to_remove = []\n for i in range(len(dc_input.subreads[0].bases)):\n all_internal_gaps = True\n for subread in dc_input.subreads:\n if subread.bases[i] != dc_constants.GAP_OR_PAD:\n all_internal_gaps = False\n break\n if all_internal_gaps:\n columns_to_remove.append(i)\n return columns_to_remove", "def test_structural_remove_columns_all_1_0(self):\n cp = Plotter.from_smiles(['CCCC', 'CCCC'], sim_type=\"structural\")\n self.assertTrue(cp._Plotter__df_descriptors.empty)", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. 
See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated", "def exclude_cols(self, *_, **__) -> Tuple[str, ...]:", "def smooth_columns(input_frame):\n column_labels = list(input_frame.columns)\n input_frame.columns = [c.lower().replace('_','') for c in column_labels]\n return input_frame", "def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated", "def clear_columns(prefixlist,datas,style=0, inplace=False):\n func = {0: str.lower,\n 1: str.upper,\n 2: str.capitalize}\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n# ccc=[c.lower() for c in ccc]\n ccc=[func[style](c) for c in ccc]\n\n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas_renamed=datas.rename(columns=d,inplace=inplace)\n new_datas=datas if inplace else datas_renamed\n\n u, i = np.unique(new_datas.columns, return_index=True)\n y=u[np.argsort(i)]\n\n r=[new_datas.columns.tolist().index(rr)for rr in y]\n\n return new_datas.iloc[:, r]", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... 
finished.\")\n return tx, header", "def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)", "def remove_columns(data, col_ids):\n return np.delete(data, col_ids, axis=1)", "def remove_tail(col_lines):\n while len(col_lines[-1]) < 2:\n col_lines.pop()", "def cleaning_data():\n\n data.drop([\"Unnamed: 0\"], axis = 1, inplace = True)\n data.columns = map(str.upper, data.columns)\n return data", "def hideColumns(self, startColumn, endColumn):\n\n\t\t\t\tself.thing.column_dimensions.group(self.convertColumn(startColumn), self.convertColumn(endColumn), hidden = True)", "def EliminateRowsCols(self, *args):\n return _hypre.HypreParMatrix_EliminateRowsCols(self, *args)", "def _trim_start_end(data: pd.DataFrame, start: int, end: int):\n start_idx = data.loc[:, \"start_locus\"].searchsorted(start)\n end_idx = data.loc[:, \"start_locus\"].searchsorted(end, side=\"right\")\n return data.iloc[start_idx:end_idx, :]", "def clear_columns(self):\n self._columns = []\n return self", "def clean_columns(df: pd.DataFrame, filled_rate: float = 0.6) -> pd.DataFrame:\n\n print(f\"Initial shape of the dataframe: {str(df.shape) : >17}\")\n # keep columns that are filled more than the filled rate, default = 60%\n df = df.loc[:, (df.isnull().mean() < (1 - filled_rate))]\n print(f\"Shape after removing null columns: {str(df.shape) : >14}\")\n\n return df", "def remove_column(self, pos, labels=\"REMOVE\"):\n MutableAlignment.remove_column(self, pos)\n if labels == \"RESET\":\n self._reset_col_names()\n elif labels == \"REMOVE\":\n self._col_labels = self._col_labels[:pos] + \\\n self._col_labels[pos + 1:]", "def test_002_range_columns(self):\n assert(len(\n self.range_transformer.fit_transform(\n self.data[self.range_col]\n ).columns\n ) == 1)", "def getColumns(self, stripped):\n noheader = stripped[7:]\n entries = list(chunkify(noheader, 5))\n columns = []\n coverage = [False for x in range(self.rowLength)] #makes a string of bytes covered (0 or 1)\n for entry in entries:\n offset = int(entry[1])\n length = int(entry[2])\n assert offset+length<=self.rowLength, \"Error: entry length does not match row length in:\\n\"+ self.path +\".\"\n if coverage[offset] == True: # in the case where NMM entries are overlapping\n entry[0] = \"##OVERLAP WARNING## \" + entry[0]\n for x in range(offset, offset+length):\n coverage[x] = True #set bytes as covered.\n columns.append(NightmareEntry(entry))\n #at this point you have a list [True, True, False] or whatever\n fillerEntries = []\n count = 0\n for offset,val in enumerate(coverage):\n if val==False:\n count += 1\n if count == 1:\n fillerEntry = [\"##UNKNOWN##\",offset,1,\"HEXA\",\"NULL\"]\n fillerEntries.append(fillerEntry)\n else:\n fillerEntries[-1][2] = count\n else:\n count = 0\n for fillerEntry in fillerEntries:\n columns.append(NightmareEntry(fillerEntry))\n columns.sort(key=lambda col: col.offset) #sort columns by offset\n return columns", "def normalize_columns_separately(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tcolumn_max=column_matrix.max(1)\n\tcolumn_min=column_matrix.min(1)\n\trange=column_max-column_min\n\tnomalized=(column_matrix-column_min)/range\n\treturn nomalized", "def strip_static_cols(df):\n for col in df.columns:\n if len((df[col]).unique()) == 1:\n df.drop(columns=[col], 
inplace=True)\n return df", "def trim_features():\n pass", "def reduce_data_to_necessary_columns(filtered_df):\n hist_df = filtered_df[\n [\n \"UniqueName\",\n \"Joins\",\n \"Projection_Attributes\",\n \"Selection_Attributes\",\n \"GroupBy\",\n \"OrderBy\",\n \"Strings\",\n \"Tables\",\n ]\n ].set_index(\"UniqueName\")\n return hist_df", "def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw", "def _truncate_cmap(cmap, Y_thresh=0.65, start_offN=100):\n\n cmap_func = plt.get_cmap(cmap)\n allcolors = cmap_func(np.linspace(0., 1., start_offN))\n mask = np.array([colorsys.rgb_to_yiq(*c[:-1])[0] <= Y_thresh for c in allcolors])\n if ~mask.any():\n return cmap # not truncated\n else:\n return colors.LinearSegmentedColormap.from_list('trunc_cmap', allcolors[mask])", "def _fix_uniq_col(self):\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n n_covered_col = self.a_csr.dot(np.ones(self.ncols)) \n ifix = np.zeros(self.ncols, dtype=bool)\n if (np.count_nonzero(n_covered_col) != self.mrows):\n raise ValueError(\"There are uncovered rows! Please check your input!\")\n if (np.any(n_covered_col==1)):\n inonzero = self.a_csr[n_covered_col==1,:].nonzero()\n ifix[inonzero[1]] = True\n\n return ifix", "def remove_columns ( infilename, outfilename, cols_to_remove ):\n xcols = cols_to_remove\n xcols.sort()\n xcols.reverse()\n \n reader = csv.reader( open( infilename, 'rt' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer = csv.writer( open( outfilename, 'wb' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n vals = row\n for x in xcols :\n vals.pop( x )\n writer.writerow( vals )", "def add_column_filter(source, args, index):\n include_tags = hxl.TagPattern.parse_list(args.get('cut-include-tags%02d' % index, []))\n exclude_tags = hxl.TagPattern.parse_list(args.get('cut-exclude-tags%02d' % index, []))\n skip_untagged = args.get('cut-skip-untagged%02d' % index, False)\n if include_tags:\n source = source.with_columns(include_tags)\n if exclude_tags or skip_untagged:\n source = source.without_columns(exclude_tags, skip_untagged=skip_untagged)\n return source", "def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)", "def complete_columns(training_df, valid_df):\n for c in valid_df.columns:\n if c not in training_df.columns:\n training_df[c] = 0\n for c in training_df.columns:\n if c not in valid_df.columns:\n valid_df[c] = 0\n return training_df, valid_df", "def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n features_delete = np.concatenate([features_delete, 
np.arange(244, 249)])\n return np.delete(data, features_delete, 1)", "def remove_bad_cells(self, *dims):\n ranges = [DimRange(d, 0, np.inf) for d in dims]\n return self.gate(*ranges)", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def EliminateCols(self, cols):\n return _hypre.HypreParMatrix_EliminateCols(self, cols)", "def get_fixed_colspec(self):\n\n # Warning! Assuming th start values are sorted. Really should check.\n\n return (\n [c.name for c in self.columns if c.start and c.width],\n [(c.start, c.start + c.width) for c in self.columns if c.start and c.width]\n )", "def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def _modify_columns(self, cols, X, y=None):", "def delete_col(A, delcol):\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n keeprows = arange(0, m)\r\n keepcols = delete(arange(0, n), delcol)\r\n return A[keeprows][:, keepcols]", "def remove(self):\n for i in range(self.min_y+1, self.max_y+1):\n for j in range(self.min_x+1, self.max_x+1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = ' '\n except IndexError:\n pass", "def cleaning_Dataset(dataset):\n cols = dataset.select_dtypes([np.number]).columns\n diff = dataset[cols].diff().sum()\n\n dataset = dataset.drop([diff==0].index, axis=1)\n dataset = dataset.drop('adj close', 1)\n dataset = dataset.fillna(method='bfill')\n dataset = dataset[1:-1]\n return dataset", "def reset_columns(self):\n\n reset_cols = [i for i in self.__cols if i in self.__df_timings.columns]\n self.__df_timings = self.__df_timings.loc[:, reset_cols]\n return", "def trim_data(data, attributes):\n return data.drop(attributes, axis=1)", "def write_untrim08(self,fn):\n UnTRIM08Grid(grid=self).write_untrim08(fn)", "def normalize_columns_together(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmax=column_matrix.max()\n\tprint \"The maximum:\t \", max\n\tmin=column_matrix.min()\n\tprint \"The minimum:\t \", min\n\trange=max-min\n\tprint \"range: \", range\n\tcolumn_matrix=column_matrix-min\n\tnormalized=column_matrix/range\n\treturn normalized", "def clean_dataframe(df, column_list, length):\n list_of_actual_columns = list(dataframe.columns.values)\n for each_column in column_list:\n if each_column not in list_of_actual_columns:\n return False\n if number_of_rows < 1000:\n return False\n return True\n\n category_value_counts = df['Type'].value_counts()\n category_value_counts.to_csv('category_value_counts')\n cdf = pd.read_csv('category_value_counts')\n df = df.merge(cdf, left_on='Type', right_on='OldCategory')\n df['Datetime'] = pd.to_datetime(df['Datetime'])\n df[(df['Datetime'] > '2019-12-31')]\n df.sort_values(by='Datetime', ascending=False)", "def columns_to_drop(filepath, skiprows):\n candidates = ['unit', 'units', 'total', 'totals', 'id']\n df = pd.read_csv(filepath, skiprows=skiprows)\n drop = set()\n \n # find columns according to a list of names we should drop\n for item in df.columns:\n if item.upper() in [x.upper() for x in candidates]:\n drop.add(item)\n \n # find columns with only one unique value\n unique = 
df.nunique().to_dict()\n for column, n in unique.items():\n if n == 1:\n drop.add(column)\n \n # find columns with int values that are not a time period\n for column in df.columns:\n if df[column].dtype.name == 'int64':\n if not df[column].nunique() in [12, 24, 48, 96, 24*60/5, 24*60]:\n drop.add(column)\n \n return list(drop)", "def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def FixColumnLabels(cv):\n l = []\n for label in cv[0].columns:\n if \"-\" not in label and label != \"Elapsed\":\n l.append(label + \"-UT\")\n if \"-\" in label or label == \"Elapsed\":\n l.append(label)\n\n for d in cv:\n d.columns = l\n\n return cv", "def remove_border(src): #---- remove blank border\r\n rows = src.shape[0]; VMIN= 0; VMAX= rows; \r\n cols = src.shape[0]; UMIN= 0; UMAX= cols;\r\n for ky in range(1,rows):\r\n sum0 = np.sum(src[ky,:,:]);\r\n sum1 = np.sum(src[rows-ky-1,:,:]);\r\n if sum0== 0 and VMIN== ky-1: VMIN= ky;\r\n if sum1== 0 and VMAX== rows-ky+1: VMAX= rows-ky;\r\n for kx in range(1,cols):\r\n sum0 = np.sum(src[:,kx,:]);\r\n sum1 = np.sum(src[:,cols-kx-1,:]);\r\n if sum0== 0 and UMIN== kx-1: UMIN= kx;\r\n if sum1== 0 and UMAX== cols-kx+1: UMAX= cols-kx;\r\n #--- --- \r\n DV = np.minimum(VMIN, rows-VMAX);\r\n DU = np.minimum(UMIN, cols-UMAX);\r\n return src[DV:(rows-DV), DU:(cols-DU), :];", "def orig_cols():\n return ['Q-E','ZN-E','PH-E','DBO-E','DQO-E','SS-E','SSV-E','SED-E','COND-E','PH-P','DBO-P','SS-P','SSV-P',\n 'SED-P','COND-P','PH-D','DBO-D','DQO-D','SS-D','SSV-D','SED-D','COND-D','PH-S','DBO-S','DQO-S',\n 'SS-S','SSV-S','SED-S','COND-S','RD-DBO-P','RD-SS-P','RD-SED-P','RD-DBO-S','RD-DQO-S','RD-DBO-G',\n 'RD-DQO-G','RD-SS-G','RD-SED-G']", "def handel_nans(self):\n col_nan_pct = self.df.isin([' ',np.nan]).mean() #Calculates percent of Nans\n col_names = col_nan_pct[col_nan_pct >= .1].index # Gets name of columns with over 50% Nans\n col_count = [self.df[col].count() for col in col_names for x in self.df if x == col] #Gets length of valid values for column\n dropped_col = [col for col in zip(col_count, col_names) if col[0] <= 1400] #Gets columns names with under 50 values\n [self.df.drop(columns=[col[1]], inplace=True) for col in dropped_col]\n self.dropped_cols_phase_one = dropped_col\n [self.column_dtypes.pop(item[1]) for item in dropped_col]\n self.df[self.target].dropna(inplace=True)", "def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )", "def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns", "def test_clean_columns():\n assert clean_columns('Id, AdCampaignId, CampaignId') == ['id', 'adCampaignId', 'campaignId']", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > 
cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy", "def col_subset(self, patt, rmprefix=None):\n cols = self.widedf.columns\n want = [bool(re.search('^sesid$|^age$|'+patt, x)) for x in cols]\n subset = self.widedf.iloc[:, want]\n if rmprefix is None:\n # assume\n # * the best prefix to remove is from the first non-{age,id} column \n # * prefix is any text before the first '_'\n subset_specifc_cols = [x for x in cols[want] if x not in ['sesid','age']]\n rmprefix = subset_specifc_cols[0].split(\"_\")[0]\n if rmprefix:\n subset.columns = [re.sub(f'^{rmprefix}_','',x) for x in subset.columns]\n return subset.dropna()", "def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def remove_columns(df, threshold, log=False):\n if log: sectionTimer = Timer(log=f\"removing columns with more than {threshold * 100}% of nans\")\n \n # removes columns with many nans\n non_nan_values = int(df.shape[0] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=1)\n dropped_cols = list(set(df.columns) - set(df_clean.columns))\n\n if log: sectionTimer.end_timer(log=f\"removed {len(set(df.columns)) - df_clean.shape[1]} columns\")\n return df_clean, dropped_cols", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def split_columns(l):\n return [l[:3], l[3:7], l[7:12], l[12:16], l[16:]]", "def reduce_possibilities_by_column(self):\n y = self.targetCell.y\n for i in range(1,10): #content\n for n in range(9): #x-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[n][y]\n if self.targetCell != neighbour_cell:\n self.targetCell.column_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.column_neighbour_possibilities = flatten_list(self.targetCell.column_neighbour_possibilities)", "def remove_other_elements(data):\n charset = ['F','l','B','r','I','i','M','g','L','b','a','e','K','V','d','R','Z','G','A','Y','u']\n x = []\n for i in range(data.shape[0]):\n for j in range(len(data.iloc[i,1])):\n if data.iloc[i,1][j] in charset:\n x.append(i)\n break\n df = data[(True^data['Index'].isin(x))]\n df.reset_index(drop=True, inplace=True)\n return df", "def trim_long_colnames(cat):\n import re\n long_short_pairs = [\n ('GeneralShapeletPsf', 'GSPsf'),\n ('DoubleShapelet', 'DS'),\n ('noSecondDerivative', 'NoSecDer')]\n for long, short in long_short_pairs:\n long_re = re.compile(long)\n for col_name in cat.colnames:\n if long_re.search(col_name):\n new_col_name = long_re.sub(short, col_name)\n cat.rename_column(col_name, new_col_name)", "def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return 
filt_col_df", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def remove_columns(lst):\n cols_rem = ['yearID','Team','lgID','Name','X','playerID','pops']\n\n for item in cols_rem:\n if item in lst:\n lst.remove(item)\n\n return(lst)", "def _clean_up_columns(\n self):\n self.log.debug('starting the ``_clean_up_columns`` method')\n\n tableName = self.dbTableName\n\n print \"cleaning up %(tableName)s columns\" % locals()\n\n sqlQuery = u\"\"\"\n set sql_mode=\"STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION\";\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n sqlQuery = u\"\"\"\n update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;\n update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = \"\";\n update %(tableName)s set notes = null where notes = \"\";\n update %(tableName)s set redshift = null where redshift = 0;\n update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = \"\";\n update %(tableName)s set hubble_const = null where hubble_const = 0;\n update %(tableName)s set lmc_mod = null where lmc_mod = 0;\n update %(tableName)s set master_row = 0;\n update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);\n \"\"\" % locals()\n writequery(\n log=self.log,\n sqlQuery=sqlQuery,\n dbConn=self.cataloguesDbConn,\n )\n\n self.log.debug('completed the ``_clean_up_columns`` method')\n return None", "def dataendclean(df, x, inplace=False):\r\n # Examine Mean Values\r\n if inplace:\r\n df = df\r\n else:\r\n df = df.copy()\r\n\r\n jump = df[abs(df.loc[:, x].diff()) > 1.0]\r\n try:\r\n for i in range(len(jump)):\r\n if jump.index[i] < df.index[50]:\r\n df = df[df.index > jump.index[i]]\r\n printmes(\"Dropped from beginning to \" + str(jump.index[i]))\r\n if jump.index[i] > df.index[-50]:\r\n df = df[df.index < jump.index[i]]\r\n printmes(\"Dropped from end to \" + str(jump.index[i]))\r\n except IndexError:\r\n printmes('No Jumps')\r\n return df", "def remove_index(self):\n if \"@Index\" not in self.col_lines[0]:\n return\n\n while not self.col_lines[0].startswith('@Entity'):\n self.col_lines.pop(0)", "def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)", "def _validate_limits_cols_prefixed(self):\n for col in self._limits:\n self.__validate_col_prefix(\n col, (SOLAR_PREFIX, WIND_PREFIX), input_name='limits'\n )", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def unique_cols(self):\n return list(set([coord[1] for coord in self.landscape]))", "def keep_columns(self, colnames):\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())" ]
[ "0.6347648", "0.60383844", "0.5991607", "0.5967662", "0.57150346", "0.56798846", "0.5672258", "0.5656547", "0.56305486", "0.5617258", "0.55419534", "0.5483048", "0.54566985", "0.54525024", "0.5401275", "0.5390643", "0.5390326", "0.5388381", "0.53292286", "0.53068745", "0.52854407", "0.52827644", "0.52759993", "0.5208304", "0.51816946", "0.51556236", "0.51554507", "0.51490206", "0.5144577", "0.51425093", "0.51343226", "0.51283395", "0.51262724", "0.5125608", "0.5124935", "0.51178426", "0.5100704", "0.50886464", "0.506857", "0.50633705", "0.50553155", "0.5055251", "0.5052894", "0.50518036", "0.5043559", "0.50338227", "0.5016358", "0.49723122", "0.49707237", "0.4969768", "0.49640766", "0.4959578", "0.49497318", "0.493332", "0.49297488", "0.49202245", "0.49200436", "0.4894246", "0.48925647", "0.48759082", "0.48713756", "0.4863427", "0.48513559", "0.48469436", "0.4835408", "0.4835259", "0.48239934", "0.482393", "0.4819551", "0.48146012", "0.48032862", "0.48031121", "0.47987488", "0.4794859", "0.47887504", "0.47858888", "0.47821873", "0.47819734", "0.47775602", "0.47772664", "0.4777228", "0.47711226", "0.4755529", "0.47525653", "0.4733533", "0.4728215", "0.4722209", "0.47210073", "0.47179845", "0.4707272", "0.4704514", "0.47041216", "0.47034174", "0.46963638", "0.46952996", "0.4689888", "0.46898398", "0.4687264", "0.46824977", "0.46810254" ]
0.67192227
0
Return index of first unlabelled column after x.
def _next_unlabelled_col(x):
    for i in range(self.n_cols):
        idx = (x + i) % self.n_cols
        x_current = self._x_positions[idx]
        if self._cols[x_current].label is None:
            return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft + column.GetWidth()):\r\n return col\r\n \r\n colLeft += column.GetWidth()\r\n \r\n return wx.NOT_FOUND", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def getColumn(self, x):\n i = _getIndex(x, self.columnNames)\n return self.data[i]", "def _get_column(self, index):\n left, right = self._get_columns()\n return left if index < left.count else right", "def _get_col(self, idx):\n return self.line[self._fwf.column_slices[idx]]", "def get_undef_cols_idx(x, undef_val):\n undef_col_idx = []\n for col_idx in range(x.shape[1]):\n column = x[:, col_idx]\n if((column == undef_val).all()):\n undef_col_idx.append(col_idx)\n\n return undef_col_idx", "def __get_column(self, index: int) -> int:\n return index % self.columns", "def get_index(self, column):\r\n\r\n\t\treturn self.columns.index(column)", "def get_drop_row(self, x):\n for y in range(self.size_y):\n if self.get_piece_at_opening(x, y) == Piece.NONE:\n return y\n return -1", "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None", "def xy_to_index(x, y):\n index = y * columns + x\n return index", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def canonicalize_column_index(self, line, col):\n if col < 0:\n col += self.col_lens[line] + 1\n assert col >= 0\n return col", "def cells_x(self):\n return self._cells[0]", "def _position_x_to_column(self, x, y):\n col = -1\n if y>self.padding_top and y<self.padding_top+self.len_y_cercles:\n for i in range(self.n_columns):\n if x>self.padding_left+i*63 and x<self.padding_left+i*63+self.diam_cercles:\n col = i+1\n break\n return col", "def get_nearest_index(self, x_value: float) -> int:\n return int(np.argmax(self.x >= x_value))", "def xAt(self, col):\n\n return self.bottomBoard.x + self.bottomBoard.xAt(col)", "def find_col(table, col):\n return table[0].index(col)", "def __column_height(self, x):\n\t\tcolumn = self.board[:, x]\n\t\treturn np.count_nonzero(column)", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n 
return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column_index(self, column_name: str) -> int:\n return self._column_indices[column_name]", "def choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables):\n if(prev_col_name == None):\n return changed_variables[row_index][0]\n return changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]", "def get_column(self):\n return self._column_number", "def index(self, x) -> int:\n pass", "def comp_attack_column(self):\n column_hit = self.column_arry[-1]\n if column_hit == 10:\n column = random.randint(0, 9)\n return column\n else:\n attk_random = self.random_attk_int()\n if attk_random == 1:\n column = column_hit + 1\n return column\n elif attk_random == 2:\n column = column_hit - 1\n return column", "def calc_pos(x):\n a = torch.arange(1, x.shape[1] + 1).unsqueeze(0).to(x.device)\n p = a.expand(x.shape[0], -1)\n mask = (x != 0).long()\n return p * mask", "def get_unlabeled_idx(X_train, labeled_idx):\n return np.arange(X_train.shape[0])[np.logical_not(np.in1d(np.arange(X_train.shape[0]), labeled_idx))]", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def landmark_x(self):\n ######## TODO: NATSORT columns before returning #######\n x_cols = [col for col in self.landmark_columns if \"x\" in col]\n return self[x_cols]", "def get_y_pos(self, board, x):\n for i in reversed(range(self.height//80)):\n if self.check_pos(board, x, i):\n return i\n i -= 1\n pass", "def column(self) -> int:\n return self._column", "def _get_coating_index(self, y: int) -> int:\n if y < 1:\n raise MirrorLogicError('coating state not valid or unknown')\n return y - 1", "def find_column(text, index):\n\n last_cr = text.rfind(\"\\n\", 0, index)\n if last_cr < 0:\n last_cr = 0\n column = (index - last_cr) + 1\n return column", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def index_of_x(word: str, position=0):\n\tif word[position] == 'x': \n\t\treturn position \n\telse:\n\t\treturn index_of_x(word, position + 1)", "def column_xw(self, x):\n xp = x * self.column_width + x * self.column_gap\n if x < self.num_columns:\n w = min(self.max_x, self.column_width)\n else:\n w = self.max_x - xp\n return xp, w", "def select_next_NN(X, x):\n\n if len(X) == 0 or len(x) == 0:\n printt(\"Error: No data in X and/or x.\")\n return None\n if X.shape[0] != x.shape[0]:\n printt(\"Mismatch in dimensions; must have X mxn, x mx1.\")\n return None\n\n # Compute the (Euclidean) distance from x to all items in X\n scores = np.apply_along_axis(linalg.norm, 0, X - x[:,np.newaxis])\n\n # Select and return item with min distance to x\n m = scores.argmin()\n\n return m", "def get_column_index(i, inputs):\n if isinstance(i, int):\n if i == 0:\n # Useful shortcut, skips the case when end is None\n # (unknown dimension)\n return 0, 0\n vi = 0\n pos = 0\n end = inputs[0][1].shape[1]\n if end is None:\n raise RuntimeError( # pragma: no cover\n \"Cannot extract a specific column %r when \"\n \"one input (%r) has unknown \"\n \"dimension.\" % (i, inputs[0]))\n while True:\n if pos <= i < end:\n return vi, i - pos\n vi += 1\n pos = end\n if vi >= 
len(inputs):\n raise RuntimeError( # pragma: no cover\n \"Input %r (i=%r, end=%r) is not available in\\n%r\" % (\n vi, i, end, pprint.pformat(inputs)))\n rel_end = inputs[vi][1].shape[1]\n if rel_end is None:\n raise RuntimeError( # pragma: no cover\n \"Cannot extract a specific column %r when \"\n \"one input (%r) has unknown \"\n \"dimension.\" % (i, inputs[vi]))\n end += rel_end\n else:\n for ind, inp in enumerate(inputs):\n if inp[0] == i:\n return ind, 0\n raise RuntimeError( # pragma: no cover\n \"Unable to find column name %r among names %r. \"\n \"Make sure the input names specified with parameter \"\n \"initial_types fits the column names specified in the \"\n \"pipeline to convert. This may happen because a \"\n \"ColumnTransformer follows a transformer without \"\n \"any mapped converter in a pipeline.\" % (\n i, [n[0] for n in inputs]))", "def column_index(input_file, name):\n col, com = find_columns(input_file)\n col_name = name\n contents = open(input_file, 'r').readlines()\n for line in contents:\n if com[col.index(col_name)] in line:\n line_index = contents.index(line)+1\n return line_index", "def getSymbol(symbol):\n global masterdf\n if len(symbols) > 1:\n ifirst_collumn = symbols[symbols==symbol].index[0]*7\n else:\n ifirst_collumn = 0\n return masterdf.iloc[:, ifirst_collumn:ifirst_collumn+7]", "def column(self, label):\n dis = []\n for x in self.rows:\n dis = dis + [x[self.column_labels.index(label)]]\n return dis\n # return self.rows[self.column_labels.index(label)]", "def __get_cell_index(self, x, y) -> int:\n # \"The map data, in row-major order, starting with (0,0)\"\n return x + y * self.occupancy_map.info.width", "def _get_column(self, column_or_label):\n c = column_or_label\n if isinstance(c, collections.Hashable) and c in self.column_labels:\n return self[c]\n else:\n assert len(c) == self.num_rows, 'column length mismatch'\n return c", "def get_column_index(self, colName):\n\t\treturn self._columns[colName]", "def get_index(self, row, col):\n return (row * self.cols) + col", "def _check_X_y(self, X, column):\n column_idx = None\n if isinstance(X, pd.core.frame.DataFrame):\n if isinstance(column, str):\n # get index of current column\n column_idx = X.columns.get_loc(column)\n else:\n column_idx = column\n X = X.as_matrix()\n else:\n column_idx = column\n return X, column_idx", "def find_column_index(self, columns):\n for i in range(len(columns)):\n if self.match(columns[i]):\n return i\n return None", "def columnIndexes(a):\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]", "def get_column(filename, column_name):\n with open(filename) as f:\n for header in f:\n columns = header.rstrip().split(\"\\t\")\n return columns.index(column_name)", "def get_source_column(self, column):\n return self._source.columns.get_loc(self._dataframe.columns[column])", "def _getFIdx(self, featureName):\n return np.where(self.featureNames == featureName)[0][0]", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def getRowColumn(N):\n N += 1\n y = int((np.sqrt(1 + 8 * N) - 1) / 2)\n b = int(N - (y**2 + y) / 2)\n if b == 0:\n return (y - 1, y - 1)\n else:\n return (y, b - 1)", "def get_index(line):\n for dummy_i in range(0,len(line) - 1):\n if line[dummy_i] !=0 and line[dummy_i] == line[dummy_i+1]:\n return dummy_i", "def get_column_index(infile, column: str, sep: str=\",\"):\n return list_from_line(infile.readline()).index(column)", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# 
index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def get_index(y, value):\n\n for i in range(len(y)):\n if y[i] <= value:\n continue\n\n return i", "def column(self, index: int) -> List[int]:\n return [x[index - 1] for x in self.matrix]", "def find_in_column(self, column, value):\n values = [cell[column - 1] for cell in self._op.values]\n try:\n return values.index(value) + 1\n except ValueError:\n return 0", "def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol", "def d_index(self, coord):\n return coord - 1 if coord - 1 < 0 else self.dimensions - 1", "def _get_charindex(self, x, y):\r\n verts = self.shapes[0].buf[0].vertices\r\n x = x - self.x + verts[2][0]\r\n y = y - self.y + verts[0][1]\r\n nv = len(verts)\r\n for i in range(0, nv, 4):\r\n vtr = verts[i] # top right\r\n vbl = verts[i + 2] # bottom left\r\n if x >= vbl[0] and x < vtr[0] and y >= vbl[1] and y < vtr[1]:\r\n i = int(i / 4)\r\n c_i = self.c_lookup[i]\r\n if c_i == (len(self.txt) - 1) or self.c_lookup[i + 1] > c_i + 1:\r\n if (vtr[0] - x) < (x - vbl[0]):\r\n c_i += 1\r\n return c_i\r\n return len(self.txt)", "def table_column(self, i):\n return self.__column_list[i]", "def _index(orig, off):\n orig_x, orig_y = orig\n off_x, off_y = off\n return (orig_y - off_y) * self.ncols + (orig_x - off_x)", "def first_visible_column(self):\n return self.container['first_visible_column']", "def get_cell_idx(max_coord, min_coord, separator, x_current):\n lenght = max_coord - min_coord\n return max(0, min(int((x_current - min_coord) * separator / lenght), separator - 1))", "def _get_header_index(self, columnname):\n\n return self.headers.index(columnname)", "def index(a, x):\n i = bisect_left(a, x)\n if i != len(a) and a[i] == x:\n return i\n raise ValueError", "def get_image_column_row(filename):\n row, column = os.path.splitext(filename)[0][-5:].split(\"_\")\n return (int(column) - 1, int(row) - 1)", "def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx", "def trilind(x):\n return _band_part_inverted(x, 0, -1)", "def column_marker(column):\n if (column)//7 == 0:\n marker = 'x'\n elif (column)//7 == 1:\n marker = '+'\n else:\n marker = 'd'\n return marker", "def get_nearest_col(self):\n return (self.rect.left - (self.screen.get_width() // 5)) // self.maze.block_size", "def pandas_find_post_label_num(index, dataframe):\n return dataframe.at[index, 'label_number']", "def winner(self, x):\n self._activate(x)\n return unravel_index(self._activation_map.argmin(),\n self._activation_map.shape)", "def first_true(x, axis):\n nonz = x > 0\n return ((nonz.cumsum(axis) == 1) & nonz).max(axis)[1]", "def get_c_idx(self, node_idx):\n idx = (node_idx + 1) * 2 - 1\n return idx", "def column_location(self, value):\n\n # Try to use as-is\n try:\n return self._columns.index(value)\n except ValueError:\n pass\n\n # Try as integer index\n try:\n value = int(value)\n\n if value in self._columns:\n location = self._columns.index(value)\n elif value < 0:\n location = value + len(self._columns)\n else:\n location = value\n\n size = len(self._columns)\n if size == 0:\n raise IndexError(\"No columns in table\")\n\n if location >= size:\n raise IndexError(f\"Column ({location}) out of range (0..{size - 1})\")\n\n return location\n except ValueError:\n pass\n\n # No 
matches\n options = \", \".join(str(col) for col in self._columns)\n raise ValueError(f\"Unknown column name: {value}, current columns: {options}\")", "def get_column_offsets(self):\n offsets = [x + self.bitcell_array_inst.lx() for x in self.bitcell_array.get_column_offsets()]\n return offsets", "def _column(self, x):\n pixels = []\n for y in range(self.height):\n p = self.data[x, y]\n pixels.append(p)\n return pixels", "def findy1(yxCells,yi,numCells):\r\n yit = 0\r\n found = False\r\n while (not found) and (yit<numCells):\r\n if int(yxCells[yit,0]) == yi:\r\n found = True\r\n else:\r\n yit = yit + 1\r\n if found:\r\n y1 = yit\r\n else:\r\n y1 = -1\r\n return y1", "def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)" ]
[ "0.670367", "0.66707695", "0.65949285", "0.63676727", "0.6300657", "0.62851495", "0.6224062", "0.62041897", "0.61869335", "0.6082615", "0.6070392", "0.6063719", "0.60264426", "0.6016226", "0.59535724", "0.59106576", "0.58775485", "0.5842333", "0.5821943", "0.5775763", "0.57735604", "0.57628155", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.57215726", "0.5710119", "0.5703785", "0.5702994", "0.57003754", "0.5685365", "0.5671926", "0.5656465", "0.5656465", "0.5652494", "0.56356347", "0.56343204", "0.5630783", "0.56255555", "0.562371", "0.5623617", "0.56081486", "0.55989146", "0.55836356", "0.55726516", "0.5571996", "0.55581725", "0.55502903", "0.5540642", "0.553612", "0.5530869", "0.5524891", "0.5508014", "0.5505788", "0.5504125", "0.54911387", "0.54910237", "0.54687244", "0.5464267", "0.5462129", "0.5441496", "0.5436958", "0.5436958", "0.5417428", "0.54133224", "0.5404744", "0.5404419", "0.5403677", "0.5394553", "0.5392392", "0.5389293", "0.5382874", "0.5382519", "0.5378835", "0.5368792", "0.5367576", "0.5360309", "0.53597546", "0.53524005", "0.5348431", "0.53459954", "0.53459036", "0.53441846", "0.5340859", "0.5324643", "0.5310692", "0.5307261", "0.53035563" ]
0.84793603
0
Return a mapping of column positions and labels.
def get_labels(self) -> {int: str}:
    return {x: col.label for x, col in self._cols.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_columns(mapping):\n columns = []\n for name, column in mapping.items():\n columns.append(column.label(name))\n return columns", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def column_labels(self):\n return tuple(self._columns.keys())", "def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map", "def labels(self):\n \n return self.column_labels", "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_LABELS')\n for name, label, meta in zip(self.name, self.label, self.meta):\n label_table = cifti2.Cifti2LabelTable()\n for key, value in label.items():\n label_table[key] = (value[0],) + tuple(value[1])\n named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), label_table)\n mim.append(named_map)\n return mim", "def get_label_scores_mapping(labels, scores):\n return {label: scores[i] for i, label in enumerate(labels)}", "def get_mappings():\n original_dict = ClassifierDataset.get_labels()\n return dict(zip(original_dict.values(), original_dict.keys()))", "def column(self, label):\n dis = []\n for x in self.rows:\n dis = dis + [x[self.column_labels.index(label)]]\n return dis\n # return self.rows[self.column_labels.index(label)]", "def field_labels(label_row, datum_row):\n return dict(zip(label_row, datum_row))", "def _get_columns_mapping_dict():\n\n columns_mapping_dict = {}\n for original_header in COLUMN_HEADERS_MAPPER:\n new_header = COLUMN_HEADERS_MAPPER[original_header]\n columns_mapping_dict[new_header] = [original_header]\n return columns_mapping_dict", "def column_indexer(data):\n idCol = {label: index for index, label in enumerate(data.columns)}\n return idCol", "def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping", "def load_label_columns(self):\n with open(self.config.labels_local_path, 'r') as f:\n label_columns = yaml.safe_load(f)\n return label_columns", "def get_positions_from_labels(self, row_loc, col_loc):\n from modin.pandas.indexing import (\n is_boolean_array,\n is_list_like,\n is_range_like,\n boolean_mask_to_numeric,\n )\n\n lookups = []\n for axis, axis_loc in enumerate((row_loc, col_loc)):\n if is_scalar(axis_loc):\n axis_loc = np.array([axis_loc])\n if isinstance(axis_loc, pandas.RangeIndex):\n axis_lookup = axis_loc\n elif isinstance(axis_loc, slice) or is_range_like(axis_loc):\n if isinstance(axis_loc, slice) and axis_loc == slice(None):\n axis_lookup = axis_loc\n else:\n axis_labels = self.get_axis(axis)\n # `slice_indexer` returns a fully-defined numeric slice for a non-fully-defined labels-based slice\n # RangeIndex and range use a semi-open interval, while\n # slice_indexer uses a closed interval. 
Subtract 1 step from the\n # end of the interval to get the equivalent closed interval.\n if axis_loc.stop is None or not is_number(axis_loc.stop):\n slice_stop = axis_loc.stop\n else:\n slice_stop = axis_loc.stop - (\n 0 if axis_loc.step is None else axis_loc.step\n )\n axis_lookup = axis_labels.slice_indexer(\n axis_loc.start,\n slice_stop,\n axis_loc.step,\n )\n # Converting negative indices to their actual positions:\n axis_lookup = pandas.RangeIndex(\n start=(\n axis_lookup.start\n if axis_lookup.start >= 0\n else axis_lookup.start + len(axis_labels)\n ),\n stop=(\n axis_lookup.stop\n if axis_lookup.stop >= 0\n else axis_lookup.stop + len(axis_labels)\n ),\n step=axis_lookup.step,\n )\n elif self.has_multiindex(axis):\n # `Index.get_locs` raises an IndexError by itself if missing labels were provided,\n # we don't have to do missing-check for the received `axis_lookup`.\n if isinstance(axis_loc, pandas.MultiIndex):\n axis_lookup = self.get_axis(axis).get_indexer_for(axis_loc)\n else:\n axis_lookup = self.get_axis(axis).get_locs(axis_loc)\n elif is_boolean_array(axis_loc):\n axis_lookup = boolean_mask_to_numeric(axis_loc)\n else:\n axis_labels = self.get_axis(axis)\n if is_list_like(axis_loc) and not isinstance(\n axis_loc, (np.ndarray, pandas.Index)\n ):\n # `Index.get_indexer_for` works much faster with numpy arrays than with python lists,\n # so although we lose some time here on converting to numpy, `Index.get_indexer_for`\n # speedup covers the loss that we gain here.\n axis_loc = np.array(axis_loc, dtype=axis_labels.dtype)\n axis_lookup = axis_labels.get_indexer_for(axis_loc)\n # `Index.get_indexer_for` sets -1 value for missing labels, we have to verify whether\n # there are any -1 in the received indexer to raise a KeyError here.\n missing_mask = axis_lookup == -1\n if missing_mask.any():\n missing_labels = (\n axis_loc[missing_mask]\n if is_list_like(axis_loc)\n # If `axis_loc` is not a list-like then we can't select certain\n # labels that are missing and so printing the whole indexer\n else axis_loc\n )\n raise KeyError(missing_labels)\n\n if isinstance(axis_lookup, pandas.Index) and not is_range_like(axis_lookup):\n axis_lookup = axis_lookup.values\n\n lookups.append(axis_lookup)\n return lookups", "def map_clusters(labels, rows):\r\n counts = Counter(labels)\r\n mappings = {c + 1: ((counts[c] / rows) * 100) for c in sorted(counts)}\r\n\r\n return mappings", "def labels_map(self, data, labels):\n self._check_input_len(data)\n if not len(data) == len(labels):\n raise ValueError('data and labels must have the same length.')\n winmap = defaultdict(list)\n for x, l in zip(data, labels):\n winmap[self.winner(x)].append(l)\n for position in winmap:\n winmap[position] = Counter(winmap[position])\n return winmap", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def label_map_gen(df_main):\n # Function to flatten a list of list\n flatten = lambda l: [item for sublist in l for item in sublist]\n labels = list(set(flatten([l.split(' ') for l in df_main['tags'].values])))\n\n # Create list of labels\n label_map = {l: i for i, l in enumerate(labels)}\n return label_map", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] 
= 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] = 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def letter_to_column(self, pos):\n column_dict = {}\n column_dict['a'] = 0\n column_dict['b'] = 1\n column_dict['c'] = 2\n column_dict['d'] = 3\n column_dict['e'] = 4\n column_dict['f'] = 5\n column_dict['g'] = 6\n column_dict['h'] = 7\n column_dict['i'] = 8\n return column_dict[pos[0]]", "def getColumnIndices(*args, filepath=\"CO2.tab\"):\n # idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"SEG\": 0}\n idxDict = {\"PT\": 0, \"TM\": 0, \"HG\": 0, \"VISG\": 0, \"VISHL\": 0, \"ROG\": 0, \"ROHL\": 0}\n if filepath:\n cols = tabLineToList(readFullLine(filepath, 52))\n for key in idxDict:\n idxDict[key] = cols.index(key)\n return idxDict", "def get_mtz_map_columns(self):\n coefs = any_file(self.mtz_map, force_type=\"hkl\", raise_sorry_if_errors=False)\n #the first array should contain the map coefs for the 2Fextr-DFc map\n maplabels = coefs.file_object.as_miller_arrays()[0].info().labels\n #the second array should contain the map coefs for the Fextr-DFc map\n #maplabels_diff = coefs.file_object.as_miller_arrays()[1].info().labels\n \n return \",\".join(maplabels)", "def _get_labels(self, ind):\n pass", "def get_data_labels(answer_mapping_df, column):\n labels = []\n for i in answer_mapping_df[column].columns.values:\n labels.append(answer_mapping_df.xs((column, i), level=('q_code', 'a_code'), axis=1).iloc[0,0])\n return labels", "def _pos2label(self, p, labels):\n if labels is not None:\n if p in labels.keys():\n return labels[p]\n else:\n return ''\n # raise ValueError('Fatal ERROR: no label for this position in label dictionary!')\n else:\n if p == 1:\n return 'top'\n elif p == 2:\n return 'bottom'\n elif p == 3:\n return 'left'\n elif p == 4:\n return 'right'", "def create_label_map(label_lists, trailing_piece_tag=\"X\"):\n\n label_set = set()\n for labels in label_lists:\n label_set.update(labels)\n\n label_map = {label: i for i, label in enumerate(label_set)}\n\n if trailing_piece_tag not in label_set:\n label_map[trailing_piece_tag] = len(label_set)\n return label_map", "def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict", "def provide_label(self):\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.label]", "def GetColumnsOption(self, data) :\n indices = [ int(x.replace(self.label, '')) for x in data.columns if self.label in x and x.replace(self.label, '')!='' ]\n return indices", "def get_column_dict(self) -> HeaderToWells:\n return self._grid.columns", "def labeled_indices(self):\n return self._labeled_indices", "def calculate_positions(self):\n return {cell: (cell.column, -cell.row) for cell in self.game.get_cells()}", "def index_by(self, column_or_label):\n column = self._get_column(column_or_label)\n index = {}\n for key, row in zip(column, self.rows):\n index.setdefault(key, []).append(row)\n return index", "def _build_labels_dict(self, label_names):\n\n for i in range(len(label_names)):\n self.labels_index[label_names[i]] = i", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + 
header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def rank_labels(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n vals = self.clf.decision_function(vec)\n # print vals.shape\n # print len(self.labelmap)\n labelvals = {}\n for idx in range(len(self.labelmap)):\n labelvals[self.labelmap[idx]] = vals[0,idx]\n sortedlabels = sorted(labelvals.items(), key=itemgetter(1),\n reverse=True)\n labels = [item[0] for item in sortedlabels]\n return labels", "def nk_table(self):\n return self.map(\"keys\", \"values\")", "def labels(self):\n return self.label2cc.keys()", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def get_lab_col_cnt(self):\n return zip(self.get_labels(), self.colors, self.data)", "def nodes_names_map(self):\n return {nd.name: nd for nd in self.nodes}", "def get_label_indices(df: DataFrame, labels: list):\n return [idx for idx, name in enumerate(df.columns) if name in labels]", "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "def get_labels(label_file):\n labels = None\n with open(label_file, 'r') as infile:\n reader = csv.reader(infile)\n labels = dict((rows[0], rows[1]) for rows in reader)\n return labels", "def labels(self):\n return self.label(self.p_y_given_x)", "def nomenclatura():\n df = pd.read_csv(\"Data/nomenclatura_1.csv\", encoding = \"latin1\")\n #dict_axis = df.set_index('id').T.to_dict('list')\n dict_axis = dict( [ (i, [a,b]) for i, a,b in zip(df.id, df.latitude, df.longitude) ] )\n\n return dict_axis", "def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SCALARS')\n for name, meta in zip(self.name, self.meta):\n named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta))\n mim.append(named_map)\n return mim", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def labels(self) -> dict:\n raise NotImplementedError", "def column_order(self):\n return ((1, 2), (1, 0), (1, 1))", "def vector_columns(map, layer=None, getDict=True, **args):\n s = read_command('v.info', flags='c', map=map, layer=layer, quiet=True,\n **args)\n if getDict:\n result = dict()\n else:\n result = list()\n i = 0\n for line in s.splitlines():\n ctype, cname = line.split('|')\n if getDict:\n result[cname] = {'type': ctype, 'index': i}\n else:\n 
result.append(cname)\n i += 1\n\n return result", "def get_column_offsets(self):\n offsets = [x + self.bitcell_array_inst.lx() for x in self.bitcell_array.get_column_offsets()]\n return offsets", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels", "def cat_labels(self):\n try:\n return list(self.cats.columns)\n except AttributeError:\n return []", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def labels(self):\n return self._get_labels(self.label_vector)", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def labelpos(self):\n return self._labelpos", "def labels(self) -> pulumi.Output[Mapping[str, Any]]:\n return pulumi.get(self, \"labels\")", "def build_label_mapping(\n grouped_targeted_labels: List[Set[str]],\n nontargeted_labels: Optional[Set[str]] = None,\n) -> Dict[str, int]:\n mapping = {\n label: i + 1\n for i, label_group in enumerate(grouped_targeted_labels)\n for label in label_group\n }\n\n if nontargeted_labels:\n mapping.update({label: 0 for label in nontargeted_labels})\n\n return mapping", "def map_column_to_index(self, col):\n if col in self.column_maps:\n return\n\n # First construct the map from original ids to new ones.\n ids = pd.concat((self.train[col], self.test[col])).unique()\n n = len(ids)\n idmap = dict(itertools.izip(ids, xrange(n)))\n\n # Next use the map to convert the ids in-place.\n self.train.loc[:, col] = self.train[col].apply(lambda _id: idmap[_id])\n self.test.loc[:, col] = self.test[col].apply(lambda _id: idmap[_id])\n\n # Now swap key for value in the idmap to provide a way to convert back.\n reverse_map = {val: key for key, val in idmap.iteritems()}\n self.column_maps[col] = reverse_map", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def get_labels(self):\n\n for i in range(self.p.shape[0]):\n self.find(i)\n return self.p", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def labels_data(protein_data_path, columns):\n labels = pd.read_csv(protein_data_path, sep=\"\\t\").fillna(0)\n return labels[columns].astype(int).values", "def OwnsColMap(self):\n return _hypre.HypreParMatrix_OwnsColMap(self)", "def labels_to_slugs(self):\n return {\n column_attrs[LABEL]: reserve_encoded(column_name) for\n (column_name, column_attrs) in self.items()\n }", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n 
index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def lookup(self, row_labels, col_labels): # noqa: PR01, RT01, D200\n return self.default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)", "def matlabels(df, rowlabel_fn):\n return df.index.to_frame().apply(rowlabel_fn, axis=1)", "def get_categories_enumerated_key_map(self):\n return dict(enumerate([c.name for c in self.categories]))", "def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id", "def _get_dof_map(model: BDF) -> Dict[Tuple[int, int], int]:\n i = 0\n dof_map = {}\n spoints = []\n ps = []\n for nid, node_ref in model.nodes.items():\n if node_ref.type == 'GRID':\n for dof in range(1, 7):\n dof_map[(nid, dof)] = i\n i += 1\n for psi in node_ref.ps:\n nid_dof = (nid, int(psi))\n j = dof_map[nid_dof]\n ps.append(j)\n elif node_ref.type == 'SPOINT':\n spoints.append(node_ref)\n #dof_map[(nid, 0)] = i\n #i += 1\n else:\n raise NotImplementedError(node_ref)\n\n # we want the GRID points to be first\n assert len(spoints) == 0, spoints\n\n for nid in sorted(model.spoints.keys()):\n key = (nid, 0)\n if key not in dof_map:\n dof_map[key] = i\n i += 1\n assert len(dof_map) > 0\n return dof_map, ps", "def get_column_to_tags_mapping(\n self, config: cconfig.Config\n ) -> Optional[Dict[Any, List[str]]]:\n _ = self, config\n return None", "def provide_label(self):\n return [(k, v.shape) for k, v in self.label]", "def names(self):\n return self._names_to_cols.keys()", "def labeled_dicoms(self):\n return [sorted(self.data)[i-1][1:] for i in self.labeled]", "def from_labels_map(cls, labels_map):\n mask_index = cls()\n for index, value in iteritems(labels_map):\n mask_index[index] = CategoricalAttribute(\"label\", value)\n\n return mask_index", "def labels(self):\n return self._labels", "def plabels(self):\n return self._cache.plabels", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def add_labels(data_lists, table_labels):\n labeled_dictionary_collection = {}\n \n\n for symbol, data_list in data_lists.iteritems():\n if len(data_list) > 1:\n labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list))\n return labeled_dictionary_collection", "def _starts(self, column_labels):\n val = [self[c][0] for c in column_labels]\n starts = [0]\n values = [val]\n for i in range(1,self.num_rows):\n ival = [self[c][i] for c in column_labels ]\n if ival != val:\n starts.append(i)\n values.append(ival)\n val = ival\n return values, starts" ]
[ "0.73458505", "0.72522795", "0.68872035", "0.68740356", "0.6553106", "0.650619", "0.64447373", "0.6226804", "0.61503714", "0.60560364", "0.6047371", "0.6043159", "0.59742296", "0.5932194", "0.5919516", "0.5908463", "0.5892866", "0.5887391", "0.5864398", "0.5817711", "0.5793506", "0.5793506", "0.5793506", "0.5770052", "0.5768581", "0.57198256", "0.5710612", "0.5703075", "0.56874514", "0.568135", "0.5656913", "0.56435055", "0.56240636", "0.5587427", "0.5563365", "0.5555298", "0.5553645", "0.5553446", "0.5539496", "0.55272335", "0.5517542", "0.5517141", "0.5517141", "0.5517141", "0.5517141", "0.5517141", "0.5517141", "0.5517141", "0.5517141", "0.55165815", "0.55108273", "0.550754", "0.54999554", "0.54999554", "0.54999554", "0.54999554", "0.54999554", "0.54869795", "0.5484188", "0.5472025", "0.5470485", "0.5469144", "0.54641324", "0.54592997", "0.54525167", "0.5449935", "0.54486054", "0.54374963", "0.5429516", "0.5425183", "0.5422446", "0.5412271", "0.54057556", "0.54030865", "0.5396896", "0.536645", "0.53495425", "0.53316075", "0.53117746", "0.5311373", "0.5308233", "0.5305253", "0.5302516", "0.52872425", "0.5286039", "0.5283765", "0.5282924", "0.5281886", "0.5279067", "0.5277311", "0.527653", "0.52760607", "0.52719796", "0.52652436", "0.52639234", "0.525007", "0.5243087", "0.52423924", "0.5236346", "0.5229562" ]
0.7213927
2
Move all files of this image to the output directories defined by each column's label. Returns number of files moved.
def to_disk(self, dry_run: bool) -> int:
    file_counter = 0
    for k, col in self._cols.items():
        self._moved_cols.append(k)
        file_counter += col.move(dry_run=dry_run)
    return file_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def move(self, dry_run: bool) -> int:\n if self.label == 'ignore':\n return 0\n\n file_counter = 0\n for crop in self._content:\n if not dry_run:\n crop.move_to(self.label)\n file_counter += 1\n\n return file_counter", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def img2folder(idir, img_lbl_df, label=\"breed\", img=\"img_name\"):\n lbls = set(img_lbl_df[label])\n\n for img, label in zip(img_lbl_df[img], img_lbl_df[label]):\n for folder in lbls:\n if label == folder:\n try:\n shutil.move(join(idir, img), join(idir, folder, img))\n except FileNotFoundError:\n print(f\"{img} not found.\")\n pass\n\n print(\"Done\")", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. 
Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def move_images_and_list(path, final_path):\n #Lists all created folders\n directories = os.listdir(path)\n #Array that stores the path to each image\n lists = []\n #This variable will be used to give a unique name to each image\n tot_images = 0\n #Creates the path where will be stored all files\n if not os.path.exists(final_path):\n os.mkdir(final_path)\n #Iterates over each folder\n for ph in directories:\n #Iterates over each line of the generated file images.lst\n for img in open(os.path.join(path, ph, \"images.lst\")).readlines():\n \"\"\"Images are stored with a name, how many objects have and\n where it is, like this '01_0252_0067_0139_0222.jpg 1 252 67 139 222'\n so these five lines under changes the first part before '_', because\n in some cases, the command opencv_createsamples creates a same name\n to different positive images, this ensures a different name to each\n image\"\"\"\n split_space = img.split()\n split_underscore = split_space[0].split(\"_\")\n split_underscore[0] = str(tot_images)\n join_underscore = \"_\".join(split_underscore)\n join_space = \" \".join([join_underscore, *split_space[1:]])\n #Appends the new image's name to the 
list\n lists.append(join_space)\n #Moves each image in the folder to the final path, with a new name\n move(os.path.join(path, ph, split_space[0]),\n os.path.join(final_path, join_space.split()[0]))\n tot_images += 1\n #Writes a file withe the name of all images in the folder\n with open(os.path.join(final_path, \"images.lst\"), \"w+\") as f:\n for i in lists:\n f.write(\"\".join([i, '\\n']))\n #Removes the temporary path\n rmtree(os.path.abspath(path))\n #Name of the created file\n return \"images.lst\"", "def move_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n # if 'OCT_V2' in sorting_string:\r\n # cat_string = 'Unlabelled'\r\n # else:\r\n # cat_string = \r\n \r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, 
f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def alternativeSeperation(path=\"data\"):\n path = os.path.join(path, \"val\")\n val_df = pd.read_csv(os.path.join(path, \"val_annotations.txt\"), delimiter=\"\\t\",\n header=None, index_col=0)\n val_labels = val_df.to_dict()[1]\n\n for image, label in val_labels.items():\n label_path = os.path.join(path, label)\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n shutil.move(os.path.join(os.path.join(path, \"images\"), image), label_path)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... 
dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def move_files(origin=''):\n\tpng_file_list = glob.glob(origin+'*png')\n\tif png_file_list != []:\n\t\tif not os.path.exists(origin+'positions-histograms'):\n\t\t\tos.makedirs(origin+'positions-histograms')\n\t\tfor png in png_file_list:\n\t\t\tshutil.move(str(png), origin+'positions-histograms')", "def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)", "def add_to_split_numbered(rec_dir, target, label):\n for file_name in os.listdir(rec_dir):\n path = os.path.join(rec_dir, file_name)\n if (os.path.isfile(path)):\n count = 0\n if os.path.isdir(os.path.join(target, str(label))):\n count = len([f for f in os.listdir(os.path.join(target, str(label)))])\n else:\n os.makedirs(os.path.join(target, str(label)))\n shutil.copy(path, os.path.join(target, str(label), str(count)))", "def copyFiles(img, lbl):\n if not os.path.exists(os.path.join(out_root_dir, \"images\")):\n os.makedirs(os.path.join(out_root_dir, \"images\"))\n if not os.path.exists(os.path.join(out_root_dir,\"labels\")):\n os.makedirs(os.path.join(out_root_dir, \"labels\"))\n # copy\n for i,f in enumerate(img):\n img_dstdir = os.path.join(out_root_dir, \"images\", os.path.basename(f))\n lbl_srcdir = os.path.join(in_root_dir, \"labels\", os.path.basename(f))\n lbl_dstdir = os.path.join(out_root_dir, \"labels\", os.path.basename(f))\n \n # copy images and labels\n try:\n # copy image\n copyfile(f, img_dstdir)\n except:\n print(f\"Error trying copy image file {f}\")\n \n try:\n # copy label\n copyfile(lbl_srcdir, lbl_dstdir)\n except:\n print(f\"Error trying copy label file {lbl_srcdir}\")", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + 
\"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + \"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def MovieScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in movtypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(moviePath, filename))\r\n print color.GREEN + 'File succesfully moved!' 
+ color.ENDC\r\n print 'Finished Scanning For Movies'", "def bulk_process_images(inputpath, outputpath, extension):\n\n for dirpath, dirnames, filenames in os.walk(inputpath):\n structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dirpath, file)\n dest = os.path.join(structure, file)\n img = load_and_preprocess_image(src)\n cv2.imwrite(dest, img)", "def move_and_filter_tiles_folders(tiles_folders, classes, slides_id, cases_ids, output_folder, background_pixel_value,\n background_threshold, expected_shape, logger):\n def move_jpeg_file(inputs):\n slide_id, img_filepath = inputs[0], inputs[1]\n is_mostly_background, percent_background = is_tile_mostly_background(img_filepath=img_filepath,\n background_pixel_value=background_pixel_value,\n background_threshold=background_threshold,\n expected_shape=expected_shape)\n\n # If img considered not mostly background, move to processed folder, otherwise is discarded\n if not is_mostly_background: # copy to dest folder\n new_filepath = os.path.join(output_folder, slide_id, os.path.basename(img_filepath))\n shutil.copyfile(os.path.abspath(img_filepath),\n os.path.abspath(new_filepath))\n\n assert len(tiles_folders) == len(classes) == len(slides_id) == len(cases_ids)\n\n destination_folders = []\n for i, (tile_folder, slide_id, class_, case_id) in \\\n tqdm(enumerate(zip(tiles_folders, slides_id, classes, cases_ids)), total=len(tiles_folders)):\n new_folderpath = os.path.abspath(os.path.join(output_folder, slide_id))\n destination_folders.append(new_folderpath)\n # if destination folder already exists then folder already processed -> skip\n if os.path.exists(new_folderpath):\n continue\n os.makedirs(new_folderpath)\n\n images_filenames = [f for f in os.listdir(tile_folder) if f.endswith(('.jpeg', '.jpg', '.png', '.pt'))]\n\n # Write a summary.txt file containing all the tiles of WSI, before background discarding\n with open(os.path.join(new_folderpath, 'summary.txt'), 'w') as f:\n f.write('\\n'.join(images_filenames))\n # Write file containing label as int\n with open(os.path.join(new_folderpath, 'label.txt'), 'w') as f:\n f.write(str(class_))\n # Write file containing case id\n with open(os.path.join(new_folderpath, 'case_id.txt'), 'w') as f:\n f.write(case_id)\n\n # Add slide if within arguments of move_jpeg_file\n try:\n with futures.ThreadPoolExecutor(max_workers=N_PROCESSES) as pool:\n images_filepaths = [(slide_id, os.path.join(tile_folder, img_filename))\n for img_filename in images_filenames]\n list(pool.map(move_jpeg_file, images_filepaths))\n except (SyntaxError, ValueError) as e:\n logger.warn(' discarding %s because some image files are corrumpted: %s' % (slide_id, e))\n continue\n\n # return all destination slides folders\n return list(map(os.path.abspath, destination_folders))", "def move_calc_files(ase_obj, new_label):\n old_label = ase_obj.calc.label\n home, scratch = get_active_dirs()\n\n home_exts, scratch_exts = ['.com'], ['.log', '.chk', '.fchk']\n\n ssh = remote.connect_server(ssh=True)\n\n commands = []\n for ext in home_exts:\n command = ''.join(['mv', home, old_label, ext, ',', home, new_label, ext])\n commands.append(command)\n for ext in scratch_exts:\n command = ''.join(['mv', scratch, old_label, ext, ',', scratch, new_label, ext])\n commands.append(command)\n\n for command in commands:\n i,o,e = ssh.exec_command(command)\n\n ssh.close()", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n 
os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def order_test_set(path_to_images, path_to_csv, path_to_save_test):\n\n try:\n with open(path_to_csv, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter = ',')\n\n for i, row in enumerate(reader):\n if i == 0: # continue the first line beacuse of the header line\n continue\n\n img_name = row[-1].replace('Test/', '')\n label = row[-2]\n\n path_to_folder = os.path.join(path_to_save_test, label)\n\n if not os.path.isdir(path_to_folder): # if the dir. not exist\n os.makedirs(path_to_folder) # create the directory\n\n img_full_path = os.path.join(path_to_images, img_name)\n\n print(\"Copying \", img_full_path, \" to \", path_to_folder)\n shutil.copy(img_full_path, path_to_folder)\n\n except:\n print(\"[INFO]: Error reading csv file.\")", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n 
print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)", "def moveOutput(self,id, max_id,path,file):\n Dir_Base=path +'Submission_'\n \n for i in range(1, max_id):\n if not os.path.isdir( Dir_Base + str(i) + '/'):\n cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')\n cmd_out = runCommand(cmd) \n common.logger.debug(str(cmd_out))\n cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null' \n \n try:\n cmd_out = runCommand(cmd) \n common.logger.debug(cmd_out)\n except:\n msg = 'no output to move for job '+str(id)\n common.logger.debug(msg)\n pass\n return", "def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n 
result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def move_backups(self, name, source, destination, regex):\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n match = pattern.match(entry)\n if match is None:\n continue\n if name == match.group(1):\n self.logger.debug('Archiving %s', entry)\n path = os.path.join(source, entry)\n result = self.os_rename(path, os.path.join(destination, entry))\n if result != 0:\n return result\n return 0", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def move(matches):\n for source in matches:\n target = matches[source]\n os.rename(source, target)", "def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])", "def batch_mover(pattern, directory=None):\n if directory is None:\n directory = Path().cwd()\n\n for i in os.scandir(directory):\n if file_check(pattern, i.name):\n pass\n # shutil.move(i.name, yeah we gotta change a lot here", "def jarvis(input_path, output_path): \n\n if not os.path.exists(f'{output_path}'):\n os.makedirs(f'{output_path}')\n\n file_list = [filename for filename in os.listdir(f'{input_path}') if '.tif' in filename]\n\n for filename in file_list:\n pathname = os.path.join(input_path, filename)\n new_name = f\"{output_path}{filename.replace('.lif - ', '_').replace('_5x-', '_')}\"\n copyfile(pathname, new_name)\n logger.info(f'{new_name}')", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = 
item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def move_fast5_files(args):\n # Create pandas dataframe with x columns.\n fast5_df = pd.DataFrame(columns=['fast5_file', 'subfolder', 'mv_command'])\n\n fast5_df['fast5_file'] = [fast5_file for fast5_file in os.listdir(READS_DIR) if fast5_file.endswith(\".fast5\")]\n fast5_df['subfolder'] = [standardise_int_length(int(i / 4000)) for i in xrange(len(fast5_df))]\n fast5_df['mv_command'] = [\"mv %s %s/\" % (fast5_file, subfolder)\n for fast5_file, subfolder in izip(fast5_df.fast5_file, fast5_df.subfolder)]\n\n subdirectories = fast5_df.subfolder.unique().tolist()\n print(subdirectories)\n for subdirectory in subdirectories:\n # Create directory\n if os.path.isdir(subdirectory):\n # If directory already exists, make sure nothing is inside\n if len(os.listdir(subdirectory)) > 0:\n sys.exit(\"Directory '%s' exists with files inside\" % subdirectory)\n else:\n os.mkdir(subdirectory)\n\n processes = (subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for cmd in fast5_df.mv_command.tolist())\n\n # We use the islice command to split our list of mv commands into five smaller lists.\n running_processes = list(itertools.islice(processes, args.num_threads))\n while running_processes:\n for i, process in enumerate(running_processes):\n if process.poll() is not None: # Means that the process is complete!\n stdout, stderr = process.communicate() # Get the output of the completed process\n if not stderr == \"\": # Print stderr if it exists.\n print stderr\n running_processes[i] = next(processes, None)\n # Run the next number in the list.\n if running_processes[i] is None: # No more commands waiting to be processed.\n del running_processes[i] # Not a valid process.\n break\n\n return subdirectories", "def convert_dir(self) -> int:\n old_classifiers: List[Classifier] = self.get_entities_by_entity_type(\n self.pack.classifiers, FileType.OLD_CLASSIFIER\n )\n intersection_fields = self.get_classifiers_schema_intersection_fields()\n for old_classifier in old_classifiers:\n self.create_classifier_from_old_classifier(\n old_classifier, intersection_fields\n )\n self.create_mapper_from_old_classifier(old_classifier)\n\n return 0", "def merge_folders():\r\n from shutil import copyfile\r\n # Merge all folders into main folder\r\n grp_img_dir = os.listdir('Group_Test_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Test_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n new_directory = 'Group_Test_Images'+'/'+img_label\r\n \r\n try:\r\n os.makedirs(new_directory)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n \r\n file_names = os.listdir('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label)\r\n \r\n for file in file_names:\r\n copyfile('Group_Test_Images'+'/'+grp_img_folder+'/'+img_label+'/'+file, new_directory+'/'+file)", "def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = 
os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def process_all_images(input_path: str, output_path: str, resized_image_shape: Tuple,transformations:List[TransformationsEnum]):\n\n output_images_path = os.path.join(output_path, \"images\")\n csv_file_path = os.path.join(output_path, \"metadata.csv\")\n\n prepare_folders(output_path, output_images_path)\n prepare_csv(csv_file_path)\n\n df = pd.read_csv(csv_file_path)\n current_id = 1 #has to check the current id in the folder or be set to 1 if none\n categories_names = list(os.listdir(input_path))\n\n encoder = LabelBinarizer()\n encoder.fit(categories_names)\n\n\n for folder_name in os.listdir(input_path):\n current_category_name = folder_name\n category_path = os.path.join(input_path, folder_name)\n images_in_category = list(Path(category_path).glob(\"*.jpg\"))\n df, current_id = process_image(\n df, current_id, encoder, current_category_name, images_in_category,output_images_path, resized_image_shape,transformations\n )\n\n df.to_csv(csv_file_path, index=False, quotechar='\"', encoding='ascii')\n\n print(\"done, processed\", len(df), \"images\")", "def main(src, dst, size):\r\n\ttrain_dst = os.path.join(dst, 'train')\r\n\ttest_dst = os.path.join(dst, 'test')\r\n\tlabel_paths = [os.path.join(src, 'labels', i) for i in os.listdir(os.path.join(src, 'labels'))]\r\n\timage_folders = [os.path.join(src, i) for i in os.listdir(src) if i != \"labels\"]\r\n\r\n\timage_paths = {}\r\n\tfor folder in image_folders:\r\n\t\timages = os.listdir(folder)\r\n\t\timage_paths[os.path.basename(folder)] = [os.path.join(folder, i) for i in images]\r\n\tif DEBUG:\r\n\t\tprint(\"image folders are : {}\".format(image_paths.keys()))\r\n\r\n\t# for each image assign its xyz coordinate\r\n\targs = []\r\n\r\n\ttrain_labels = [\"B1\", \"B2\", \"B3\", \"B5\", \"B6\"]\r\n\ttest_labels = [\"B4\"]\r\n\r\n\tfor l_p in label_paths:\r\n\t\tfolder = os.path.basename(l_p).split('_')[0]\r\n\t\tcamera = os.path.basename(l_p).split('_')[-1][0:-4]\r\n\r\n\t\timages = image_paths[folder]\r\n\t\tlabels = get_xyz_coord(l_p)\r\n\t\timages = list(filter(lambda x: os.path.basename(x).split(\"_\")[0] == camera, images))\r\n\t\tif DEBUG:\r\n\t\t\tprint(l_p, camera)\r\n\t\tfor i in images:\r\n\t\t\tindex = int(os.path.basename(i).split('_')[-1][0:-4])\r\n\t\t\tif os.path.basename(l_p)[0:2] in train_labels:\r\n\t\t\t\tdestination = os.path.join(train_dst, folder, os.path.basename(i))\r\n\t\t\telif os.path.basename(l_p)[0:2] in test_labels:\r\n\t\t\t\tdestination = os.path.join(test_dst, folder, os.path.basename(i))\r\n\t\t\telse:\r\n\t\t\t\traise ValueError\r\n\t\t\targs.append([i, destination, reorder(labels[index]), size])\r\n\r\n\tp = 
Pool()\r\n\tresults = list(tqdm.tqdm(p.imap(image_process, args), ascii=True, total=len(args)))\r\n\tp.close()\r\n\tp.join()\r\n\r\n\tannotations_train = edict()\r\n\tannotations_test = edict()\r\n\tfor r in results:\r\n\t\tdestination, uv_coord, depth, xyz, k = r\r\n\t\tfolder = os.path.basename(os.path.dirname(destination))\r\n\t\timage = os.path.basename(destination)\r\n\r\n\t\tif folder[0:2] in train_labels:\r\n\t\t\tannotations = annotations_train\r\n\t\telif folder[0:2] in test_labels:\r\n\t\t\tannotations = annotations_test\r\n\t\telse:\r\n\t\t\traise ValueError\r\n\r\n\t\tif folder not in annotations:\r\n\t\t\tannotations[folder] = edict()\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\telse:\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\tannotations[folder][image].uv_coord = uv_coord\r\n\t\tannotations[folder][image].k = k\r\n\t\tannotations[folder][image].depth = depth\r\n\t\tannotations[folder][image].xyz = xyz\r\n\r\n\twith open(os.path.join(train_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_train, handle)\r\n\r\n\twith open(os.path.join(test_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_test, handle)", "def move_files(self, download_path):\n if self.file_list is None:\n self._set_file_list()\n\n for individual_file in self.file_list:\n source_path = os.path.join(self.base_dir, individual_file)\n dest_path = os.path.join(download_path, individual_file)\n # We don't move files that don't exist\n if not os.path.exists(source_path):\n continue\n\n # Make sure the destination directory exists\n if not os.path.exists(os.path.dirname(dest_path)):\n os.makedirs(os.path.dirname(dest_path))\n if self.to_copy:\n shutil.copy(source_path, dest_path)\n else:\n os.rename(source_path, dest_path)\n return", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def change_imagens(current_folder, destination_folder, name=\"crosswalk\", qtd=0, dim=(128, 64)):\n\n img_path = [os.path.join(current_folder, file) for file in os.listdir(current_folder)]\n qtd_img = 1\n\n for img in img_path:\n img_name = os.path.split(img)[1].split(\"/\")[0]\n extension = os.path.split(img_name)[1].split(\".\")[0]\n\n new_name = name\n saved_name = new_name + \"_\" + str(qtd_img + qtd)\n print(img_name + \" -> \" + saved_name + \".jpg\")\n\n try:\n saved_folder = destination + \"/\"\n\n # carrega a imagem\n img = Image.open(current_folder + \"/\" + img_name)\n # converte a imagem (PIL) para numpy array\n imgNp = np.array(img,'uint8')\n # redimensionar a imagem\n imgNp = cv2.resize(imgNp, dim)\n\n # Cria a pasta positivas_final e salva as imagens\n pathlib.Path(saved_folder).mkdir(parents=True, exist_ok=True)\n cv2.imwrite(saved_folder + saved_name + \".jpg\", imgNp)\n\n qtd_img += 1\n\n except ValueError:\n print('.')", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = 
open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def move_file(self, ctx):\n pass", "def bulk_rename_files(input_path, output_path, suffix, new_suffix):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n src = os.path.join(dir_path, file)\n f_name, ext = os.path.splitext(file)\n if not f_name.endswith(suffix):\n file = f_name + new_suffix + ext\n dest = os.path.join(structure, file)\n os.rename(src, dest)", "def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0", "def bulk_augment_images(input_path, output_path, extension, augmentation, label_type, label_threshold=-1):\n for dir_path, dir_names, filenames in os.walk(input_path):\n structure = os.path.join(output_path, dir_path[len(input_path) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dir_path, file)\n label = get_labels([src], label_type)[0]\n if label > label_threshold:\n img = cv2.imread(src, 0)\n f_name, f_ext = os.path.splitext(file)\n if augmentation == 'flip':\n img = np.flip(img, axis=-1)\n file = f_name + \"_flipped\" + f_ext\n elif augmentation == 'original':\n file = f_name + \"_original\" + f_ext\n elif augmentation == 'rotate_crop':\n rotation = np.random.choice((-10, 10))\n img = rotate_and_crop_image(img, rotation)\n file = f_name + \"_rotated\" + f_ext\n else:\n raise ValueError(\n \"Invalid value for 'augmentation'. 
Value can be 'flip', 'original', 'rotate_crop, \"\n \"value was: {}\".format(augmentation))\n dest = os.path.join(structure, file)\n cv2.imwrite(dest, img)", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def move_ocr_results(doc_dict):\n # get OCR result files from OCR output directory\n result_files = os.listdir(os.path.join(config.TOC_OCR_OUT, doc_dict['name']))\n if len(result_files) == 0:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Result files not found in {os.path.join(config.TOC_OCR_OUT, doc_dict['name'])}...\")\n\n for item in result_files:\n try:\n\n # check if does not yet exist in document root directory\n if not os.path.isfile(os.path.join(doc_dict['path'], item)):\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Copying {os.path.join(config.TOC_OCR_OUT, doc_dict['name'], item)} to {doc_dict['path']}...\")\n\n # copy the output files if they are not in the document root directory\n shutil.copy2(src=os.path.join(config.TOC_OCR_OUT,doc_dict['name'], item), dst=doc_dict['path'])\n\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} WARNING (OCR): File {item} is already in the directory {doc_dict['path']}...\")\n except:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to copy result file {item} to {doc_dict['path']}...\")", "def move_from_temp_directory(self):", "def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)", "def rename_images():\r\n grp_img_dir = os.listdir('Group_Training_Images')\r\n \r\n for grp_img_folder in grp_img_dir:\r\n image_folders = os.listdir('Group_Training_Images'+'/'+grp_img_folder)\r\n \r\n for img_label in image_folders:\r\n image_path = 'Group_Training_Images'+'/'+grp_img_folder+'/'+img_label\r\n \r\n original_file_names = os.listdir(image_path)\r\n \r\n if len(original_file_names) > 1:\r\n for idx, img in enumerate(os.listdir(image_path)):\r\n assert '.jpeg' in img or '.jpg' in img, img +' incorrect format'\r\n new_name = img_label+'_'+grp_img_folder+'_'+str(idx+1)+'.jpeg'\r\n os.rename(image_path+'/'+img, image_path+'/'+ new_name)\r\n else:\r\n assert ('.jpeg' in original_file_names[0] or \r\n '.jpg' in original_file_names[0]), original_file_names[0] +' incorrect format'\r\n new_name = img_label+'_'+grp_img_folder+'.jpeg'\r\n os.rename(image_path+'/'+original_file_names[0], image_path+'/'+ new_name)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n 
self.open_files.append(fname)", "def move_prev_image(self):\r\n self.index -= 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n\r\n #Add Current Label\r\n if 'OCT_V2' in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index]) # change path to be out of df\r\n else:\r\n self.master.quit()", "def move(self, direction):\r\n # replace with your code\r\n row_increment = OFFSETS[direction][0]\r\n col_increment = OFFSETS[direction][1]\r\n changed = False\r\n for header in self._grid_headers[direction]:\r\n row_header = header[0]\r\n col_header = header[1]\r\n source_line = []\r\n # get the source line first\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n source_line.append(self.get_tile(row_header, col_header))\r\n row_header += row_increment\r\n col_header += col_increment\r\n # merge\r\n result_line = merge(source_line)\r\n # write the result back\r\n row_header = header[0]\r\n col_header = header[1]\r\n result_line_index = 0\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n self.set_tile(row_header, col_header, result_line[result_line_index])\r\n if result_line[result_line_index] != source_line[result_line_index]:\r\n changed = True\r\n result_line_index += 1\r\n row_header += row_increment\r\n col_header += col_increment\r\n if changed:\r\n self.new_tile()", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def rename_files(dir): #Copy files to rootdir, and rename (according to extension)\n\n #total file count (for WSJ should be 500)\n total = 0\n\n #COPY and RENAME\n print \"\\n==========================\\nCopying files from {} to {}...\\n\".format(originalDir, rootdir)\n\n for subdir, dirs, files in os.walk(dir):\n for file in files:\n if ((file != \".DS_Store\") and (file != \"CVS\") and (file[-4:] == (\".ref\"))):\n total += 1\n origName = file #saving original name for printing\n\n shutil.copy(subdir + \"/\" + file, rootdir + 
str(total) + extension)\n\n //os.rename(rootdir + file, rootdir + str(total) + extension)\n\n print \"\\tCopied file: {} to \\n\\t\\t {}\".format(file, rootdir)\n print \"\\t\\t\\t and renamed it to: {} \".format((str(total) + extension))\n\n print \"\\n============ Total files copied and renamed: %d ==============\\n\" % total", "def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)", "def move_images(dirSrc, dirDest):\n usedFnames = set()\n for path, dirs, files in os.walk(dirSrc):\n for fname in files:\n if re.search('\\\\.jpe?g', fname.lower()) is None:\n continue\n fnameNormalized = re.sub('\\\\.[^.]*$', '.jpg', fname)\n if fnameNormalized in usedFnames:\n print('Warning: duplicate image with the name \"' + fnameNormalized + '\".')\n fnameSrc = os.path.join(path, fname)\n fnameDest = os.path.join(dirDest, fnameNormalized)\n shutil.copy2(fnameSrc, fnameDest)", "def move_files(sim_dir, dest_dir, file_patterns):\n for f in file_patterns:\n for p in glob.glob1(sim_dir, f):\n try:\n shutil.move(os.path.join(sim_dir, p), os.path.join(dest_dir, p))\n except Exception as e:\n print(\n \"error while copy ing file from {} to {}\\n{}\".format(\n sim_dir, dest_dir, e\n )\n )", "def classify(source_name):\n maindir = os.path.dirname(__file__)\n subdir = os.path.join(maindir, source_name)\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n #for fits_file in glob.glob('*.fits')\n for fits_file in glob.glob('*.fits'):\n fits_content = fits.open(fits_file)\n try:\n if fits_content[0].header['targname'] == source_name:\n fits_content.close()\n new_name = os.path.join(subdir, fits_file)\n os.rename(fits_file, new_name)\n print 'moved file {0}'.format(fits_file)\n except KeyError:\n pass\n finally:\n fits_content.close()", "def rotate_images(data_folder, rots_per_pic):\n\n\tprint \"Rotating images...\"\n\n\t#search for images in folder iteratively\n\told_paths = []\n\tfor folder, subs, files in os.walk(data_folder):\n\t\tfor filename in files:\n\t\t\tif filename.endswith('.png') or filename.endswith('.jpg'):\n\t\t\t\told_paths.append(os.path.join(folder, filename))\n\t#sorts the paths obtained\n\told_paths.sort()\n\n\told_paths_with_sums = {}\n\n\tfor filename in old_paths:\n\t\told_paths_with_sums[filename] = 0\n\n\t#counts how many times the images were already processed \n\tnew_paths = []\n\tall_files_sum = 0\n\talready_processed_sum = 0\n\tfor filename in old_paths:\n\t\tif \"processed\" not in filename:\n\t\t\tall_files_sum = all_files_sum + 1\n\t\t\tnew_paths.append(filename)\n\t\t\tprint('File found:')\n\t\t\tprint filename\n\t\telse:\n\t\t\talready_processed_sum = already_processed_sum + 1\n\t\t\tmatching = [s for s in new_paths if ((filename.partition(\"_processed_\")[0]+\".png\")==s or (filename.partition(\"_processed_\")[0]+\".jpg\")==s)]\n\t\t\tfor i in matching:\n\t\t\t\told_paths_with_sums[i] = old_paths_with_sums[i] + 1\n\t\t\t\tif old_paths_with_sums[i] >= rots_per_pic:\n\t\t\t\t\tnew_paths.remove(i)\n\t\t\t\t\tprint('File already processed '+str(old_paths_with_sums[i])+' time(s):')\n\t\t\t\t\tprint(i)\n\t\t\t\telse:\n\t\t\t\t\tprint('File processed '+str(old_paths_with_sums[i])+' time(s):')\n\t\t\t\t\tprint(i)\n\n\tprocessed_sum = 0\n\ttoo_big_angles_sum = 0\n\tno_desc_found_sum = 0\n\tmarkers_out_of_mesh = 0\n\n\tfor current_path in new_paths:\n\t\t#rotates image as many times as needed to achieve the desired number of rotations\n\t\tfor i in 
range(int(rots_per_pic) - old_paths_with_sums[current_path]):\n\t\t\tpath = current_path\n\t\t\t\n\t\t\t#loads files generated by Zface if they exist and are not empty\n\t\t\tif (os.path.isfile(path+'.mesh3D') and\n\t\t\t\tos.path.isfile(path+'.mesh2D') and\n\t\t\t\tos.path.isfile(path+'.ctrl2D') and\n\t\t\t\tos.path.isfile(path+'.pars') and\n\t\t\t\tos.stat(path+'.mesh3D').st_size != 0 and\n\t\t\t\tos.stat(path+'.mesh2D').st_size != 0 and\n\t\t\t\tos.stat(path+'.ctrl2D').st_size != 0 and\n\t\t\t\tos.stat(path+'.pars').st_size != 0):\n\t\t\t\tsrc3 = np.loadtxt(path+'.mesh3D')\n\t\t\t\tsrc2 = np.loadtxt(path+'.mesh2D')\n\t\t\t\tctrl2 = np.loadtxt(path+'.ctrl2D')\n\t\t\t\tscale = np.loadtxt(path+'.pars')[0]\n\t\t\t\ttranslx = np.loadtxt(path+'.pars')[1]\n\t\t\t\ttransly = np.loadtxt(path+'.pars')[2]\n\t\t\t\tpitch = np.loadtxt(path+'.pars')[3]\n\t\t\t\tyaw = np.loadtxt(path+'.pars')[4]\n\t\t\t\troll = np.loadtxt(path+'.pars')[5]\n\n\t\t\t\t#tests wether or not initial rotation is too large\n\t\t\t\tif (abs(yaw)<radians(30) and abs(pitch)<radians(15)):\n\n\t\t\t\t\timage = data.load(path)\n\t\t\t\t\trows, cols = image.shape[0], image.shape[1]\n\n\t\t\t\t\tx = src3[:,0]\n\t\t\t\t\ty = src3[:,1]\n\t\t\t\t\tz = src3[:,2]\n\n\t\t\t\t\t#transform 3D mesh from normalized space and rotation to actual space and rotation\n\t\t\t\t\tx = x*cos(roll)+y*-sin(roll)\n\t\t\t\t\ty = x*sin(roll)+y*cos(roll)\n\t\t\t\t\tz = z\n\n\t\t\t\t\tx = x*cos(yaw)+z*sin(yaw)\n\t\t\t\t\ty = y\n\t\t\t\t\tz = x*-sin(yaw)+z*cos(yaw)\n\n\t\t\t\t\tx = x\n\t\t\t\t\ty = y*cos(pitch)+z*-sin(pitch)\n\t\t\t\t\tz = y*sin(pitch)+z*cos(pitch)\n\n\t\t\t\t\tx = x*scale+translx\n\t\t\t\t\ty = y*scale+transly\n\n\t\t\t\t\t#ortographically projects the 3D mesh to 2D (this will be our source for the Piecewise Affine Transform)\n\t\t\t\t\tsrc_cols = x\n\t\t\t\t\tsrc_rows = y\n\n\t\t\t\t\tsrc_rows, src_cols = np.meshgrid(src_rows, src_cols, sparse=True)\n\t\t\t\t\tsrc = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n\t\t\t\t\t#transforms it back to normalized space\n\t\t\t\t\tx = (x-translx)/scale\n\t\t\t\t\ty = (y-transly)/scale\n\n\t\t\t\t\t#rotates it back to 0 rotation\n\t\t\t\t\tyaw = -yaw\n\t\t\t\t\tpitch = -pitch\n\t\t\t\t\troll = -roll\n\n\t\t\t\t\t#adds random rotation\n\t\t\t\t\treal_yaw = radians(random.uniform(-30, 30))\n\t\t\t\t\treal_pitch = radians(random.uniform(-15, 15))\n\t\t\t\t\treal_roll = 0\n\n\t\t\t\t\tyaw = yaw + real_yaw\n\t\t\t\t\tpitch = pitch + real_pitch\n\t\t\t\t\troll = roll + real_roll\n\n\t\t\t\t\tx = x*cos(roll)+y*-sin(roll)\n\t\t\t\t\ty = x*sin(roll)+y*cos(roll)\n\t\t\t\t\tz = z\n\n\t\t\t\t\tx = x*cos(yaw)+z*sin(yaw)\n\t\t\t\t\ty = y\n\t\t\t\t\tz = x*-sin(yaw)+z*cos(yaw)\n\n\t\t\t\t\tx = x\n\t\t\t\t\ty = y*cos(pitch)+z*-sin(pitch)\n\t\t\t\t\tz = y*sin(pitch)+z*cos(pitch)\n\n\t\t\t\t\t#transforms it back to real space\n\t\t\t\t\tx = x*scale+translx\n\t\t\t\t\ty = y*scale+transly\n\n\t\t\t\t\t#orthographic projection of new coordinates will be the destination for PiecewiseAffineTransform\n\t\t\t\t\tdst_cols = x\n\t\t\t\t\tdst_rows = y\n\t\t\t\t\tdst = np.vstack([dst_cols, dst_rows]).T\n\n\t\t\t\t\tout_rows = rows\n\t\t\t\t\tout_cols = cols\n\n\t\t\t\t\t#looks for triangles formed by Delaunay triangularion, extracts the ones associated with each facial keypoint marker\n\t\t\t\t\ttform = PiecewiseAffineTransform()\n\t\t\t\t\tsrc_triangles, dst_triangles = tform.estimate(src[:,0:2], dst)\n\t\t\t\t\tctrl2_transforms = []\n\t\t\t\t\tfor current_ctrl2 in ctrl2:\n\t\t\t\t\t\tfor i in 
range(len(src_triangles)):\n\t\t\t\t\t\t\ttriangle = polygon.Path(src_triangles[i])\n\t\t\t\t\t\t\tif triangle.contains_point(current_ctrl2):\n\t\t\t\t\t\t\t\tctrl2_transforms.append(tform.affines[i])\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif len(ctrl2_transforms)!=49:\n\t\t\t\t\t\tmarkers_out_of_mesh = markers_out_of_mesh + 1\n\t\t\t\t\t\tprint \"didn't process image, because can't find all shape parameters:\"\n\t\t\t\t\t\tprint path\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tout_ctrl2 = []\n\t\t\t\t\tfor i in range(len(ctrl2_transforms)):\n\t\t\t\t\t\t\t#performs transformation on marker\n\t\t\t\t\t\t\tout_ctrl2.append(ctrl2_transforms[i](ctrl2[i]))\n\t\t\t\t\tout_ctrl2 = np.transpose((np.transpose(out_ctrl2)[0],np.transpose(out_ctrl2)[1]))\n\t\t\t\t\tout_ctrl2 = np.squeeze(out_ctrl2)\n\n\t\t\t\t\t#transforms image to the new surface triangle by triangle using Delaunay triangulation, then interpolation to smooth it out\n\t\t\t\t\ttform = PiecewiseAffineTransform()\n\t\t\t\t\ttform.estimate(dst, src[:,0:2])\n\t\t\t\t\tout_image = warp(image, tform, output_shape=(out_rows, out_cols))\n\n\t\t\t\t\tout_path = path[:-4]+'_processed'+'_yaw_'+str(real_yaw)+'_pitch_'+str(real_pitch)+'_roll_'+str(real_roll)+path[-4:]\n\n\t\t\t\t\t#saves image and marker points\n\t\t\t\t\timsave(out_path, out_image)\n\n\t\t\t\t\tnp.savetxt(out_path+'_0.txt', out_ctrl2)\n\n\t\t\t\t\tprocessed_sum = processed_sum + 1\n\t\t\t\t\tprint(str(processed_sum)+'. file processed:')\n\t\t\t\t\tprint(path)\n\t\t\t\telse:\n\t\t\t\t\ttoo_big_angles_sum = too_big_angles_sum + 1\n\t\t\t\t\tprint(\"didn't process image, because of too big original rotation:\")\n\t\t\t\t\tprint(path)\n\t\t\telse:\n\t\t\t\tno_desc_found_sum = no_desc_found_sum + 1\n\t\t\t\tprint(\"didn't process image, beacuse descriptor documents not found:\")\n\t\t\t\tprint(path)\n\n\tout_paths = []\n\tfor folder, subs, files in os.walk(data_folder):\n\t\tfor filename in files:\n\t\t\tif filename.endswith('.png') or filename.endswith('.jpg'):\n\t\t\t\tif \"processed\" in filename:\n\t\t\t\t\tout_path = os.path.join(folder, filename).replace(data_folder, \"\")\n\t\t\t\t\tout_paths.append(out_path)\n\n\t#writes paths of generated images into contents\n\tfilename = data_folder+'/contents'\n\n\twith open(filename, 'w') as f:\n\t\tf.write('\\n'.join(out_paths))\n\n\tprint \"Shuffling contents...\"\n\t#shuffles contents\n\tshuffle_contents(filename)\n\n\n\t#prints some statistics about the process on the screen\n\tprint\n\tprint(\"Statistics:\")\n\tprint(\"-----------\")\n\tprint(\"Files found: \"+str(all_files_sum))\n\tif all_files_sum != 0:\n\t\tprint(\"Already processed: \"+str(already_processed_sum))\n\t\tprint(\"Got processed now: \"+str(processed_sum))\n\t\tprint(\"All processed: \"+str((processed_sum+already_processed_sum)*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of too big angles: \"+str(too_big_angles_sum*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of no decriptors: \"+str(no_desc_found_sum*100/all_files_sum)+\"%\")\n\t\tprint(\"Can't be processed because of markers outside of mesh: \"+str(markers_out_of_mesh*100/all_files_sum)+\"%\")", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def move_files_with_extension(self, extension: str):\n\n while True:\n files_with_extension = self.collect_files_with_extensions(extension)\n print(files_with_extension)\n folders_containing = set(\n [\n 
os.path.basename(os.path.dirname(file))\n for file in files_with_extension\n ]\n )\n directory = input(\n f\"Files with '{extension}' extension are scattered in your folders:\\n\"\n f\" {', '.join(folders_containing)}\\n\"\n f\"Where do you want to put them?\\n\"\n f\"({', '.join(self.possibilities.keys())})\\n\"\n )\n if directory in self.possibilities:\n self.move_files(files_with_extension, directory)\n break\n else:\n print(\"Invalid Input\")", "def perform_action(self):\n errors = ErrorList()\n dest = self.cleaned_data['destination_folder']\n for item in self.cleaned_data['items']:\n path = os.path.join(self.file_dir, item)\n try:\n utility.move_items([path], self.dest_dir)\n except FileExistsError:\n errors.append(format_html(\n 'Item named <i>{}</i> already exists in <i>{}</i>',\n item, dest))\n except OSError:\n if not os.path.exists(path):\n errors.append(format_html(\n 'Item named <i>{}</i> does not exist', item))\n else:\n errors.append(format_html(\n 'Unable to move <i>{}</i> into <i>{}</i>', item, dest))\n return 'Your items have been moved', errors", "def rename_folders(self, metadata: list, transformed_metadata: dict) -> None:\n self.__write_mapping(self.__transform_mapping(transformed_metadata[\"encrypted_metadata\"]))\n for i in range(len(metadata)):\n cwd = join(getcwd(), \"input\")\n LOGGER.debug(\"Renaming {} to {}.\"\n .format(join(cwd, metadata[i][0]),\n join(cwd, transformed_metadata[\"encrypted_metadata\"][i][1])))\n rename(join(cwd, metadata[i][0]), join(cwd, transformed_metadata[\"encrypted_metadata\"][i][1]))", "def renameImages(directory):\n filenames = os.listdir(directory)\n numberOfImages = len(filenames)\n totalNumberOfDigits = math.floor(math.log10(numberOfImages)) + 1\n\n i = 1\n print('The current folder is ' + directory)\n for filename in filenames:\n currentNumberOfDigits = math.floor(math.log10(i)) + 1\n numberOfZeros = totalNumberOfDigits - currentNumberOfDigits\n print(f'renaming: {filename} to' + 'IMG_' + '0'* numberOfZeros + f'{i}.jpg')\n oldFile = os.path.join(directory, filename)\n newFile = os.path.join(directory, 'IMG_' + '0'* numberOfZeros + f'{i}.jpg')\n os.rename(oldFile, newFile)\n i += 1", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. 
Restart the database to continue.\")", "def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))", "def move_items(items, target_folder):\n for item in items:\n rename_file(item, os.path.join(target_folder, os.path.split(item)[-1]))", "def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! Result dir is %s\" % outdir)", "def main():\n # The following dictionary will allow us to map extensions to the destination folder names\n extension_to_category = {}\n os.chdir(\"FilesToSort\")\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split('.')[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into? \".format(extension))\n # Now we can map this new extension to a folder name\n extension_to_category[extension] = category\n try:\n # We don't expect to get an exception due to the if statement\n # But we'll play it safe anyway in case the user chooses an existing folder\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # We don't need a separate loop for this next step\n # We're already in a loop per file and we now know where to put it\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def add_to_split(rec_dir, target, label):\n for file_name in os.listdir(rec_dir):\n path = os.path.join(rec_dir, file_name)\n if (os.path.isfile(path)):\n if not os.path.isdir(os.path.join(target, str(label))):\n os.makedirs(os.path.join(target, str(label)))\n shutil.copy(path, os.path.join(target, str(label), file_name))", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def moveBigFiles(self):\n if not self.bigFilesArea:\n self.logger.info('Moving of big files to a separate volume has not been requested.')\n return\n\n self.logger.info('Moving of big files to a separate volume is 
requested. Scanning...')\n \n if not os.path.exists(self.bigFilesArea):\n m = 'Cannot shift big files onto inexistent volume: %s' % self.bigFilesArea\n self.logger.error(m)\n return\n \n bigFiles = self.getBigFiles()\n\n if not [val for val in bigFiles.values() if val]:\n self.logger.info('No big files were found, returning.')\n return\n \n placeToDump = createBigFileIO(self.site, self.bigFilesArea, self.workDirs, self.isNightly).getJobDumpLocation(self)\n if not placeToDump:\n m = 'Unable to retrieve location of big files volume. Not moving big files.'\n self.logger.warning(m)\n return\n\n # We have files to move, let's move them\n for bigFileBaseDir, bigFiles in bigFiles.items():\n for bigFile in bigFiles:\n src = bigFile # file\n dst = placeToDump # directory\n self.moveBigFile(src, dst)\n # If big file origin is results path, replace with a soft link\n # to separate big file volume.\n if bigFileBaseDir == self.resPath:\n self.makeReplacementKeepFile(bigFile, placeToDump)", "def seperate_dog_cat(src, dst):\n imgs = [f for f in os.listdir(src) if os.path.isfile(os.path.join(src, f)) and not f.startswith('.')]\n \n dst_dog = os.path.join(dst, 'dog')\n dst_cat = os.path.join(dst, 'cat')\n if not os.path.exists(dst_dog):\n os.makedirs(dst_dog)\n if not os.path.exists(dst_cat):\n os.makedirs(dst_cat)\n \n for img in imgs:\n if 'dog' in img:\n move(os.path.join(src, img), dst_dog)\n if 'cat' in img:\n move(os.path.join(src, img), dst_cat)\n print('seperate done')", "def do_dir(arguments):\n #print(\"Outputting in directory: \" + dsum)\n \n if not os.path.exists(arguments.file_pathout): \n os.mkdir(arguments.file_pathout)\n\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n\n # debug/verbose\n if arguments.v:\n print('DEBUG: shape=%g area=%g contour=%g' % (arguments.shape,arguments.area,arguments.contour))\n \n ffs = glob.glob(arguments.file_pathin+'/*.FIT') + glob.glob(arguments.file_pathin+'/*.fit') + \\\n glob.glob(arguments.file_pathin+'/*.FTS') + glob.glob(arguments.file_pathin+'/*.fts') + \\\n glob.glob(arguments.file_pathin+'/*.FITS') + glob.glob(arguments.file_pathin+'/*.fits')\n ffs = list(set(ffs)) # needed for dos\n ffs.sort() # on linux wasn't sorted, on dos it was \n f = open(arguments.file_pathout+'/summary.txt','w') # Creates summary text file\n f.write('Streaks found in files: \\n') #Creates first line for summary file\n\n sf = arguments.start_frame\n ef = arguments.end_frame\n \n if sf <= 0:\n sf = 1\n \n if ef <= 0 or ef > len(ffs):\n ef = len(ffs)\n \n if ef < sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n for ff in ffs[sf-1:ef]:\n # creates directory one directory back from the folder which contains fits files\n \n num = do_one(ff,arguments.file_pathout+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],arguments.shape,arguments.area,arguments.contour)\n \n \n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(ff)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(ff + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n \n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n 
\n f.write('\\n\\n')\n\n if arguments.diff:\n f.write('Streaks found in Files: \\n')\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n dfs = []\n# print('Computing %d differences' % (ef-sf+1))\n for i in range(len(ffs)-1):\n dfs.append(arguments.file_pathout+'/'+ffs[i+1][len(arguments.file_pathin):]+'DIFF')\n# mk_diff(ffs[i],ffs[i+1],dfs[i],v)\n \n if sf <= 0:\n sf = 1\n\n if ef <= 0 or ef > len(dfs):\n ef = len(dfs)\n \n if ef <= sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n i = sf-1\n for df in dfs[sf-1:ef]:\n try:\n mk_diff(ffs[i],ffs[i+1],dfs[i],arguments.v)\n # num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.rfind('.')],shape,area,contour)\n #diff_file = dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF'\n \n #directory one directory back\n new_dir = arguments.file_pathout+'/'+df[df.rfind(os.sep)+1:df.rfind('.')]+'DIFF'\n num = do_one(df,new_dir,arguments.shape,arguments.area,arguments.contour)\n os.remove(df)\n \n except:\n num=-1\n sys.stdout.write('X')\n \n\n\n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(df)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(df + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n i += 1\n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n\n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n\n f.close()\n else:\n f.close()" ]
[ "0.6666721", "0.6293736", "0.61736655", "0.6053043", "0.59070665", "0.5705878", "0.56739455", "0.5666597", "0.5597771", "0.55276024", "0.55109316", "0.5431153", "0.54137915", "0.5404886", "0.5398599", "0.53864944", "0.53838104", "0.5334871", "0.532649", "0.53132355", "0.53118867", "0.52962136", "0.5263762", "0.52590597", "0.52435637", "0.5238241", "0.5237887", "0.51958823", "0.51779634", "0.51748234", "0.51738596", "0.51629806", "0.5151736", "0.5150708", "0.5142081", "0.51305723", "0.51162916", "0.5106405", "0.5098917", "0.509667", "0.5066411", "0.5059729", "0.5057985", "0.50456035", "0.5042526", "0.50421566", "0.5027476", "0.5025622", "0.5020589", "0.5017476", "0.49934447", "0.49799362", "0.49753067", "0.49690357", "0.49623895", "0.49622035", "0.4960514", "0.49577248", "0.49470633", "0.49383682", "0.49273595", "0.49222744", "0.49143443", "0.4904785", "0.4898587", "0.48898178", "0.48897272", "0.48890853", "0.48859248", "0.48795807", "0.48781142", "0.48738945", "0.48725662", "0.4865833", "0.48622766", "0.48603946", "0.48474738", "0.4846338", "0.48369467", "0.48250273", "0.4818305", "0.48128715", "0.48127562", "0.48063862", "0.48017493", "0.47968534", "0.47867298", "0.47837883", "0.47829676", "0.47779647", "0.47771227", "0.47736266", "0.4766739", "0.4762521", "0.4761347", "0.47552007", "0.47552007", "0.47551343", "0.47530505", "0.47487327" ]
0.6105557
3
Undo all previous file movements.
def rollback(self) -> None:
    for k in self._moved_cols:
        self._cols[k].move_back()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))", "def undo():", "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo(self):\n self._check_undo_prerequisites()\n self._decrement_history_pointer()\n self._replay_history()", "def undo(self):\n for command in reversed(self.commands):\n command.undo()", "def undo(self) :\n \n raise NotImplementedError()", "def __undo(self):\n self.__undo_controller.undo()", "def undo(self):\n self.cnvImgTest.undoLast()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def _undo_action(self):\n pass", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def undo(self):\n if self.__undo is None: # if we can not undo anymore we raise an error\n raise ControllerException(\"Error!!! Can't undo anymore!!!\\n\")\n else: # otherwise we simply do the swap from the undo list once more\n self.__scramble.swap(self.__undo[0], self.__undo[1], self.__undo[2], self.__undo[3])\n # self.__scramble.inc()\n self.__undo = None # undo becomes None because we don't want the user to do multiple undo operations", "def undo(self):\n\n if not self.can_undo():\n print(\"error: trying to undo\")\n return\n\n func = self.undo_gen(self.undo_act())\n func()\n self.position -= 1", "def onUndo(self, event):\r\n\t\tself.ActionHistory.Undo()", "def undo_settings(self):\r\n cF.undo_settings()", "def undoChanges(self):\n Objects.undoChanges(self)\n self.draw()", "def undo(self):\r\n\r\n if self.done.size() > 0:\r\n command = self.done.pop()\r\n if command[0] == 'add':\r\n uncommand = (('del'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.delete(uncommand[1],\r\n False)\r\n if command[0] == 'del':\r\n uncommand = (('add'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.addnew(uncommand[2],\r\n uncommand[3],\r\n False)\r\n if command[0] == 'move':\r\n uncommand = (('move'),\r\n command[2],\r\n command[1])\r\n self.move(uncommand[1],\r\n uncommand[2],\r\n False)\r\n self.undone.add(uncommand)", "def onUndo(self):\n pass", "def abort(self):\n for command in reversed(self.commands):\n command.undo()", "def undo(self):\n self.setIndex(self._index-1)", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def restore_last_undo_point(self):\n self.unload()", "def reset(self):\n self.source.seek(0)\n self.target.seek(0)", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def undo_move(self):\n # general idea:\n # store the state of the board 
in a stack before every successful attempted move \n # when this is called, set the current board equal to the top state in the stack\n # print(\"Undo\")\n # print(self)\n # if len(self.board_states) != 0:\n if self.moves != 0:\n self.moves -= 1\n self.stock = []\n self.wp = []\n self.foundations = []\n self.tableaus = []\n self.stock, self.wp, self.foundations, self.tableaus = self.board_states.pop()\n self.init_move_dict()", "def undo(self):\n if self._snapshot_index >= 0:\n snapshot = self._snapshots[self._snapshot_index]\n for chunk_location in snapshot:\n dimension, cx, cz = chunk_location\n chunk = self._unserialise_chunk(dimension, cx, cz, -1)\n self._chunk_cache[chunk_location] = chunk\n self._snapshot_index -= 1", "def __editUndo(self):\n self.activeWindow().undo()", "def reset(self):\n self.undo_stack = Stack(self.undo_stack_size)\n self.redo_stack[:] = []\n self.not_undoable_action = False\n self.undo_in_progress = False", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def undo_move(self, n=1):\n self.state = self.move_history[-n - 1]\n self.positions = self.copy_board(self.state[1])\n # delete all moves between the current state and the restored state\n del self.move_history[-n:]", "def flushUndo(*args, **kwargs)->None:\n pass", "def undo(*args, **kwargs)->None:\n pass", "def revert_pristine(self):\n self.revert_all()\n self.svn_update()\n\n status = self.svn_status()\n if not status.unversionned:\n return\n # delete unversionned files\n for entry in status.unversionned:\n path = entry.path\n if os.path.isdir(path) and not os.path.islink(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n if self.svn_update() == 0:\n raise Error('Failed to reset workspace !')", "def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]", "def undo_transaction(self):\n transaction = self.context\n entries = transaction.entries()\n\n # check if we can undo\n if not transaction.canUndoOrReverse():\n raise AccessControl_Unauthorized('No permission to create transactionentries, or there are no entries to reverse')\n \n # force a remove from the balances and update the references\n for transactionEntry in entries:\n transactionEntry.removeTransactionEntryFromAccount()\n\n # remove transaction\n transaction.getTransactionFolder().manage_delObjects(ids=transaction.getId())", "def UndoChanges(self):\n if (len(self.alignmentHistory) > 1):\n self.alignmentHistory.pop()\n self.alignment = self.alignmentHistory[-1][:,:]\n self.Show(self.displayedColumn)\n else:\n self.AlertMessage('Nothing to undo.', 'low')", "def pre_revert(self):", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"", "def rollback(self):\n self.stream.seek(0)", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def revert(self, *args, **kwargs):", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def undo(self):\n if not self.undo_stack:\n return\n self.begin_not_undoable_action()\n self.undo_in_progress = 
True\n undo_action = self.undo_stack.pop()\n self.redo_stack.append(undo_action)\n if isinstance(undo_action, self.insertclass):\n self._undo_insert(undo_action)\n elif isinstance(undo_action, self.deleteclass):\n self._undo_delete(undo_action)\n else:\n self._handle_undo(undo_action)\n self.end_not_undoable_action()\n self.undo_in_progress = False", "def undo(self, num=1):\n for i in range(num):\n super().undo()", "def undo(self):\n if (0 == len(self._undoStack)):\n raise ValueError(\"Nothing to undo\")\n else:\n self._redoStack.append(self.gameState())\n\n lastGameState = self._undoStack.pop()\n self.counter = lastGameState[\"counter\"]\n self.wonRounds = lastGameState[\"wonRounds\"]\n self.wonGames = lastGameState[\"wonGames\"]\n self.currentMaxPoints = lastGameState[\"currentMaxPoints\"]\n self.sidesChanged = lastGameState[\"sidesChanged\"]\n self.playerPositions = lastGameState[\"playerPositions\"]\n self.servePosition = lastGameState[\"servePosition\"]", "def restore(self, clean=False):\n\n for origfilename in self.filenames[:]:\n if not origfilename.endswith(\".\"+self.BACKUP_EXTENSION):\n continue\n filename = origfilename.strip(\".\"+self.BACKUP_EXTENSION)\n shutil.copy(origfilename, filename)\n self.filenames.append(filename)\n if clean:\n os.remove(origfilename)", "def undo(self):\r\n previous = self.memory.pop()\r\n if not isinstance(previous, task2.ListADT):\r\n raise TypeError(\"Did not expect any other object in memory\")\r\n if previous[0] == \"d\":\r\n index = previous[1]\r\n for i in range(len(previous)-1, 1, -1):\r\n self.text_lines.insert(index, previous[i])\r\n elif previous[0] == \"i\":\r\n start = previous[1]\r\n for j in range(previous[2]):\r\n self.text_lines.delete(start)\r\n else:\r\n raise ValueError(\"Did not expect any other action other than delete or insert\")", "def clean_files(self):\n self.filenames.clear()", "def __redo(self):\n self.__undo_controller.redo()", "def reset(self):\n self._cmd_line = 0\n self._file_line = 0", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def wipe(self):", "def wipe(self):", "def rewind(self):\n self.run_command('rewind')", "def rewind():", "def rewind(self):\n self.seek(0)", "def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)", "def undo(self, outer_instance):\n pass", "def clean_up():\n for action in reversed(undo_actions):\n try:\n action()\n except Exception, exc:\n sys.stderr.write(\"BAD CLEANUP: Call to %s failed\\n\"\n % action.func_name)\n sys.stderr.write(\" %s\\n\" % exc)", "def reset(self):\n self.prev_obj1_position = None\n self.prev_obj2_position = None", "def rewind(self):\n self.seek(0)", "def reset(self):\n self.fscore_history = []", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def rollback(self):\n pass", "def undo():\n\n # pressing undo twice restores the original value \n global current_value, operations\n \n # solution: since there are only 2 values stored, swap\n operations[0], operations[1] = operations[1], operations[0]\n current_value = operations[-1]", "def _undo(self, action, data):\n if 
self.undobuffer is None:\n return\n if action == \"rot\":\n angle, degPAU = data\n self._rotate(-angle*degPAU/self._degreesPerAU)\n dummy = self.undobuffer.pop()\n elif action == \"stamp\":\n stitem = data[0]\n self.clearstamp(stitem)\n elif action == \"go\":\n self._undogoto(data)\n elif action in [\"wri\", \"dot\"]:\n item = data[0]\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"dofill\":\n item = data[0]\n self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),\n fill=\"\", outline=\"\")\n elif action == \"beginfill\":\n item = data[0]\n self._fillitem = self._fillpath = None\n if item in self.items:\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"pen\":\n TPen.pen(self, data[0])\n self.undobuffer.pop()", "def undo(self):\n LOG.debug(\"In the undo method, will attempt to restore\")\n\n # validate detected nothing to do for this, nothing was done\n # for execute, so simply return\n if self.no_op:\n return\n\n if not self.source_dev or not self.target_dev:\n return\n LOG.debug(\"The source dictionary is: %s\", self.source_dict_restore)\n LOG.debug(\"The target dictionary is: %s\", self.target_dict_restore)\n\n # In scenario where no source IP Address...\n if self.source_dict_restore:\n self.commandex.send_ifcfg(self.source_dev,\n self.source_dict_restore)\n\n # May have failed because the ifcfg didn't even exist, nothing\n # to roll back then\n if self.target_dict_restore:\n self.commandex.send_ifcfg(self.target_dev,\n self.target_dict_restore)", "def c_undo(self):\r\n try:\r\n self.canvas.delete(self.canvas.find_all()[-1])\r\n self.update()\r\n return True\r\n except: return False", "def rollback(self):\n self._rollback = True", "def reset(self):\n # from pathlib import Path\n # import pickle as pkl\n # path_traj = Path.home() / 'TmrlData' / 'reward' / 'traj.pkl'\n # with open(path_traj, 'wb') as file_traj:\n # pkl.dump(self.traj, file_traj)\n\n self.cur_idx = 0\n self.step_counter = 0\n self.failure_counter = 0\n\n # self.traj = []", "def move_back(self) -> None:\n if self.label == 'ignore':\n return\n\n for crop in self._content:\n crop.move_back()", "def undo(self, event=None):\n if not self.segs == []:\n self.requestSegByDct((self.segs[-1].getDct() + 2) % 4)", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()\n self.reset_snippet()", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def post_revert(self):", "def revertToNormal(self, revertEffectFiles = True):\n for j in enumerate(self.inputFilesAll):\n # Load the backups of msb/luabnd files\n print(\"[Unrandomize] Reverting msb and luabnd files \" + str(j[0]) + \"/\" + str(len(self.inputFiles)))\n self.restoreBackup(self.MAPSTUDIO + j[1] + 
'.msb')\n self.restoreBackup('event/{0}.emevd{1}'.format(j[1], '.dcx' if self.useDCX else ''))\n \n if not (j[1] == \"m12_00_00_01\"):\n if (self.useDCX):\n self.restoreBackup(self.AISCRIPTS + j[1] + '.luabnd.dcx')\n else:\n self.restoreBackup(self.AISCRIPTS + j[1] + '.luabnd')\n\n if (revertEffectFiles):\n for iFile in self.inputFFXFiles:\n if (iFile != \"NONE\"):\n if (self.useDCX):\n self.restoreBackup(self.FFX_DIR_REMASTERED.format(iFile))\n else:\n self.restoreBackup(self.FFX_DIR.format(iFile))\n\n check_exe.restore_exe()\n\n self.revertParam()", "def rollback(self):\n if len(self.__stack) == 0:\n raise EmptyStackException()\n self.__current_pos = self.__stack[-1][0]\n self.line = self.__stack[-1][1]\n self.linePos = self.__stack[-1][2]\n self.__stack = self.__stack[:-1]", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def clear_redo(self):\r\n self.command_manager.clear_redo()", "def revertInterims(self):\n for interim in self.getInterim():\n interim.revertInterim()", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def redo():", "def redo(self):\n for command in self.commands:\n command.redo()", "def reset_old_files():\n commands = [\n 'rm -f {0}/tools/perf/page_sets/url*'.format(CHROMIUM_SRC),\n 'rm -f {0}/tools/perf/page_sets/data/url*'.format(CHROMIUM_SRC),\n 'rm -f ' \\\n '{0}/tools/perf/benchmarks/telemetryBenchmarks.py'.format(CHROMIUM_SRC),\n 'rm -f data/wpr_source/*',\n 'rm -f temp/*',\n 'rm -f data/results.db',\n 'rm -f {0}/data/har/*'.format(PLT_SRC),\n 'rm -f {0}/data/replay/*'.format(PLT_SRC),\n 'rm -f {0}/webpagereplay_logs/*'.format(CHROMIUM_SRC),\n 'rm -f {0}/telemetry/count.db'.format(PLT_SRC),\n ]\n\n for cmd in commands:\n p = Popen(cmd, shell=True)\n p.wait()", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def test_move_to_trash(self):\n os.chdir(\"testimages/\")\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n filename = os.path.abspath(\"image_to_edit.jpg\")\n files = [filename]\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file = os.path.join(self.trashdir, \"image_to_edit.jpg\")\n self.assertTrue(os.path.isfile(trashed_file))\n # Repeat, to check if backing up works\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file1 = os.path.join(self.trashdir, \"image_to_edit.jpg.1\")\n self.assertTrue(os.path.isfile(trashed_file1))\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file2 = os.path.join(self.trashdir, \"image_to_edit.jpg.2\")\n self.assertTrue(os.path.isfile(trashed_file2))\n # Clear the files\n os.remove(trashed_file)\n os.remove(trashed_file1)", "def reset(self):\n self.continued = False\n self.warned = False\n self.whatifs = None\n self.tablefmt = None\n self.saved = False", "def undoPossibleBarMoves(self):\r\n for num in self.diceNumbers:\r\n if self.currentPlayer == 0:\r\n potentialPoint = num - 1\r\n else:\r\n potentialPoint = num * (-1)\r\n self.points[potentialPoint].setValidMove(False)\r\n self.points[potentialPoint].setBorder(BLACK, 1)", "def 
_unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def reset(self):\n self.previous = None\n self.state = None\n self.args = None\n self.context = None", "def unmakeMove(self, move):", "def redo(self):\n pass", "def rollback(self):\n raise NotImplementedError", "def undo_act(self):\n\n return self.history[self.position]", "def _move_current_to_previous(self, metadata_role):\n\n # Get the 'current' and 'previous' full file paths for 'metadata_role'\n metadata_filepath = metadata_role + '.txt'\n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filepath)\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filepath)\n\n # Remove the previous path if it exists.\n if os.path.exists(previous_filepath):\n os.remove(previous_filepath)\n\n # Move the current path to the previous path. \n if os.path.exists(current_filepath):\n tuf.util.ensure_parent_dir(previous_filepath)\n os.rename(current_filepath, previous_filepath)", "def rewind(f):\n\tf.seek(0)" ]
[ "0.7702794", "0.7388811", "0.7248675", "0.71586466", "0.71578836", "0.70705074", "0.69751996", "0.6805957", "0.6794466", "0.6791538", "0.6788477", "0.67617655", "0.6698153", "0.66829187", "0.6664879", "0.66583955", "0.66569996", "0.6571274", "0.65703166", "0.65516204", "0.6458982", "0.6451298", "0.6427389", "0.6409655", "0.63870406", "0.6376271", "0.63254315", "0.63005996", "0.62785554", "0.625894", "0.62464553", "0.62230664", "0.6173843", "0.61345273", "0.6115084", "0.6105772", "0.61000615", "0.6090402", "0.6083443", "0.6007545", "0.60060203", "0.59928054", "0.5947704", "0.5930141", "0.59197634", "0.58912724", "0.588541", "0.5882821", "0.5851814", "0.58343464", "0.5806273", "0.5795323", "0.5795323", "0.5787217", "0.57746226", "0.5763237", "0.57626307", "0.57357633", "0.57346845", "0.57280105", "0.57126313", "0.57016337", "0.5690927", "0.5688515", "0.567465", "0.56700796", "0.5664333", "0.5654297", "0.5631179", "0.56071764", "0.56015646", "0.55957484", "0.55793554", "0.55740476", "0.5552739", "0.555118", "0.55509675", "0.55416554", "0.55407137", "0.55323315", "0.5526638", "0.55209404", "0.550361", "0.5501008", "0.54814935", "0.54687506", "0.54683286", "0.5461682", "0.5456488", "0.54462737", "0.54460967", "0.5432923", "0.5418609", "0.5414524", "0.54098344", "0.5399765", "0.5395827", "0.53736025", "0.5372597", "0.5365138" ]
0.58613473
48
Move the file associated with this crop to the directory path/annot_type, where annot_type is this crop's annotation type.
def move_to(self, path: str) -> None: self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path)) os.rename(self._file_path, self._new_path) self._file_was_moved = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def move_file(self, ctx):\n pass", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def insert(self, file_path: str, annot_type: str) -> None:\n if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:\n raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')\n x_pos = get_metadata_from_filename(file_path).x_pos\n if x_pos in self._x_positions:\n col = self._cols[x_pos]\n else:\n col = Column()\n self._x_positions.append(x_pos)\n self._x_positions.sort()\n col.insert(Crop(file_path, annot_type))\n self._cols[x_pos] = col\n\n self.n_cols = len(self._cols)", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def move_file(file, dest_path):\n if os.path.isdir(dest_path):\n shutil.move(file, dest_path)\n else:\n os.mkdir(dest_path)\n shutil.move(file, dest_path)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return 
self.rest_client.POST(url, params, headers)", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def convert_and_move_file (filename, origpath, wavpath, mp4path, mono):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav (filename, name, origpath, wavpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n oldlocation = path.join(origpath, filename)\n newlocation = path.join(mp4path, filename)\n shutil.move(oldlocation, newlocation)", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def move_file(source, destination):\n shutil.move(source, destination)", "def save(annotation, new_filename, original_path):\n \n destination = \"../../standardized-data/\"\n if os.path.isdir(destination + \"/\" + annotation) == False:\n os.mkdir(destination + \"/\" + annotation)\n print(annotation, \"FOLDER CREATED\")\n if os.path.exists(destination + \"/\" + annotation + \"/\" + new_filename):\n print('FILE EXISTS: DOUBLE CHECK FOR DUPLICATION :', new_filename)\n else:\n shutil.copyfile(original_path, destination + \"/\" + annotation + \"/\" + new_filename)\n return", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def move_file(path_from, filename):\n finaldir = getormakedir(settings.UPLOAD_DEST_DIR, filename)\n\n path_to = os.path.join(finaldir, filename)\n\n if not os.path.exists(path_to):\n shutil.copyfile(path_from, path_to)\n if settings.REMOVE_UPLOAD_FILES:\n remove_file(path_from)\n\n return path_to", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def move(self, dry_run: bool) -> int:\n if self.label == 'ignore':\n return 0\n\n file_counter = 0\n for crop in self._content:\n if not dry_run:\n crop.move_to(self.label)\n file_counter += 1\n\n return file_counter", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def move(self, path):\n self.current_location = (path[1][1], path[1][0])", "def file_move(session, dc_ref, src_file, dst_file):\n LOG.debug(\"Moving file from %(src)s to %(dst)s.\",\n {'src': src_file, 
'dst': dst_file})\n vim = session._get_vim()\n move_task = session._call_method(\n session._get_vim(),\n \"MoveDatastoreFile_Task\",\n vim.get_service_content().fileManager,\n sourceName=src_file,\n sourceDatacenter=dc_ref,\n destinationName=dst_file,\n destinationDatacenter=dc_ref)\n session._wait_for_task(move_task)\n LOG.debug(\"File moved\")", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise", "def move_character(character, dest):\n character_path = dirname(character.path)\n shutil.move(character_path, dest)", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def write(self, filename, arcname=None, compress_type=None):\n filename = str(filename)\n if arcname is None:\n arcname = os.getcwd()\n\n shutil.copy(filename, self.folder + arcname)", "def cp_dir_or_files(self):\n if self.recursive:\n if self.cmdtype == 'upload' and not self.srcpath.endswith(os.path.sep):\n basename = os.path.basename(self.srcpath)\n self.destpath = join_obs_path(self.destpath, basename)\n elif self.cmdtype == 'download' and not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = os.path.join(self.destpath, basename)\n elif not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = join_obs_path(self.destpath, basename)", "def write(self, path):\n\n annotation = copy.deepcopy(self.annotation)\n\n for image_info in annotation['images']:\n image_info['file_name'] = os.path.relpath(image_info['file_name'],\n os.path.dirname(path))\n\n with open(path, 'w') as read_file:\n json.dump(annotation, read_file)", "def move_file_to_config(path):\n destination = str(os.path.expanduser('~')) +'/.config/hackerjobs/'\n shutil.copy(path,destination)", "def move_to_complete(metadata: Metadata):\n\n func = f\"{__name__}.move_to_complete\"\n\n metadata_updated = get_destination(metadata)\n 
moved = move(metadata[\"full_clipname\"], metadata_updated[\"destination\"])\n metadata_updated[\"destination\"] = moved\n\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved from: {metadata_updated['full_clipname']}\",\n )\n post_event(\n \"log_info\",\n f\"{func}\",\n f\"The file was moved to: {metadata_updated['destination']}\",\n )\n\n return metadata_updated", "def mv_file(file_name: str, path: str) -> None:\n global number_of_files\n if file_name.startswith(\".\"):\n pass\n else:\n for extensions in file_formats_list:\n if file_.endswith(extensions):\n shutil.move(desktop + \"/\" + file_, path)\n print(f\"moving {colored(file_name, 'yellow')} to {path}\")\n number_of_files += 1\n else:\n pass", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def move_completed_file(self, fName, save_loc, pattern, rename=None):\n basename = os.path.basename(fName)\n end_location = os.path.join(save_loc, basename)\n if rename:\n end_location = end_location.replace(rename, pattern)\n\n shutil.move(fName, end_location)\n\n return end_location", "def move_file_to_directory(base_path, file_name, directory_name):\n path = FileUtils.full_path\n\n full_file_path = path(base_path, file_name)\n full_dir_path = path(base_path, directory_name)\n full_new_path = path(full_dir_path, file_name)\n try:\n os.rename(full_file_path, full_new_path)\n except FileNotFoundError:\n pass\n # pass for now", "def alternativeSeperation(path=\"data\"):\n path = os.path.join(path, \"val\")\n val_df = pd.read_csv(os.path.join(path, \"val_annotations.txt\"), delimiter=\"\\t\",\n header=None, index_col=0)\n val_labels = val_df.to_dict()[1]\n\n for image, label in val_labels.items():\n label_path = os.path.join(path, label)\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n shutil.move(os.path.join(os.path.join(path, \"images\"), image), label_path)", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def move_to(self, file_name, to_dir, change_name_to=None):\n self._check_filename(file_name)\n src = posixpath.join(server_setup.LOCAL_DIR, file_name)\n file_name = file_name if change_name_to is None else change_name_to\n dest = posixpath.join(self.root, to_dir, file_name)\n print(f\"--> Moving file {src} to {dest}\")\n self._check_file_exists(dest, should_exist=False)\n self.copy(src, dest)\n self.remove(src)", "def move_file(original_path,final_path,max_attempts=30):\n assert_is_string(original_path)\n assert_is_string(final_path)\n\n attempt_counter = 0\n while attempt_counter < max_attempts:\n attempt_counter += 1\n if attempt_counter > 1:\n # Pause if something went wrong, (yt-dl is a suspect, might not be closing files?)\n time.sleep(attempt_counter)\n logging.debug(\"Attempt \"+repr(attempt_counter)+\" to move \"+repr(original_path)+\" to \"+repr(final_path))\n try:\n # Make sure output folder exists\n output_dir = os.path.dirname(final_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n assert(os.path.exists(output_dir))\n # Move file\n shutil.move(original_path, final_path)\n assert(not 
os.path.exists(original_path))\n assert(os.path.exists(final_path))\n return\n except WindowsError, err:\n logging.exception(err)\n logging.error(\"Failed to move file: \"+repr(original_path)+\" to \"+repr(final_path))\n continue\n # If we get here we already have an exception to re-raise\n logging.critical(\"move_file() Too many failed attempts to move a file!\")\n logging.critical(\"move_file()\"+repr(locals()))\n raise", "def move_file(self, from_path: str, to_path: str, force: bool = False) -> Dict:\n raise NotImplementedError", "def moveBy(self, **kwargs):\n\n try:\n shake = kwargs[\"fname\"].split(' ')\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n rospy.loginfo('moveBy(): pose_offset = %s' % str(pose_offset))\n\n if 'empty' in shake:\n self.locator.recognise_grid()\n x_offset = self.target_locations['D'][0]-self.locator.pose[0]\n y_offset = self.target_locations['D'][1]-self.locator.pose[1]\n pose_offset = (x_offset,y_offset,-0.05,0,0,0)\n self.locator.moveBy(offset_pose=pose_offset)\n self.baxter.no()\n else:\n self.locator.moveBy(offset_pose=pose_offset)\n self.mm.loadMenu(\"actionMenu\")", "def convert_and_move_dir (dirname, origpath, wavpath, mp4path, mono):\n print(dirname)\n origdirpath = path.join(origpath, dirname)\n wavdirpath = path.join(wavpath, dirname)\n for filename in listdir(origdirpath):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav(filename, name, origdirpath, wavdirpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n shutil.move(origdirpath, mp4path)", "def mv(path_file_folder, new_path):\n if not is_folder(new_path):\n raise DegooError(f\"mv: The target path is not a folder\")\n\n source_path = path_file_folder if is_folder(path_file_folder) else path_file_folder[:path_file_folder.rfind('/')]\n\n if source_path == new_path:\n raise DegooError(f\"mv: The target path cannot be the same as the source path\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n if isinstance(new_path, int):\n new_parent_id = new_path\n elif isinstance(new_path, str):\n new_parent_id = path_id(new_path)\n else:\n raise DegooError(f\"rm: Illegal destination folder: {new_path}\")\n\n return api.mv(file_id, new_parent_id)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def MoveFile(path, new_path):\n try:\n RemoveFile(new_path)\n os.rename(path, new_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise", "def move_file_in_dir(name_file, desten):\n\n if os.path.isfile(config_tools.full_dest+name_file):\n try:\n shutil.move(config_tools.full_dest + name_file, config_tools.full_dest + desten)\n except OSError:\n print(f\"Не удалось переместить {name_file} в папку:{desten}\")\n else:\n print(f\"Файл {name_file} находиться в папке {desten}\")", "def 
organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def save_annotations(image2annotation, image_dir, xml_dir):\n\n for image_id in tqdm(image2annotation):\n annotations = image2annotation[image_id]\n image_name, xml_name = image_id + '.jpg', image_id + '.xml'\n image = cv2.imread(os.path.join(args.image_path, image_name))\n\n objects = []\n for annotation in annotations:\n bbox = annotation['bbox']\n class_label = annotation['class']\n obj = {class_label: {'xmin': bbox[0], 'ymin': bbox[1], 'xmax': bbox[2], 'ymax': bbox[3]}}\n objects.append(obj)\n\n # save image and xml to destination folder.\n w = os.system('cp {}/{} {}'.format(args.image_path, image_name, image_dir))\n\n xml_content = _annotation_xml(objects, image_id, image.shape)\n with open(os.path.join(xml_dir, xml_name), 'w') as f:\n f.write(xml_content)", "def save(self, path):\n (folder, filename) = os.path.split(path)\n if not filename:\n filename = _clean_filename(self.name)\n path = os.path.join(folder, filename)\n return self.image.save(path)", "def on_created(self, event):\n print(\"Created\")\n time.sleep(5)\n self.moveFile(event.src_path)", "def moveOutput(self,id, max_id,path,file):\n Dir_Base=path +'Submission_'\n \n for i in range(1, max_id):\n if not os.path.isdir( Dir_Base + str(i) + '/'):\n cmd=('mkdir '+ Dir_Base + str(i) + '/ >& /dev/null')\n cmd_out = runCommand(cmd) \n common.logger.debug(str(cmd_out))\n cmd='mv '+ path + file + ' ' + Dir_Base + str(max_id -1) + '/ >& /dev/null' \n \n try:\n cmd_out = runCommand(cmd) \n common.logger.debug(cmd_out)\n except:\n msg = 'no output to move for job '+str(id)\n common.logger.debug(msg)\n pass\n return", "def move_file_on_datastore(content, datastore_name, datacenter_name, source, destination):\n datacenter = get_obj(content, [vim.Datacenter], datacenter_name)\n datastore = get_obj(content, [vim.Datastore], datastore_name)\n task = vim.FileManager.MoveDatastoreFile_Task(\n content.fileManager,\n '[{0}] {1}'.format(datastore_name, source),\n datacenter,\n '[{0}] {1}'.format(datastore_name, destination),\n datacenter,\n True\n )\n wait_for_task(task)", "def move(self, dest_fqpath):\n ret = move_file(self._host, self._fqpath, dest_fqpath)\n\n if ret:\n # TODO: change this to use a setter/getter for heavy lifting once\n # and can reset everything from one place\n self._previous_fqpath = self._fqpath\n self._fqpath = dest_fqpath\n\n return True\n\n return False", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def move_to_dest(self):\n destpath = self._move_to_dest(self.outfile, self.tempdir, self.destdir)\n\n if destpath:\n self.all_files.append(destpath)\n download_logger.info('Completed 
{}'.format(destpath))\n\n with self._lock:\n self.outfile = \"\"", "def move_delete(dir_path, filename):\n # Get path, name from filename\n path, name = os.path.split(filename)\n # Normalize with destination considerations\n nf = os.path.join(dir_path, increment_file_number(dir_path, name))\n move_file(filename, nf)", "def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue", "def move_from_temp_directory(self):", "def move_to_folder(self):\n if \"moveToFolder\" in self._prop_dict:\n return self._prop_dict[\"moveToFolder\"]\n else:\n return None", "def post(self):\n services.file.move_files(**request.json)\n return {\n \"status\": True\n }", "def move_class(token_stream, parse_tree, args):\n move_class_listener = MoveClassRefactoringListener(\n common_token_stream=token_stream, source_package=source_package, target_package=target_package,\n class_identifier=class_identifier, filename=args.file, dirname=directory\n )\n walker = ParseTreeWalker()\n walker.walk(t=parse_tree, listener=move_class_listener)\n\n with open(args.file, mode='w', newline='') as f:\n f.write(move_class_listener.token_stream_rewriter.getDefaultText().replace(\"\\r\", \"\"))", "def get_out_path(in_path, filetype):\n if filetype == \"yaml\":\n out_dir = check_for_yaml_folder(in_path)\n if out_dir:\n filename, _ = os.path.splitext(os.path.basename(in_path))\n out_path = os.path.join(out_dir, filename)\n else:\n filename, _ = os.path.splitext(os.path.abspath(in_path))\n out_path = filename\n elif filetype == \"json\":\n out_dir = check_for_json_folder(in_path)\n if out_dir:\n filename, _ = os.path.splitext(os.path.basename(in_path))\n out_path = os.path.join(out_dir, filename)\n else:\n filename, _ = os.path.splitext(os.path.abspath(in_path))\n out_path = filename\n else:\n if check_if_plist(in_path):\n out_path = in_path + \".yaml\"\n else:\n print(\"\\nERROR: File is not PLIST, JSON or YAML format.\\n\")\n usage()\n exit(1)\n return out_path", "def upload_file(self, file_path, file_name, output_path):", "def trash_file(file_to_trash, document_name) :\n dtpo_log('debug', \"trash_file file -> %s\", file_to_trash)\n\n source = Config.config.get_source_directory() + '/' + file_to_trash\n destination = Config.config.get_trash_directory() + '/' + document_name\n\n os.rename(source, destination)", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print 
\"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def move_file(self, old_file: str, new_sub_dir: str):\n full_old_path = os.path.join(self.root, old_file)\n full_new_path = os.path.join(self.root, new_sub_dir, old_file)\n os.rename(full_old_path, full_new_path)", "def classify(source_name):\n maindir = os.path.dirname(__file__)\n subdir = os.path.join(maindir, source_name)\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n #for fits_file in glob.glob('*.fits')\n for fits_file in glob.glob('*.fits'):\n fits_content = fits.open(fits_file)\n try:\n if fits_content[0].header['targname'] == source_name:\n fits_content.close()\n new_name = os.path.join(subdir, fits_file)\n os.rename(fits_file, new_name)\n print 'moved file {0}'.format(fits_file)\n except KeyError:\n pass\n finally:\n fits_content.close()", "def move(copy_command: str = \"cp\") -> None:\n run_name = \"010\"\n final_loc = cst.FINAL_LOC\n fig_prefix = os.path.join(cst.FIGURE_PATH, \"RUN_\" + run_name)\n name_dict = {\n fig_prefix + \"_pc_map\" + cst.FIGURE_TYPE: \"figure-1\" + cst.FIGURE_TYPE,\n fig_prefix + \"_s3d_clusters\" + cst.FIGURE_TYPE: \"figure-2\" + cst.FIGURE_TYPE,\n fig_prefix + \"_i_metric_dual\" + cst.FIGURE_TYPE: \"figure-3\" + cst.FIGURE_TYPE,\n fig_prefix + \"_map_i_comp\" + cst.FIGURE_TYPE: \"figure-4\" + cst.FIGURE_TYPE,\n fig_prefix + \"_profiles\" + cst.FIGURE_TYPE: \"figure-5\" + cst.FIGURE_TYPE,\n fig_prefix + \"_y_sobel\" + cst.FIGURE_TYPE: \"figure-6\" + cst.FIGURE_TYPE,\n fig_prefix + \"_i_metric_comp\" + cst.FIGURE_TYPE: \"figure-7\" + cst.FIGURE_TYPE,\n fig_prefix + \"_i_metric_single\" + cst.FIGURE_TYPE: \"figure-8\" + cst.FIGURE_TYPE,\n fig_prefix + \"_pc_y_sobel_comp\" + cst.FIGURE_TYPE: \"figure-A1\" + cst.FIGURE_TYPE,\n fig_prefix + \"_pc_y_sobel_corr\" + cst.FIGURE_TYPE: \"figure-A2\" + cst.FIGURE_TYPE,\n fig_prefix + \"_pc_x_sobel_comp\" + cst.FIGURE_TYPE: \"figure-A3\" + cst.FIGURE_TYPE,\n fig_prefix + \"_pc_x_sobel_corr\" + cst.FIGURE_TYPE: \"figure-A4\" 
+ cst.FIGURE_TYPE,\n fig_prefix + \"_mean_plot\" + cst.FIGURE_TYPE: \"figure-B1\" + cst.FIGURE_TYPE,\n fig_prefix + \"_pca_real_space_plot\" + cst.FIGURE_TYPE: \"figure-B2\" + cst.FIGURE_TYPE,\n }\n for key in name_dict:\n os.system(\n copy_command + \" \" + key + \" \" + os.path.join(final_loc, name_dict[key])\n )", "def export_to(self, subdir, filetype='pdf'):\n file = self.partcode + '.' + filetype\n path = self.export_dir.joinpath(subdir).joinpath(file)\n print(str(path))\n self.doc.SaveAs(str(path), True)", "def move(self, target):\n if target.relto(self):\n raise error.EINVAL(target, \"cannot move path into a subdirectory of itself\")\n try:\n self.rename(target)\n except error.EXDEV: # invalid cross-device link\n self.copy(target)\n self.remove()", "def crop_img(img_path, lbl_path, output_path):\n img_file_list = os.listdir(img_path)\n lbl_file_list = os.listdir(lbl_path)\n\n img_endswith = img_file_list[0].split(\".\")[-1]\n cnt = 1\n for lbl_file in lbl_file_list:\n coords = get_coords_from_label(os.path.join(lbl_path, lbl_file))\n for coord in coords:\n xmin, ymin, xmax, ymax = coord\n\n img = cv2.imread(os.path.join(img_path, lbl_file.replace(\"xml\", img_endswith)))\n cropped = img[ymin:ymax, xmin:xmax]\n\n output_file = os.path.join(output_path, \"{}.{}\".format(cnt, img_endswith))\n cv2.imwrite(output_file, cropped)\n cnt += 1", "def move_file(host, source_fqpath, dest_fqpath):\n command = \"mv %s %s\" % (source_fqpath, dest_fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('mv failed: %s' % rerr)\n return False", "def moveImage(image, dest):\n if not os.path.exists(dest):\n os.mkdir(dest)\n move(image, dest)", "def move_ocr_results(doc_dict):\n # get OCR result files from OCR output directory\n result_files = os.listdir(os.path.join(config.TOC_OCR_OUT, doc_dict['name']))\n if len(result_files) == 0:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Result files not found in {os.path.join(config.TOC_OCR_OUT, doc_dict['name'])}...\")\n\n for item in result_files:\n try:\n\n # check if does not yet exist in document root directory\n if not os.path.isfile(os.path.join(doc_dict['path'], item)):\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Copying {os.path.join(config.TOC_OCR_OUT, doc_dict['name'], item)} to {doc_dict['path']}...\")\n\n # copy the output files if they are not in the document root directory\n shutil.copy2(src=os.path.join(config.TOC_OCR_OUT,doc_dict['name'], item), dst=doc_dict['path'])\n\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} WARNING (OCR): File {item} is already in the directory {doc_dict['path']}...\")\n except:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Failed to copy result file {item} to {doc_dict['path']}...\")", "def cut_paste(src_path, dst_path):\n shutil.move(src_path, dst_path)\n return True", "def save_annotated_image(self, file: Path) -> None:\n pass", "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + 
selected_image_list[file_no].split('/')[-1])\n return", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def move_file(src, dst):\n # Sanity checkpoint\n src = re.sub('[^\\w/\\-\\.\\*]', '', src)\n dst = re.sub('[^\\w/\\-\\.\\*]', '', dst)\n if len(re.sub('[\\W]', '', src)) < 5 or len(re.sub('[\\W]', '', dst)) < 5:\n debug.log(\"Error: Moving file failed. Provided paths are invalid! src='%s' dst='%s'\"%(src, dst))\n else:\n # Check destination\n check = False\n if dst[-1] == '/':\n if os.path.exists(dst):\n check = True # Valid Dir\n else:\n debug.log(\"Error: Moving file failed. Destination directory does not exist (%s)\"%(dst)) #DEBUG\n elif os.path.exists(dst):\n if os.path.isdir(dst):\n check = True # Valid Dir\n dst += '/' # Add missing slash\n else:\n debug.log(\"Error: Moving file failed. %s exists!\"%dst)\n elif os.path.exists(os.path.dirname(dst)):\n check = True # Valid file path\n else:\n debug.log(\"Error: Moving file failed. %s is an invalid distination!\"%dst)\n if check:\n # Check source\n files = glob.glob(src)\n if len(files) != 0:\n debug.log(\"Moving File(s)...\", \"Move from %s\"%src, \"to %s\"%dst)\n for file_ in files:\n # Check if file contains invalid symbols:\n invalid_chars = re.findall('[^\\w/\\-\\.\\*]', os.path.basename(file_))\n if invalid_chars:\n debug.graceful_exit((\"Error: File %s contains invalid \"\n \"characters %s!\"\n )%(os.path.basename(file_), invalid_chars))\n continue\n # Check file exists\n if os.path.isfile(file_):\n debug.log(\"Moving file: %s\"%file_)\n shutil.move(file_, dst)\n else:\n debug.log(\"Error: Moving file failed. %s is not a regular file!\"%file_)\n else: debug.log(\"Error: Moving file failed. No files were found! 
(%s)\"%src)", "def upload_path(self, **kwargs):\n\n # Files not uploaded , skip\n if not (path := kwargs.get('path')):\n return\n\n if not os.path.exists(path):\n return\n\n with open(path, 'rb') as file:\n self.system()\n\n # Remove content type for files\n self.headers.pop('content-type', None)\n files = {'file': file}\n params = {'url': self.URL_POST_FILE, 'files': files}\n response = self.make_request(method='post', **params)\n response.update({'type': kwargs.get('type', AttachmentHelper.TYPE_OTHER)})\n\n # Remove local\n if kwargs.get('delete', True, ):\n os.remove(path)\n\n return self._save(response=response)", "def MovePath(options, src, dst):\n # if the destination is not an existing directory, then overwrite it\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n\n # If the destination exists, the remove it\n if os.path.exists(dst):\n if options.force:\n Remove(['-vfr', dst])\n if os.path.exists(dst):\n raise OSError('mv: FAILED TO REMOVE ' + dst)\n else:\n raise OSError('mv: already exists ' + dst)\n for _ in range(5):\n try:\n os.rename(src, dst)\n break\n except OSError as error:\n print('Failed on %s with %s, retrying' % (src, error))\n time.sleep(5)\n else:\n print('Gave up.')\n raise OSError('mv: ' + error)", "def move_files(origin=''):\n\tpng_file_list = glob.glob(origin+'*png')\n\tif png_file_list != []:\n\t\tif not os.path.exists(origin+'positions-histograms'):\n\t\t\tos.makedirs(origin+'positions-histograms')\n\t\tfor png in png_file_list:\n\t\t\tshutil.move(str(png), origin+'positions-histograms')", "def move_prepfile_in_assigned(self, data):\r\n conf = self.func.config_info()\r\n folder_name = self.bid_folder_name() \r\n\r\n if \"Responsible\" in list(data.keys()):\r\n prep_files = os.listdir(conf[\"path_to_batches_prepfiles\"])\r\n if folder_name in prep_files:\r\n #Move folder from prepared to assgned\r\n src = os.path.join(conf[\"path_to_batches_prepfiles\"], folder_name)\r\n dst = os.path.join(conf[\"path_to_batches_assigned\"], folder_name)\r\n self.func.move_folder(src, dst)\r\n #Copy front end macro to assigned dir\r\n dir_feli = os.listdir(conf[\"path_to_frontend\"])\r\n dir_feli = [f for f in dir_feli if f.endswith('.xlsm')]\r\n feFile = [f for f in dir_feli if 'BETA' not in f.upper()][0]\r\n fe_macro_path = os.path.join(conf[\"path_to_frontend\"], feFile)\r\n fe_newpath = os.path.join(dst, \"_{} {}\".format(folder_name, feFile))\r\n self.func.copy_file(fe_macro_path, fe_newpath)\r\n \r\n if not self.func.folder_exists(dst):\r\n raise Exception(\"Folder {} not moved in '3 ASSIGNED'!\".format(folder_name))\r\n else:\r\n raise Exception(\"Folder {} not found in '2 PREPARED FILES'!\".format(folder_name))", "def move(self, new_path):\n assert isinstance(new_path, str)\n if not new_path.startswith('/'):\n new_path = '/' + new_path\n if new_path.endswith('/'):\n self.filename = new_path + self.name\n else:\n try:\n self.items.get(filepath=new_path, is_dir=True)\n self.filename = new_path + '/' + self.name\n except exceptions.NotFound:\n self.filename = new_path\n\n return self.update(system_metadata=True)", "def moveTo(self, **kwargs):\n\n try:\n colour = kwargs[\"fname\"]\n position = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n rospy.loginfo('moveTo(): new position = %s' % str(position))\n self.locator.update_pose()\n goal_pose = self.locator.pose[:]\n if self.exp_position_occupied:\n self.baxter.no()\n self.baxter.mm.changeMenuTitle(\"Goal 
position not empty\")\n else:\n goal_pose[0:2] = position[0:2]\n success = self.locator.locate(colour, goal_pose)\n #update target position of the colour\n if success:\n self.target_locations[colour] = goal_pose[0:2]\n self.mm.loadMenu(\"actionMenu\")", "def move_attachment(request, pagename, dest_pagename, target, dest_target,\n overwrite=False):\n # replace illegal chars\n target = wikiutil.taintfilename(target)\n dest_target = wikiutil.taintfilename(dest_target)\n\n attachment_path = os.path.join(getAttachDir(request, pagename),\n target).encode(config.charset)\n dest_attachment_path = os.path.join(getAttachDir(request, dest_pagename, create=1),\n dest_target).encode(config.charset)\n if not overwrite and os.path.exists(dest_attachment_path):\n raise DestPathExists\n if dest_attachment_path == attachment_path:\n raise SamePath\n filesize = os.path.getsize(attachment_path)\n try:\n filesys.rename(attachment_path, dest_attachment_path)\n except Exception:\n raise\n else:\n _addLogEntry(request, 'ATTDEL', pagename, target)\n event = FileRemovedEvent(request, pagename, target, filesize)\n send_event(event)\n _addLogEntry(request, 'ATTNEW', dest_pagename, dest_target)\n event = FileAttachedEvent(request, dest_pagename, dest_target, filesize)\n send_event(event)\n\n return dest_target, filesize", "def organize_files(subject_id, timepoint, files, organized_dir):\n note = \"\"\n for file_ in files:\n orig_dir, file_name = os.path.split(file_)\n \n # Create the destination dir if it doesn't already exist.\n org_dir = os.path.join(organized_dir, subject_id, timepoint)\n if not os.path.exists(org_dir):\n os.makedirs(org_dir)\n\n # If the file does not exist in the destination dir, copy it there and\n # move the original to a \"done\" subdir.\n # If it does, return a note saying that the file exists.\n if os.path.isfile(org_dir + file_name):\n note += \"File {0} already exists in {1}. \".format(file_name, org_dir)\n else:\n shutil.copy(file_, org_dir)\n out_dir = os.path.join(orig_dir, \"done\", os.sep)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n shutil.move(file_, out_dir)\n\n return note" ]
[ "0.6297892", "0.5993658", "0.58924556", "0.58497065", "0.574657", "0.5685253", "0.5554716", "0.544296", "0.5296529", "0.5167809", "0.5167291", "0.51232123", "0.5116003", "0.51118386", "0.5106717", "0.50523245", "0.5047933", "0.50246215", "0.50095254", "0.5005804", "0.49968418", "0.49850872", "0.49801317", "0.49681452", "0.49163377", "0.4898899", "0.4884747", "0.4865526", "0.48339573", "0.48265603", "0.48144644", "0.481266", "0.4802239", "0.4802022", "0.47718856", "0.475077", "0.4734135", "0.47001213", "0.4689177", "0.4675949", "0.46678215", "0.46625677", "0.46592924", "0.4656731", "0.4630456", "0.457968", "0.45792413", "0.4575738", "0.4568568", "0.45589405", "0.4556445", "0.45425573", "0.45373192", "0.45366347", "0.45351127", "0.4534846", "0.45208445", "0.45129505", "0.45123136", "0.4509954", "0.45007068", "0.44855455", "0.44771335", "0.44764924", "0.44737783", "0.44705158", "0.44614744", "0.44598272", "0.4459446", "0.44582358", "0.44562784", "0.44493183", "0.444639", "0.44459057", "0.44426158", "0.44380072", "0.44269916", "0.4424655", "0.44235", "0.44145975", "0.44142416", "0.44000936", "0.4392694", "0.4390164", "0.43887788", "0.438495", "0.4375363", "0.43646893", "0.43609062", "0.43334043", "0.43333653", "0.43223724", "0.43176267", "0.4314401", "0.43104792", "0.43060878", "0.42989856", "0.4295179", "0.4293058", "0.42771825" ]
0.7432932
0
Undo a former file movement by moving the file back to its origin.
def move_back(self) -> None: if self._file_was_moved: os.rename(self._new_path, self._file_path) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def undo():", "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))", "def undo(self):\n\n if not self.can_undo():\n print(\"error: trying to undo\")\n return\n\n func = self.undo_gen(self.undo_act())\n func()\n self.position -= 1", "def undo(self) :\n \n raise NotImplementedError()", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def undo(self):\n self.setIndex(self._index-1)", "def __undo(self):\n self.__undo_controller.undo()", "def undo(self):\n self._check_undo_prerequisites()\n self._decrement_history_pointer()\n self._replay_history()", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)", "def undo(self):\n self.cnvImgTest.undoLast()", "def restore_last_undo_point(self):\n self.unload()", "def rollback(self):\n self.stream.seek(0)", "def reset(self):\n self.source.seek(0)\n self.target.seek(0)", "def undoChanges(self):\n Objects.undoChanges(self)\n self.draw()", "def undo_settings(self):\r\n cF.undo_settings()", "def _undo_action(self):\n pass", "def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]", "def undo_move(self, n=1):\n self.state = self.move_history[-n - 1]\n self.positions = self.copy_board(self.state[1])\n # delete all moves between the current state and the restored state\n del self.move_history[-n:]", "def onUndo(self, event):\r\n\t\tself.ActionHistory.Undo()", "def move_file(self, ctx):\n pass", "def undo(self):\n if self.__undo is None: # if we can not undo anymore we raise an error\n raise ControllerException(\"Error!!! 
Can't undo anymore!!!\\n\")\n else: # otherwise we simply do the swap from the undo list once more\n self.__scramble.swap(self.__undo[0], self.__undo[1], self.__undo[2], self.__undo[3])\n # self.__scramble.inc()\n self.__undo = None # undo becomes None because we don't want the user to do multiple undo operations", "def rewind(self):\n self.seek(0)", "def rewind(f):\n f.seek(0)", "def rewind(self):\n self.seek(0)", "def __editUndo(self):\n self.activeWindow().undo()", "def undo(self):\r\n\r\n if self.done.size() > 0:\r\n command = self.done.pop()\r\n if command[0] == 'add':\r\n uncommand = (('del'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.delete(uncommand[1],\r\n False)\r\n if command[0] == 'del':\r\n uncommand = (('add'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.addnew(uncommand[2],\r\n uncommand[3],\r\n False)\r\n if command[0] == 'move':\r\n uncommand = (('move'),\r\n command[2],\r\n command[1])\r\n self.move(uncommand[1],\r\n uncommand[2],\r\n False)\r\n self.undone.add(uncommand)", "def rewind(f):\n\tf.seek(0)", "def onUndo(self):\n pass", "def _move_current_to_previous(self, metadata_role):\n\n # Get the 'current' and 'previous' full file paths for 'metadata_role'\n metadata_filepath = metadata_role + '.txt'\n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filepath)\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filepath)\n\n # Remove the previous path if it exists.\n if os.path.exists(previous_filepath):\n os.remove(previous_filepath)\n\n # Move the current path to the previous path. \n if os.path.exists(current_filepath):\n tuf.util.ensure_parent_dir(previous_filepath)\n os.rename(current_filepath, previous_filepath)", "def undo_move(self):\n # general idea:\n # store the state of the board in a stack before every successful attempted move \n # when this is called, set the current board equal to the top state in the stack\n # print(\"Undo\")\n # print(self)\n # if len(self.board_states) != 0:\n if self.moves != 0:\n self.moves -= 1\n self.stock = []\n self.wp = []\n self.foundations = []\n self.tableaus = []\n self.stock, self.wp, self.foundations, self.tableaus = self.board_states.pop()\n self.init_move_dict()", "def undo(self):\n for command in reversed(self.commands):\n command.undo()", "def undo(self):\n if self._snapshot_index >= 0:\n snapshot = self._snapshots[self._snapshot_index]\n for chunk_location in snapshot:\n dimension, cx, cz = chunk_location\n chunk = self._unserialise_chunk(dimension, cx, cz, -1)\n self._chunk_cache[chunk_location] = chunk\n self._snapshot_index -= 1", "def rewind(self):\n self.run_command('rewind')", "def move_back(self) -> None:\n if self.label == 'ignore':\n return\n\n for crop in self._content:\n crop.move_back()", "def unmakeMove(self, move):", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def rewind():", "def undo(*args, **kwargs)->None:\n pass", "def _undo(self, action, data):\n if self.undobuffer is None:\n return\n if action == \"rot\":\n angle, degPAU = data\n self._rotate(-angle*degPAU/self._degreesPerAU)\n dummy = self.undobuffer.pop()\n elif action == \"stamp\":\n stitem = data[0]\n self.clearstamp(stitem)\n elif action == \"go\":\n self._undogoto(data)\n elif action in [\"wri\", \"dot\"]:\n item = data[0]\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"dofill\":\n item = data[0]\n self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),\n fill=\"\", outline=\"\")\n elif action == 
\"beginfill\":\n item = data[0]\n self._fillitem = self._fillpath = None\n if item in self.items:\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"pen\":\n TPen.pen(self, data[0])\n self.undobuffer.pop()", "def cancel_move(self):\n self.should_move = False", "def rollback(self):\n if len(self.__stack) == 0:\n raise EmptyStackException()\n self.__current_pos = self.__stack[-1][0]\n self.line = self.__stack[-1][1]\n self.linePos = self.__stack[-1][2]\n self.__stack = self.__stack[:-1]", "def revert(self, *args, **kwargs):", "def UndoChanges(self):\n if (len(self.alignmentHistory) > 1):\n self.alignmentHistory.pop()\n self.alignment = self.alignmentHistory[-1][:,:]\n self.Show(self.displayedColumn)\n else:\n self.AlertMessage('Nothing to undo.', 'low')", "def undo_transaction(self):\n transaction = self.context\n entries = transaction.entries()\n\n # check if we can undo\n if not transaction.canUndoOrReverse():\n raise AccessControl_Unauthorized('No permission to create transactionentries, or there are no entries to reverse')\n \n # force a remove from the balances and update the references\n for transactionEntry in entries:\n transactionEntry.removeTransactionEntryFromAccount()\n\n # remove transaction\n transaction.getTransactionFolder().manage_delObjects(ids=transaction.getId())", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def move_back(self):\r\n self.center_x, self.center_y = self.save_pos", "def undo(self):\r\n previous = self.memory.pop()\r\n if not isinstance(previous, task2.ListADT):\r\n raise TypeError(\"Did not expect any other object in memory\")\r\n if previous[0] == \"d\":\r\n index = previous[1]\r\n for i in range(len(previous)-1, 1, -1):\r\n self.text_lines.insert(index, previous[i])\r\n elif previous[0] == \"i\":\r\n start = previous[1]\r\n for j in range(previous[2]):\r\n self.text_lines.delete(start)\r\n else:\r\n raise ValueError(\"Did not expect any other action other than delete or insert\")", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def undo(self, event=None):\n if not self.segs == []:\n self.requestSegByDct((self.segs[-1].getDct() + 2) % 4)", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def pre_revert(self):", "def undo(self):\n if not self.undo_stack:\n return\n self.begin_not_undoable_action()\n self.undo_in_progress = True\n undo_action = self.undo_stack.pop()\n self.redo_stack.append(undo_action)\n if isinstance(undo_action, self.insertclass):\n self._undo_insert(undo_action)\n elif isinstance(undo_action, self.deleteclass):\n self._undo_delete(undo_action)\n else:\n self._handle_undo(undo_action)\n self.end_not_undoable_action()\n self.undo_in_progress = False", "def revert(self):\n reverted = Line(l=self)\n reverted.direction *= -1.0\n return reverted", "def untuck(self):\n self.move_to_neutral()", "def attempt_file_reset(f):\r\n if hasattr(f, 'seek'):\r\n f.seek(0)", "def undo_move(board: 'Block', option: int):\n if option == 0:\n perform_move(board, 0)\n elif option == 1:\n perform_move(board, 1)\n elif option == 2:\n perform_move(board, 3)\n elif option == 3:\n perform_move(board, 2)", "def abort(self):\n for command in 
reversed(self.commands):\n command.undo()", "def revert(self):\n original = getattr(self, \"_original\", None)\n if not original:\n return\n\n if hasattr(self, \"output\"):\n output = self.output\n keep_output = True\n else:\n keep_output = False\n\n del self._original\n\n self.__dict__ = original.__dict__\n\n if keep_output:\n self.output = output", "def reset(self):\n self._cmd_line = 0\n self._file_line = 0", "def undo(self):\n if (0 == len(self._undoStack)):\n raise ValueError(\"Nothing to undo\")\n else:\n self._redoStack.append(self.gameState())\n\n lastGameState = self._undoStack.pop()\n self.counter = lastGameState[\"counter\"]\n self.wonRounds = lastGameState[\"wonRounds\"]\n self.wonGames = lastGameState[\"wonGames\"]\n self.currentMaxPoints = lastGameState[\"currentMaxPoints\"]\n self.sidesChanged = lastGameState[\"sidesChanged\"]\n self.playerPositions = lastGameState[\"playerPositions\"]\n self.servePosition = lastGameState[\"servePosition\"]", "def revert(self, a):\n raise NotImplementedError", "def backToSource(self, point):\n if self.revertTransformation is not None:\n return self.revertTransformation(point)\n return point", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"", "def un_move(self, previous_move: int) -> None:\n self._column_to_row[previous_move] -= 1\n if self._column_to_row[previous_move] == 5:\n self._valid_moves.insert(self._valid_move_order[previous_move], previous_move)\n row = self._column_to_row[previous_move]\n self.board_array[row][previous_move] = 0\n\n self._is_red_active = not self._is_red_active\n\n if self._is_red_active:\n self.hash = self.hash ^ int(self._red_hash_keys[row][previous_move])\n else:\n self.hash = self.hash ^ int(self._red_hash_keys[row][previous_move])\n\n if self._win_state is not None:\n self._win_state = None\n\n self.move_number -= 1", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def _decrement_file_counter(self):\n self._add_to_file_counter(-1)", "def revert(self, ref=None):\n # TODO\n raise NotImplementedError", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def _undo(self):\n if not self._executed:\n raise TransactionNotExecuted(self)\n # The default implementation is to return the inverse of this\n # transaction.\n return Inverse(self)", "def back(self):\n self.position -= 1", "def overwrite_file(self):\n\n new_file = open(self.temp_filename, 'r')\n file = open(self.filename, 'w')\n file.writelines(new_file.readlines())\n new_file.close()\n file.close()\n os.remove(self.temp_filename)", "def undo_move(self, move):\n if move in self.board:\n self.board[move] = self.BLANK_CELL_CHAR\n self.available_moves.add(move)\n else:\n raise ValueError('Move-undo [{}] not possible.'.format(move))", "def undo(self, outer_instance):\n pass", "def c_undo(self):\r\n try:\r\n self.canvas.delete(self.canvas.find_all()[-1])\r\n self.update()\r\n return True\r\n except: return False", "def removeNextMove(self):\n self.path.remove(self.path[self.index])", "def __redo(self):\n self.__undo_controller.redo()", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n 
#shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def flushUndo(*args, **kwargs)->None:\n pass", "def reset_movement(self):\n self.direction = [0, 0]", "def close(self):\n file = self.file\n self.file = None\n self.filename = None\n self.current_line = None\n file.close()", "def wipeFile(file_name):\r\n WipeFileThread(file_name)", "def reset(self) -> None:\n self.file.write(\"\\b\" * (len(self.current_frame) + self.current_padding))", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def trash_file(file_to_trash, document_name) :\n dtpo_log('debug', \"trash_file file -> %s\", file_to_trash)\n\n source = Config.config.get_source_directory() + '/' + file_to_trash\n destination = Config.config.get_trash_directory() + '/' + document_name\n\n os.rename(source, destination)", "def moveToPreviousFrame(self):\n\t\tall_ts = [s for t in self.stamps_by_stream.values() for s in t]\n\t\tall_ts.sort()\n\t\tfirst_frame = all_ts[0]\n\n\t\tselected_index = bisect.bisect_right(all_ts, self._timeline.current_pos)-1\n\t\tif selected_index <= 0 or all_ts[selected_index-1] < first_frame:\n\t\t\t# There is no data before, or no frame. Do nothing\n\t\t\treturn\n\t\tself._timeline.current_pos = all_ts[selected_index-1]\n\t\tself.objectSelected.emit(\n\t\t self.getFileAtStamp(self._timeline.current_pos)\n\t\t)", "def delete_original( self ):\n try:\n os.remove( self.PATH_TO_SOURCE_FILE )\n copy_check = utility_code.checkFileExistence( self.PATH_TO_SOURCE_FILE ) # should not exist\n if copy_check == 'exists':\n message = 'deletion of original file at ```%s``` failed, as determined by copy_check' % self.PATH_TO_SOURCE_FILE\n log.error( message )\n sys.exit( message )\n else:\n log.info( 'deletion successful of original file at ```%s```' % self.PATH_TO_SOURCE_FILE )\n except Exception, e:\n message = 'deletion of original file at ```%s``` failed; exception, `%s`' % ( self.PATH_TO_SOURCE_FILE, unicode(repr(e)) )\n log.error( message )\n sys.exit( message )\n return", "def move_to(self, path: str) -> None:\n self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path))\n os.rename(self._file_path, self._new_path)\n self._file_was_moved = True", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def cancelMove(self):\n self._moveHelper(self.moveStartPos.pos, cancellingMove=True)\n# self.currentMove = None\n self.selectedUnit.selected = False\n self.selectedUnit = None\n self.moveStartPos = None\n self.map.hideMovementRange()", "def reset(self):\n self.undo_stack = Stack(self.undo_stack_size)\n self.redo_stack[:] = []\n self.not_undoable_action = False\n self.undo_in_progress = False", "def rem_file(self, key):\n del self.fileList[key]\n\n path = os.path.join(self.file_path, '%s.xoj' % key)\n try:\n os.remove( path )\n except:\n print \"Unable to remove\", path\n self.save()", "def reset(self):\n self.prev_obj1_position = None\n self.prev_obj2_position = None", "def rollback(self):\n self._rollback = True", "def move_file(source, destination):\n shutil.move(source, destination)", 
"def FSLFlip(self, infile, prefix):\n cmd = '3dresample -orient LPI -prefix %s.nii -inset %s+orig' % \\\n (prefix, infile)\n self.CheckExec(cmd, ['%s.nii' % prefix])\n fname = '%s+orig.BRIK' % infile\n if os.path.exists(fname):\n os.remove(fname)\n fname = '%s+orig.HEAD' % infile\n if os.path.exists(fname):\n os.remove(fname)", "def restore(self,):\n self.pos, self.dataptr, = self.stack.pop()" ]
[ "0.6898709", "0.6796202", "0.67440903", "0.66569364", "0.6606794", "0.6522146", "0.6483597", "0.6466735", "0.6456174", "0.6436866", "0.64312315", "0.6424864", "0.63239264", "0.62896913", "0.62687606", "0.61865735", "0.6161903", "0.614128", "0.6138943", "0.6126032", "0.6055876", "0.6041878", "0.6013321", "0.5997303", "0.59900224", "0.59493136", "0.5941243", "0.5939681", "0.5936037", "0.5912631", "0.5899942", "0.5859381", "0.58580697", "0.5855819", "0.57614714", "0.5715396", "0.5677039", "0.56576985", "0.56427103", "0.5636318", "0.5626864", "0.562326", "0.561234", "0.5600484", "0.5600261", "0.558224", "0.5570238", "0.556632", "0.5547517", "0.554255", "0.55280125", "0.5525275", "0.55145824", "0.5508018", "0.5486911", "0.54855305", "0.5484918", "0.54767823", "0.5467324", "0.54385287", "0.5421909", "0.5418719", "0.5397842", "0.53952646", "0.5389522", "0.5381421", "0.5365131", "0.53598124", "0.5357689", "0.5356321", "0.53540784", "0.5353174", "0.53482664", "0.5338022", "0.533144", "0.53246933", "0.5321003", "0.5317193", "0.53171134", "0.53156036", "0.53027064", "0.5293883", "0.52907664", "0.5273701", "0.5272862", "0.5272565", "0.5261766", "0.5250483", "0.5246355", "0.52378494", "0.5228861", "0.5228604", "0.52081245", "0.52070236", "0.5206397", "0.5187837", "0.51833284", "0.5169871", "0.5168376", "0.51671326" ]
0.7445615
0
Insert the Crop into this column.
def insert(self, item: Crop) -> None:
    self._content.append(item)
    self._file_counts[item.annot_type] = self._file_counts.get(item.annot_type, 0) + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, file_path: str, annot_type: str) -> None:\n if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:\n raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')\n x_pos = get_metadata_from_filename(file_path).x_pos\n if x_pos in self._x_positions:\n col = self._cols[x_pos]\n else:\n col = Column()\n self._x_positions.append(x_pos)\n self._x_positions.sort()\n col.insert(Crop(file_path, annot_type))\n self._cols[x_pos] = col\n\n self.n_cols = len(self._cols)", "def insert(self, *args, **kwargs):\n return _image.image_insert(self, *args, **kwargs)", "def set_crop(self, crop):\n self.crop = crop", "def place_disc(self, column):\n stdout.write(\"place_disc %d\\n\" % column)\n stdout.flush()", "def placeCrate (self, crates_char, row, column):", "def newClip(self, book, content, typ, date):\n sql = ''' insert into clippings values (NULL, '%s', '%s', '%s', '%s', '%s')\n''' % (book, '0', typ, date, content)\n\n self.__execute__(sql)\n pass", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def insert (self, column, figure):\r\n\t\tc = self.board[column]\r\n\t\tif c[0] != NONE:\r\n\t\t\traise Exception('Column is full')\r\n\r\n\t\ti = -1\r\n\t\twhile c[i] != NONE:\r\n\t\t\ti -= 1\r\n\t\tc[i] = figure\r\n\r\n\t\tself.checkForWin()", "def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild", "def insert(self):\n pass", "def insert_in_tree(self, pic_name, pic_num, crop_num, is_crop=False):\n \n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n \n # insert the picture/crop name in column 0\n if (is_crop == False):\n myiter = self.tree_store.append(None, None)\n if crop.available == True:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n else:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + pic_name + '</b></span>')\n elif (is_crop == True):\n #determine iter that points to row containing pic_num\n # in column 1\n parent = None\n for i in range(0, len(self.tree_store)):\n if (pic_num == self.tree_store[i][1]):\n #found the parent, insert the child\n parent = self.tree_store[i].iter\n myiter = self.tree_store.append(parent, None)\n self.tree_store.set_value(myiter, 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n break\n # expand the row to show the crop\n self.image_tree.expand_row(self.tree_store.get_path(parent), True)\n\n # fill in the remaining columns\n self.tree_store.set_value(myiter, 1, pic_num)\n self.tree_store.set_value(myiter, 2, crop_num)\n self.tree_store.set_value(myiter, 3, \"0%\")\n \n return myiter", "def tool_gen_crop_clicked(self, widget, data=None):\n self._generate_crop()", "def _crop(self, fieldname, scale, box):\n croputils = IImageCroppingUtils(self.context)\n data = croputils.get_image_data(fieldname)\n\n original_file = StringIO(data)\n image = PIL.Image.open(original_file)\n image_format = image.format or self.DEFAULT_FORMAT\n\n cropped_image = image.crop(box)\n cropped_image_file = StringIO()\n cropped_image.save(cropped_image_file, image_format, quality=100)\n cropped_image_file.seek(0)\n\n croputils.save_cropped(fieldname, scale, 
cropped_image_file)\n\n # store crop information in annotations\n self._store(fieldname, scale, box)\n\n # Purge caches if needed\n notify(Purge(self.context))", "def insert_copied(self, *args):\n copy = self.get_copied_food()\n if copy is not None and type(copy.food) == Food:\n fsp = self.get_food_search_panel()\n fsp.reset_food_icon()\n\n ingredient = Ingredient(food=copy.food, amount=decimal.Decimal(100))\n if self.recipe.ingredients is None:\n self.recipe.ingredients = [ingredient]\n else:\n self.recipe.ingredients.append(ingredient)\n\n self.add_food_node(ingredient)\n\n fsp.remove_copy()\n self.update_text(True)\n else:\n print(\"empty or not a food\")", "def insert_data(self):\n\n pass", "def placeImage(self, img, x=0, y=0):\n if img.getSize() == self.getSize() and img.getWidth() == self.__width:\n # Same dimensions\n self._c = img._c\n\n elif x == 0 and self.__height == img.getHeight():\n # Same height, just overwrite a block\n p_start = y * self.__height\n p_end = y*self.__height + img.getSize()\n self._c[p_start:p_end] = img._c\n\n else:\n # Different dimensions\n for dx in range(min(img.getWidth(), self.getWidth() - x)):\n self.writeCol(x+dx, img.getCol(dx), y)", "def _insert_op(self, op):", "def cropbox(row):\n if row['Type'] == 'Rectangle':\n cropbox = [row['X'], row['Y'], row['X'] + row['Width'], \n row['Y'] + row['Height']]\n else:\n # damnit I should set up a logger\n print('WARNING: The annotation \"%s\" (index %d) is not a \\\n rectangle!' %(row['Image'], row['Index']))\n cropbox = None\n return cropbox", "def add_crop_center(self, shape):\n self.methods.append(self._crop_center)\n self.args.append([shape])", "def cover_crop_added(self):\n\n ## getting input parameter\n crop_input = self.soil_inputs.crop_cover.values[0]\n if pd.isnull(crop_input):\n crop_input = \"nan\"\n #climate_input = self.soil_inputs.climate.values[0]\n years_cropcover_tech = self.soil_inputs.time_using_crop_cover.values[0]\n\n if np.isnan(years_cropcover_tech):\n years_cropcover_tech = 10\n\n if self.language == \"spanish\":\n #climate_options = [i.lower() for i in tl.climate_options[0]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[0]]\n else:\n #climate_options = [i.lower() for i in tl.climate_options[1]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[1]]\n\n if crop_input.lower() in cover_crop_options:\n\n cc_eng_input = tl.cover_crop_options[1][cover_crop_options.index(crop_input.lower())]\n self._cc_eng_input = cc_eng_input\n #cl_eng_input = tl.climate_options[1][climate_options.index(self._cl_eng_input.lower())]\n\n covercropfilter = ef.cover_cropping_factors.Change.str.lower() == cc_eng_input.lower()\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == self._cl_eng_input.lower()\n\n if climatefilter.sum() == 0:\n cl_eng_input = tl.world_climate_bouwman[1][tl.world_climate_bouwman[0].index(self._cl_eng_input)]\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == cl_eng_input.lower()\n\n filter_conditions = climatefilter & covercropfilter\n if np.array(filter_conditions).sum() != 0:\n factor_change_20years = ef.cover_cropping_factors.Factor.loc[filter_conditions].values[0]\n else:\n factor_change_20years = 1\n\n self.cover_crop_soc_change = cumulative_socemissions_for_20years(years_cropcover_tech,\n factor_change_20years,\n self.soil_c_stock)\n else:\n self.cover_crop_soc_change = [0]", "def _crop_image_and_paste(self, image, center, size):\n center_y, center_x = center\n target_h, target_w = size\n img_h, img_w, img_c = 
image.shape\n\n x0 = max(0, center_x - target_w // 2)\n x1 = min(center_x + target_w // 2, img_w)\n y0 = max(0, center_y - target_h // 2)\n y1 = min(center_y + target_h // 2, img_h)\n patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n left, right = center_x - x0, x1 - center_x\n top, bottom = center_y - y0, y1 - center_y\n\n cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n for i in range(img_c):\n cropped_img[:, :, i] += self.mean[i]\n y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n border = np.array([\n cropped_center_y - top, cropped_center_y + bottom,\n cropped_center_x - left, cropped_center_x + right\n ],\n dtype=np.float32)\n\n return cropped_img, border, patch", "def insert_direct(self, key, data, *args, **kwargs):\n return pycassa.ColumnFamily.insert(self, key, data, *args, **kwargs)", "def insert_placement(self, insert_placement):\n\n self._insert_placement = insert_placement", "def insert(self, data):\r\n pass", "def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n if not self.insert_mode:\n self.log.info(f\"Truncating {self.table_name}.\")\n redshift.run(self.sql_truncate.format(self.table_name))\n self.log.info(f\"Inserting data into {self.table_name}.\")\n redshift.run(self.sql_insert)", "def insert(self, product):\n pass", "def basic_crop(data):\n return data['crop'];", "def image_tree_menu_add_manually_activate(self, widget, data=None):\n \n # TODO: this uses bad design principles! all access of the image_store\n # should go through the communicator, then crop_reset should be\n # moved to an update procedure.\n (model, treeiter) = self.image_tree.get_selection().get_selected()\n pic_num = int(self.tree_store.get_value(treeiter, 1))\n crop_num = int(self.tree_store.get_value(treeiter, 2))\n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n crop.set_for_manual()\n self.crop_reset(pic_num, crop_num)", "def insert_copied(self, *args):\n copy = self.get_copied_food()\n if copy is not None:\n fsp = self.get_food_search_panel()\n fsp.reset_food_icon()\n if type(copy.food) == Food:\n food = FoodUsage(food=copy.food, amount=decimal.Decimal(100))\n if self.meal.foods is None:\n self.meal.foods = [food]\n else:\n self.meal.foods.append(food)\n\n self.add_food_node(food)\n else: # copy.food is Recipe\n recipe_exec = Recipe(name=copy.food.name, is_template=False, notes=\"\",\n serving_size=decimal.Decimal(1), template=copy.food)\n for ing in copy.food.ingredients:\n recipe_exec.add_food(ing.food, ing.amount)\n self.meal.add_recipe(recipe_exec)\n recipe_node = TreeViewRecipe(recipe=recipe_exec,\n meal_tree_box=self.meal_tree_box,\n parent_node=self)\n fsp.remove_copy()\n self.update_text(True)", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / (self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def __call__(self, img):\n image_width, image_height = img.size\n image_short = min(image_width, image_height)\n\n crop_size = float(self.imgsize) / 
(self.imgsize + 32) * image_short\n\n crop_height, crop_width = crop_size, crop_size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height))", "def insert(self, *args):\n return _ida_hexrays.qvector_ccase_t_insert(self, *args)", "def insert_values():\n pass", "def finalize(self, col):\n\t\traise NotImplementedError()", "def on_insert(self, callback):\n self._insert_callback = callback if callable(callback) else _void", "def paragraph_to_insert(self, paragraph_to_insert):\n\n self._paragraph_to_insert = paragraph_to_insert", "def Discretize(self, col):\n return Discretize(col)", "def before_insert(self, obj, st):\n pass", "def _crop_concat(self, upsampled, bypass):\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = F.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)", "def copy(self) -> \"Column\":\n return replace(self, _cells=[])", "def configure_crop(self, context_pad):\n # crop dimensions\n in_ = self.net.inputs[0]\n tpose = self.transformer.transpose[in_]\n inv_tpose = [tpose[t] for t in tpose]\n self.crop_dims = np.array(self.net.blobs[in_].data.shape[1:])[inv_tpose]\n #.transpose(inv_tpose)\n # context padding\n self.context_pad = context_pad\n if self.context_pad:\n in_ = self.net.inputs[0]\n transpose = self.transformer.transpose.get(in_)\n channel_order = self.transformer.channel_swap.get(in_)\n raw_scale = self.transformer.raw_scale.get(in_)\n # Padding context crops needs the mean in unprocessed input space.\n mean = self.transformer.mean.get(in_)\n if mean is not None:\n inv_transpose = [transpose[t] for t in transpose]\n crop_mean = mean.copy().transpose(inv_transpose)\n if channel_order is not None:\n channel_order_inverse = [channel_order.index(i)\n for i in range(crop_mean.shape[2])]\n crop_mean = crop_mean[:,:, channel_order_inverse]\n if raw_scale is not None:\n crop_mean /= raw_scale\n self.crop_mean = crop_mean\n else:\n self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)", "def append_construct(self, c):\n if self.array_index is not None:\n self.parent_item.construct.args[self.arg_index].insert(self.array_index + 1, c)\n else:\n raise ValueError(\"Invalid parent\")", "def crop_center_img(self):\n # TODO Task 1.1\n img = self.data\n img_with_missing_crop = np.copy(img)\n dim =128\n crop = dim // 2\n start = crop - (crop // 2)\n #ground truth overlaps img_with_missing_crop by 7 pixels in all directions\n img_with_missing_crop[:,start+7:start + crop-7, start+7:start + crop-7,:] = 0\n #255\n #inpu = Image.fromarray((img_with_missing_crop[1,:,:,:]*255).astype('uint8'))\n #inpu.save(\"cropped.png\")\n groundtruth_crop = img[:,start:start + crop, start:start + crop,:]\n self.data = (img_with_missing_crop, groundtruth_crop)", "def copy_entry(self, row, col):\n if self.results and self.settings['auto_copy']:\n row, col = self.table.currentRow(), self.table.currentColumn()\n to_copy = self.results[row][col]\n self.clipboard.setText(to_copy)", "def paste(self, table_model, index):\n old_value = table_model.data(index, Qt.DisplayRole)\n new_value = pyperclip.paste()\n self.undostack.push(EditCommand(table_model, index, old_value, new_value))\n del old_value, new_value", "def _copy_custom_attributes(self, column):\n\n column.min_length = self.min_length\n column.max_length = self.max_length\n column.allow_blank = self.allow_blank\n column.allow_whitespace = self.allow_whitespace\n\n 
super()._copy_custom_attributes(column)", "def cut(self, table_model, index):\n old_value = table_model.data(index, Qt.DisplayRole)\n new_value = \"\"\n self.copy(table_model, index)\n self.undostack.push(EditCommand(table_model, index, old_value, new_value))\n del old_value, new_value", "def create_clipping(request):\n if request.method == 'POST':\n image = request.POST.get('image')\n\n # check if the blanked image should be saved to the backend\n if request.POST.get('save_clipping') in ['false', False]:\n save = False\n else:\n save = True\n\n selection = {\n 'id': int(request.POST.get('selection[id]')),\n 'x': int(round(float(request.POST.get('selection[x]')))),\n 'y': int(round(float(request.POST.get('selection[y]')))),\n 'width': int(round(float(request.POST.get('selection[width]')))),\n 'height': int(round(float(request.POST.get('selection[height]')))),\n 'full_width': int(round(float(request.POST.get('selection[full_width]')))),\n 'full_height': int(round(float(request.POST.get('selection[full_height]'))))\n }\n\n # get the image id, the model object and the selection from the model\n edit_url = request.POST.get('edit_url')\n image_id = int(edit_url.split(\"/\")[-2])\n image_object = CustomImage.objects.get(id=image_id)\n original_selection = image_object.selections\n\n reseized_image = image_object.resize_url\n\n if selection['id'] == -1: # no selection -> use the whole image\n cropped_image = reseized_image\n else: # a specific selecion is used -> get the cropped image and selecion attributes\n image_object = CroppedImage.objects.get(id=selection['id'])\n cropped_image = original_selection[unicode(request.POST.get('selection[id]'))][\"url\"]\n\n # calculate the new blanking mask\n mask = get_mask_from_image(image, selection, cropped_image, save, image_object)\n\n # stream the new mask to the output\n stream = BytesIO()\n flat_mask = []\n for line in mask:\n flat_mask.extend(line)\n np.savetxt(stream, flat_mask, fmt=\"%u\", delimiter=', ', newline=', ')\n stream.seek(0)\n return HttpResponse(stream.read())\n\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n raise NotImplementedError", "def on_insert(self) -> None:", "def InsertMode(self):\n self.stc.SetLineCaret()\n self.stc.SetOvertype(False)\n self.BlockMode = False\n self._SetMode(ViKeyHandler.INSERT, u\"INSERT\")", "def replace_construct(self, c):\n if self.array_index is not None:\n self.parent_item.construct.args[self.arg_index][self.array_index] = c\n elif self.arg_index is not None:\n self.parent_item.construct.args[self.arg_index] = c\n else:\n raise ValueError(\"Invalid parent\")", "def crown(self):\n self.crowned = True", "def crop_id(self):\n return self._crop_id", "def insert(self, new):\n return self.replace(None, new)", "def after_insert(self, obj, st):\n pass", "def __insert_data_in_img(self):\n data_df = pd.read_csv(\n os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, \"PRESENT\", self.datetime\n ),\n ),\n index_col=0,\n )\n for feature_category in self.unique_feature_category_names:\n self.category_img_dict[feature_category][\"value\"] = int(\n data_df.loc[feature_category, :].sum()\n )\n\n for row_cnt, (feature_category_name, feature_data) in enumerate(\n self.category_img_dict.items()\n ):\n arrow_width = int(\n (self.baseline_scale_img.shape[1] / 100) * abs(feature_data[\"value\"])\n )\n if feature_data[\"value\"] > 0:\n 
arrow_end = (self.arrow_start[0] + arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((arrow_end[1] - self.arrow_start[1]) / 2) + self.arrow_start[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.positive_arrow_colors[bracket_no][2],\n self.positive_arrow_colors[bracket_no][1],\n self.positive_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"+\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 2,\n )\n\n else:\n arrow_end = (self.arrow_start[0] - arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((self.arrow_start[1] - arrow_end[1]) / 2) + arrow_end[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.negative_arrow_colors[bracket_no][2],\n self.negative_arrow_colors[bracket_no][1],\n self.negative_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"-\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 2,\n )\n\n if row_cnt != (len(list(self.category_img_dict.keys())) - 1):\n self.arrow_start = (\n arrow_end[0],\n self.side_scale_y_tick_cords[row_cnt + 1][0],\n )\n\n small_arrow_top_left = (\n int(arrow_end[1]) + 20,\n int(arrow_end[0] - self.small_arrow_img.shape[1] / 2),\n )\n small_arrow_bottom_right = (\n small_arrow_top_left[0] + self.small_arrow_img.shape[0],\n small_arrow_top_left[1] + self.small_arrow_img.shape[1],\n )\n self.img[\n small_arrow_top_left[0] : small_arrow_bottom_right[0],\n small_arrow_top_left[1] : small_arrow_bottom_right[1],\n ] = self.small_arrow_img\n color_bar_top_left = (\n arrow_end[1] + self.small_arrow_img.shape[0] + 25,\n self.baseline_scale_top_left[1],\n )\n color_bar_bottom_right = (\n color_bar_top_left[0] + self.color_bar_img.shape[0],\n color_bar_top_left[1] + self.color_bar_img.shape[1],\n )\n self.img[\n color_bar_top_left[0] : color_bar_bottom_right[0],\n color_bar_top_left[1] : color_bar_bottom_right[1],\n ] = self.color_bar_img\n\n color_bar_middle = (\n (int(580 + self.baseline_scale_img.shape[1] / 2)),\n color_bar_bottom_right[0] + 50,\n )\n cv2.putText(\n self.img,\n \"CLASSIFICATION PROBABILITY\",\n color_bar_middle,\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n (0, 0, 0),\n 2,\n )\n cv2.imwrite(self.img_save_path, self.img)\n self.visualization_timer.stop_timer()\n stdout_success(\n msg=f\"SHAP summary graph saved at {self.img_save_path}\",\n elapsed_time=self.visualization_timer.elapsed_time_str,\n )", "def create_insert_trigger(self):\n self.execute(self.commands.insert_function(\n self.name,\n cols=self._join_cols(self.intersection.dest_columns),\n vals=self._qualify('NEW', self.intersection.origin_columns)\n ))\n\n self.execute(self.commands.insert_trigger(\n self.triggers['INSERT'],\n self.source.name,\n self.name\n ))", "def test_crop_append():\n raw = _test_raw_reader(\n read_raw_bti,\n pdf_fname=pdf_fnames[0],\n config_fname=config_fnames[0],\n head_shape_fname=hs_fnames[0],\n )\n y, t = raw[:]\n t0, t1 = 0.25 * t[-1], 0.75 * t[-1]\n mask = (t0 <= t) * (t <= t1)\n raw_ = raw.copy().crop(t0, t1)\n y_, _ = raw_[:]\n assert y_.shape[1] == mask.sum()\n assert y_.shape[0] == y.shape[0]", "def 
_copy_custom_attributes(self, column):\n\n column._fk = self._fk\n column._fk_on_update = self._fk_on_update\n column._fk_on_delete = self._fk_on_delete\n\n super()._copy_custom_attributes(column)", "def paste(self, image, xy=(0,0)):\n # Parse xy location from any type of unit to pixels\n x,y = xy\n x = units.parse_dist(x,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n y = units.parse_dist(y,\n ppi=self.ppi,\n default_unit=\"px\",\n canvassize=[self.width,self.height])\n xy = (x,y)\n # Need more options, eg anchor point, and coordinate xy\n self.drawer.flush()\n if isinstance(image, Canvas): image = image.img\n if image.mode == \"RGBA\":\n self.img.paste(image, xy, image) # paste using self as transparency mask\n else: self.img.paste(image, xy)\n self.update_drawer_img()\n return self", "def insert_before(self, text, line, col):\n col = self.canonicalize_column_index(line, col)\n col_off = self.col_offs[line]\n adj_col = (col_off.get_rewritten_pos(col) -\n col_off.get_insertion_length(col))\n theline = self.lines[line]\n self.lines[line] = theline[:adj_col] + text + theline[adj_col:]\n col_off.insert(col, len(text))", "def draw_hand_box(data,box,c=[255,255,255]):\n crop = np.array(data['crop']);\n if box is not None:\n cv2.rectangle(crop, *box, c);\n return crop;", "def corp_image(self):\n try:\n # Open image\n image_to_crop = Image.open(self.captcha_image_filename, 'r')\n # Crop image\n image = image_to_crop.crop((-1, 8, 65, 22))\n # Save image\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise(error)", "def get_crop(self):\n if self.cropping_method == self.CROP_NONE:\n self.autocrop()\n return '{h}% {v}%'.format(h=self.from_left, v=self.from_top)", "def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def get_db_prep_save(self, value, connection=None):\n if value:\n return super(StdImageField, self).get_db_prep_save(value, connection=connection)\n else:\n return u''", "def _insert_single(self, disc, class_num):\n self.cursor.execute(self.INSERT, (disc, class_num))\n self.conn.commit()", "def insert(self, loc, column, value, allow_duplicates=False, inplace=False):\n if inplace:\n self.frame.insert(loc, column, value, allow_duplicates=allow_duplicates)\n else:\n output = self.copy()\n output.frame.insert(loc, column, value, allow_duplicates=allow_duplicates)\n return output", "def onCut(self):\n pass", "def _crop_data(cfg, raw, subject):\n if subject != 'emptyroom' and cfg.crop_runs is not None:\n raw.crop(*crop_runs)", "def crop(self, left:int=0, bottom:int=0, right:int=0, top:int=0):\n return Glyph(tuple(\n _row[left : (-right if right else None)]\n for _row in self._rows[top : (-bottom if bottom else None)]\n ))", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n height, width = image.shape[:2]\n dest_image[pos_y:(pos_y + height), pos_x:(pos_x + width)] = image", "def onAddCutToolClicked(self, event):\n i_cube = self.cube_choice.GetSelection()\n i_dimension = self.cut_dimension_choice.GetSelection()\n\n if i_dimension <= 0:\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not 
selected'))\n else:\n value = self.cut_value_textCtrl.GetValue()\n if not value.strip():\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))\n else:\n cube = self._OLAP_server.getCubes()[i_cube]\n dimension = cube.getDimensions()[i_dimension - 1]\n row = (dimension.getLabel(), dimension.getName(), value)\n self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)\n\n # After adding, clear the controls\n self.cut_dimension_choice.SetSelection(0)\n self.cut_value_textCtrl.SetValue(u'')\n\n event.Skip()", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n dest_image.paste(image, (pos_x, pos_y))", "def insert(self, val):\n self.data.insert(0,val)\n self.size = self.size + 1", "def _insert_img(self, cursor, img, fmt, metadata=None):\n if metadata:\n width = metadata.get('width', None)\n height = metadata.get('height', None)\n else:\n width = height = None\n try:\n image = QtGui.QImage()\n image.loadFromData(img, fmt.upper())\n if width and height:\n image = image.scaled(width, height,\n QtCore.Qt.IgnoreAspectRatio,\n QtCore.Qt.SmoothTransformation)\n elif width and not height:\n image = image.scaledToWidth(width, QtCore.Qt.SmoothTransformation)\n elif height and not width:\n image = image.scaledToHeight(height, QtCore.Qt.SmoothTransformation)\n except ValueError:\n self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)\n else:\n format = self._add_image(image)\n cursor.insertBlock()\n cursor.insertImage(format)\n cursor.insertBlock()", "def _prep_im_for_blob(self, im, pixel_means, bbox):\n im = im.astype(np.float32, copy=False)\n im -= pixel_means\n im_shape = im.shape\n\n # crop version 2\n x, y, w, h = bbox\n crop_img, crop_w, crop_h = None, None, None\n if (x, y, w, h) == (0, 0, im.shape[1]-1, im.shape[0]-1):\n crop_img = im[:,:,:]\n crop_w = w\n crop_h = h\n else:\n # 1. random shifted image'\n # crop_x = np.random.randint(x)\n # crop_w = np.random.randint(x+w, im_shape[1]-1) - crop_x\n # crop_y = np.random.randint(y)\n # crop_h = np.random.randint(y+h, im_shape[0]-1) - crop_y\n # crop_img = im[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w, :]\n # 2. 
original image\n crop_img = im[y:y+h, x:x+w, :]\n crop_w, crop_h = w, h\n\n im_scale_x = float(self._width) / float(crop_w)\n im_scale_y = float(self._height ) / float(crop_h)\n crop_img = cv2.resize(crop_img, None, None, fx=im_scale_x, fy=im_scale_y,\n interpolation=cv2.INTER_LINEAR)\n\n return crop_img", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def DocumentInlineBlipInsert(self, wave_id, wavelet_id, blip_id, position):\n inline_blip_data = self.__CreateNewBlipData(wave_id, wavelet_id)\n inline_blip_data.parent_blip_id = blip_id\n op = Operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,\n blip_id=blip_id,\n index=position,\n prop=inline_blip_data)\n self.__context.AddOperation(op)\n return inline_blip_data", "def batch_insert_direct(self, data, *args, **kwargs):\n return pycassa.ColumnFamily.batch_insert(self, data, *args, **kwargs)", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1", "def _insert_internal(self, cols, vals) :\n\n self.row_id += 1\n vals[0] = self.row_id\n\n if None in vals :\n cvs = list(zip(cols, vals))\n cvs = [cv for cv in cvs if cv[1] is not None]\n cs = [cv[0] for cv in cvs]\n vs = [cv[1] for cv in cvs]\n else :\n cs = cols\n vs = vals\n\n value_sql = ','.join([self._quoter(cols[i]) % str(vs[i]) \n for i in range(len(vs))])\n\n col_sql = ','.join(['\"%s\"' % c for c in cs])\n insert_sql = 'INSERT INTO \"%s\" (%s) VALUES (%s)' % (self.name, col_sql, value_sql)\n cur = self.con.cursor()\n cur.execute(insert_sql)", "def save(self, force_insert=False, force_update=False):\n if not self.width and not self.height:\n from django.db import IntegrityError\n raise IntegrityError, \"A dimension must have a width and a height.\"\n else:\n super(Dimension, self).save(force_insert, force_update)", "def addcolumn(self, colname, coldata):\n if len(coldata) != len(self):\n raise ValueError,\"Column length must match catalog length\"\n\n #Most of the bookkeeping is the same as for an empty column\n self.addemptycolumn(colname,coldata.dtype)\n\n #and then we reset the column to contain the actual data\n setattr(self,colname,coldata)", "def insert_item(self, axis, loc, value, how=\"inner\", replace=False):\n assert isinstance(value, type(self))\n\n def mask(idx):\n if len(idx) == len(self.get_axis(axis)):\n return self\n return (\n self.getitem_column_array(idx, numeric=True)\n if axis\n else self.getitem_row_array(idx)\n )\n\n if 0 <= loc < len(self.get_axis(axis)):\n first_mask = mask(list(range(loc)))\n second_mask_loc = loc + 1 if replace else loc\n second_mask = mask(list(range(second_mask_loc, len(self.get_axis(axis)))))\n return first_mask.concat(axis, [value, second_mask], join=how, sort=False)\n else:\n return self.concat(axis, [value], join=how, sort=False)", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def save(self, *args, **kwargs):\n if self.pk is None:\n saved_image = self.logo\n self.logo = None\n super().save(*args, **kwargs)\n self.logo = saved_image\n kwargs.pop('force_insert', None)\n super().save(*args, **kwargs)", "def insert(self):\n #vim.command(\"autocmd! 
CursorMovedI *\")\n try:\n placeholder = self.placeholders.pop()\n pos = self.findPlaceholder(placeholder)\n except IndexError:\n #TODO here I could do a findAllPlaceHolders on the complete file, for\n #reducing errors!\n pos = (0,0,0)\n if pos !=(0,0,0):\n line = self.buffer[pos[0]]\n new_line = line[:pos[1]] + \"\" + line[pos[1]+pos[2]:]\n cursor = (pos[0]+1, pos[1])\n vim.current.window.cursor = cursor\n vim.command(\"startinsert\")\n vim.command(\"redraw\")\n self.buffer[pos[0]] = new_line\n yield\n self.templateMode = False\n return", "def DocumentInlineBlipInsertAfterElement(self):\n raise NotImplementedError()", "def InsertColumn(self, before, text, width=_DEFAULT_COL_WIDTH,\r\n flag=wx.ALIGN_LEFT, image=-1, shown=True, colour=None, \r\n edit=False):\r\n \r\n colInfo = TreeListColumnInfo(text, width, flag, image, shown, colour, \r\n edit)\r\n self.InsertColumnInfo(before, colInfo)", "def crop(self,crop_vector = [None, None, None, None]):\n xmin,xmax,ymin,ymax = crop_vector\n \n xmin = self._obj.x.min() if xmin is None else xmin\n xmax = self._obj.x.max() if xmax is None else xmax\n ymin = self._obj.y.min() if ymin is None else ymin\n ymax = self._obj.y.max() if ymax is None else ymax \n \n self._obj = self._obj.sel(x=slice(xmin, xmax),y=slice(ymin,ymax))\n\n return self._obj", "def insert(self, tname, valdict, cols = None):\n icmd, vals = make_insert_command(tname, valdict, cols)\n self.write_curs.execute(icmd, vals)", "def test(self):\n model = cropped_manual()\n model.image_id = 123\n model.time_stamp = 1547453775.2\n model.cropped_path = '/im/a/totally/real/cropped/path/i/swear.jpg'\n\n truncateTable('cropped_manual')\n dao = CroppedManualDAO(defaultConfigPath())\n\n self.assertEqual(dao.addImage(None), -1)\n\n resultingId = dao.addImage(model)\n self.assertIsNotNone(resultingId)\n self.assertNotEqual(resultingId, -1)", "def insert(self, sample, *args):\n raise NotImplementedError", "def insert(self, column, color):\n c = self.board[column]\n if c[0] != 0:\n raise Exception('Column is full')\n\n i = -1\n while c[i] != 0:\n i -= 1\n c[i] = color\n\n self.checkForWin()", "def placement(self, placement):\n\n self.container['placement'] = placement", "def crop_and_concat(self, upsampled, bypass, crop=False):\n logging.debug(\"Before - Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"Before - bypass: {}\".format(bypass.size()))\n if crop:\n c1 = (bypass.size()[2] - upsampled.size()[2]) // 2\n c2 = (bypass.size()[3] - upsampled.size()[3]) // 2\n bypass = F.pad(bypass, (-c2, -c2, -c1, -c1))\n logging.debug(\"Upsampled: {}\".format(upsampled.size()))\n logging.debug(\"bypass: {}\".format(bypass.size()))\n return torch.cat((upsampled, bypass), 1)" ]
[ "0.61796373", "0.55629903", "0.5431215", "0.5284711", "0.5271825", "0.52523685", "0.5205015", "0.51911217", "0.5175209", "0.5126394", "0.51254916", "0.51038134", "0.50434446", "0.49886337", "0.49873215", "0.49750116", "0.49747944", "0.49618477", "0.49073657", "0.48555687", "0.4799446", "0.4780833", "0.47556216", "0.47403595", "0.4725979", "0.46758163", "0.4668189", "0.46655014", "0.46467578", "0.46054676", "0.46054676", "0.45981684", "0.45814082", "0.45759335", "0.4575729", "0.45407623", "0.45371526", "0.45345545", "0.4534248", "0.45286125", "0.4516637", "0.4507474", "0.4501273", "0.44996533", "0.44956607", "0.44951653", "0.44848722", "0.44758168", "0.44699624", "0.44694495", "0.4467922", "0.44561324", "0.44546217", "0.44533914", "0.44511026", "0.44477898", "0.4438147", "0.4436601", "0.44355646", "0.44149518", "0.44133013", "0.44089648", "0.44089305", "0.44008002", "0.43963224", "0.43939936", "0.43922472", "0.43833354", "0.438321", "0.43790725", "0.43752596", "0.43729267", "0.43728608", "0.4367115", "0.43649912", "0.43649378", "0.4359921", "0.43540856", "0.43402267", "0.43384567", "0.43345013", "0.43320352", "0.4321801", "0.43185976", "0.43144113", "0.43114513", "0.43033287", "0.429755", "0.42928347", "0.4287662", "0.42859995", "0.42857763", "0.42815024", "0.42792794", "0.4278341", "0.42707434", "0.42653576", "0.42651007", "0.42567816", "0.42555138" ]
0.4963357
17
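The row above documents `Column.insert` from a crop-grid helper, and one of its negatives (the `Grid.insert` snippet) shows the call pattern `col.insert(Crop(file_path, annot_type))`. The block below is only a minimal, self-contained sketch of that pattern; the `Crop` definition, the example file names, and the annotation-type strings are illustrative assumptions, not code taken from the dataset.

```python
from typing import Dict, List


class Crop:
    """Illustrative stand-in: a crop identified by its source file and annotation type."""

    def __init__(self, file_path: str, annot_type: str) -> None:
        self.file_path = file_path
        self.annot_type = annot_type


class Column:
    """Holds the crops that share one x-position in the grid."""

    def __init__(self) -> None:
        self._content: List[Crop] = []
        self._file_counts: Dict[str, int] = {}

    def insert(self, item: Crop) -> None:
        # Matches the documented method: store the crop and bump the
        # per-annotation-type counter for this column.
        self._content.append(item)
        self._file_counts[item.annot_type] = self._file_counts.get(item.annot_type, 0) + 1


# Example usage (file names and annotation types are hypothetical).
col = Column()
col.insert(Crop("crops/x0001_y0002.png", "segmentation"))
col.insert(Crop("crops/x0001_y0003.png", "bbox"))
print(col._file_counts)  # {'segmentation': 1, 'bbox': 1}
```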
Mark this column with the provided label. Returns number of labelled crops.
def mark_as(self, label: str) -> int:
    self.label = label
    return len(self._content) // len(ANNOTATIONS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit(self, label=None):\n self.labels[label] += 1", "def label_index(self, label: Text) -> int:\n count = 0\n for l in self.le.classes_:\n if(l == label):\n return count\n count += 1", "def get_count_by_label(self, label=None):\n if label is None:\n return len(self.data)\n else:\n return sum(1 for d in self.data if d.pred == label)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def label(self):\n return self._label_shape", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetCount(self, label)", "def get_label_num(self, *args):\n return _ida_hexrays.ctree_item_t_get_label_num(self, *args)", "def nr_labels(self):\n return None if self.pY is None else self.Y.shape[1]", "def num_labels(self) -> int:\n raise NotImplementedError", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetCount(self, label)", "def label(self, location, *args, **kwargs):\n\n if isinstance(location, fslimage.Image):\n return self.maskLabel(location, *args, **kwargs)\n else:\n return self.coordLabel(location, *args, **kwargs)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUC3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUC2_GetCount(self, label)", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_GetCount(self, label)", "def _select(start, n, label) -> int:\n n_selected = 0\n for i in range(start, int(start + n)):\n x = self._x_positions[i]\n n_selected += self._cols[x].mark_as(label)\n return n_selected", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUC3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUC3_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2ISS2_GetCount(self, label)", "def labelpos(self):\n return self._labelpos", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3ISS3_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_GetCount(self, label)", "def _alter(self, label):\n altered = np.full(self.n, -1)\n altered[np.where(self.y_train == label)] = +1\n return altered", "def label_counts(rows):\n counts = rows.iloc[:, 
-1].value_counts()\n return counts", "def label_pos_x_scaled(self):\n return self.label_pos_x * self.photo.aspect_ratio", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def label_extraction(self) -> None:\n self.df[\"label\"] = self.df[\"y\"]", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUS2_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3ISS3_GetCount(self, label)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3ISS3_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2ISS2_GetCount(self, label)", "def get_num_labels(self):\n return self.num_labels", "def set_label_shape(label):\n label.set_shape([1])\n return label", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def get_index(self, label):\n if label in self.labels:\n return self.labels.index(label)\n else:\n self.labels.append(label)\n return self.labels.index(label)", "def recall_for_label(gt, pred, label):\n mask = gt == label\n masked_pred = pred[mask]\n n = float(masked_pred.numel())\n tp = torch.sum(masked_pred == 1)\n return tp / n", "def count_labels(self, add_no_ne_label=False):\n return sum([count[1] for count in self.get_label_counts(add_no_ne_label=add_no_ne_label)])", "def positionInPile(self):\n \n return self._pile.cardLabelIndex(self)", "def _draw_label(label, label_x, label_y):\n pass", "def rename_labels_by_count(labels):\n new_labels, label_counts = _count_labels(labels)\n\n return new_labels", "def get_mask_by_label(data, label):\n mask = np.copy(data)\n mask[mask != label] = -1\n mask[mask == label] = 0\n mask += 1\n return mask", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUS3_GetCount(self, label)", "def _label(self, column):\n # XXX\n return column", "def count_labels(labels_path):\n counts = np.zeros(4)\n with open(labels_path, 'r') as f:\n for line in f:\n line = int(line.split()[1]) - 1\n counts[line] += 1\n\n return counts", "def encode_label(label: str) -> int:\n\tif not label:\n\t\treturn 0\n\t# part after letter if it has a number, otherwise 1\n\tindex = int(label[1:]) if len(label) > 1 else 1\n\t# A = 1, B = 2, ... 
E = 5\n\toffset = ord(label[0]) - ord(\"A\") + 1\n\t# compute label number\n\treturn (index - 1) * 5 + offset", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUS2_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetCount(self, label)", "def get_proba_by_label(self, label=None):\n if self.get_count_by_label(label) == 0:\n if label == 0:\n # REMEMBER: this is a display only, not a math model, in display we sub neg from 1, so return 1 to get zero\n return 1\n else:\n return 0\n elif len(self.data) - self.get_count_by_label(-1) == 0:\n # they're all unpredictable\n return 0\n elif label is None:\n # weird case, change neg's to 1-proba, which is different than rest of display\n pos_proba = sum(d.proba for d in self.data if d.pred == 1)\n neg_proba = sum(1 - d.proba for d in self.data if d.pred == 0)\n return (pos_proba + neg_proba) / (len(self.data) - self.get_count_by_label(-1))\n else:\n return sum(d.proba for d in self.data if d.pred == label) / self.get_count_by_label(label)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetCount(self, label)", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_HasLabel(self, label)", "def nr_labels(self):\n return self.model.nr_labels", "def mask_label_into_class_label(self, mask_labels, img_resolution = 256, bigger_than_percent=3.0):\n array_of_number_of_change_pixels = []\n\n for mask in mask_labels:\n number_of_ones = np.count_nonzero(mask.flatten()) # << loading takes care of this 0 vs non-zero\n array_of_number_of_change_pixels.append(number_of_ones)\n\n self.debugger.save_arr(array_of_number_of_change_pixels, \"BALANCING\")\n array_of_number_of_change_pixels = self.debugger.load_arr(\"BALANCING\")\n\n array_of_number_of_change_pixels = array_of_number_of_change_pixels / (\n img_resolution * img_resolution) * 100.0 # percentage of image changed\n\n class_labels = []\n for value in array_of_number_of_change_pixels:\n is_change = value > bigger_than_percent\n class_labels.append(int(is_change))\n\n return np.array(class_labels)", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUC2_HasLabel(self, label)", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUS2_GetCount(self, label)", "def label_index(self):\n return self._label_index", "def label_index(self):\n return self._label_index", "def set_mark( self, mark, index ):\n\n try:\n int(self.__grid[index-1])\n\n if mark.lower() == 'x' or mark.lower() == 'o': \n self.__grid[index-1] = mark\n\n return 1\n\n except ValueError:\n return 0", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_HasLabel(self, label)", "def relabel_labelmask(labelmask, preserve_order=True):\n mask = np.copy(labelmask)\n # Get all object labels and their counts.\n 
labels, counts = np.unique(mask, return_counts=True)\n # Get the indexes of sorted counts, descending.\n ordered_indexes = np.argsort(counts)[::-1]\n # Set largest object as background (ID=0).\n background_label = labels[ordered_indexes[0]]\n mask[mask == background_label] = 0\n # Renumber the rest of the objects 1..n.\n obj_num=1\n if (preserve_order):\n oldlabels = labels\n else:\n oldlabels = labels[ordered_indexes]\n for old_label in oldlabels:\n if (old_label != background_label):\n mask[labelmask == old_label] = obj_num\n obj_num = obj_num + 1\n return mask", "def lookup_class_idx(self,label):\r\n \r\n return self.class_labels[label]", "def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUS3_GetCount(self, label)", "def labelingLVQ(self):\n numLabels = len(np.unique(self.y))\n for i, x in enumerate(self.x):\n w = self.find_closest(x)[0]\n for nl in range(numLabels):\n if self.y[i] == nl:\n self.labels[nl, w[0], w[1]] += 1\n return self.labels", "def labelIndex(self, label):\n for idx, taskDef in enumerate(self):\n if taskDef.label == label:\n return idx\n return -1", "def colnum(self):\n \n colnum = 0\n for table in self.columnlabels:\n table = np.asarray(table)\n if np.ndim(table) <= 1:\n table = np.reshape(table, (1, -1))\n colnum += table.shape[1]\n return colnum", "def create_label(self, loaded_img, loaded_label):\n _, label = cv2.threshold(loaded_label, 120, 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n label = cv2.dilate(label, kernel, iterations=1)\n _, contours, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if contours:\n areas = [cv2.contourArea(cnt) for cnt in contours]\n x, y, w, h = cv2.boundingRect(contours[np.argmax(areas)])\n label = label[y:y + h, x:x + w]\n return loaded_img.astype(np.float32) / 255, cv2.resize(label, (self.label_w, self.label_h)).astype(np.float32) / 255\n else:\n return loaded_img.astype(np.float32) / 255, np.zeros([self.label_h, self.label_w], dtype=np.float32)", "def add_label_to_unique_species_labels(self, label: str) -> str:\n unique_label, i = label, 0\n while unique_label in self.unique_species_labels:\n unique_label = f'{label}_{i}'\n i += 1\n self.unique_species_labels.append(unique_label)\n return unique_label", "def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_GetNumberOfLabels(self)", "def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetNumberOfLabels(self)", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUC3_HasLabel(self, label)", "def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2ISS2_GetNumberOfLabels(self)", "def autolabel(rects):", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUC3_HasLabel(self, label)", "def HasLabel(self, label: 
'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS2IUC2_HasLabel(self, label)", "def create_labels_base(df, col_name, window_size=11):\n\n #self.log(\"creating label with original paper strategy\")\n row_counter = 0\n total_rows = len(df)\n labels = np.zeros(total_rows)\n labels[:] = np.nan\n print(\"Calculating labels\")\n\n while row_counter < total_rows:\n if row_counter >= window_size - 1:\n window_begin = row_counter - (window_size - 1)\n window_end = row_counter\n window_middle = int((window_begin + window_end) / 2)\n\n min_ = np.inf\n min_index = -1\n max_ = -np.inf\n max_index = -1\n for i in range(window_begin, window_end + 1):\n price = df.iloc[i][df.columns.get_loc(col_name)]\n if price < min_:\n min_ = price\n min_index = i\n if price > max_:\n max_ = price\n max_index = i\n\n if max_index == window_middle:\n labels[window_middle] = 0\n elif min_index == window_middle:\n labels[window_middle] = 1\n else:\n labels[window_middle] = 2\n\n row_counter = row_counter + 1\n\n return labels", "def relabel_particles(df, col1='raw_data', col2='particle'):\n\n\tdf.sort_values(by=[col1, col2])\n\tfile_names = df[col1].unique()\n\ti = 0\n\n\tind = 1\n\ttot = len(file_names)\n\tfor file_name in file_names:\n\t\tprint(\"Relabeling (%d/%d): %s\" % (ind, tot, file_name))\n\t\tind = ind + 1\n\n\t\tsub_df = df.loc[df[col1] == file_name]\n\t\tparticles = sub_df[col2].unique()\n\n\t\tfor particle in particles:\n\t\t\tdf.loc[(df[col1] == file_name) & \\\n\t\t\t(df[col2] == particle), 'tmp'] = i\n\t\t\ti+=1\n\n\tdf['tmp'] = df['tmp'].astype('int')\n\tdf[col2] = df['tmp']; del df['tmp']\n\n\treturn df", "def _next_unlabelled_col(x):\n for i in range(self.n_cols):\n idx = (x + i) % self.n_cols\n x_current = self._x_positions[idx]\n if self._cols[x_current].label is None:\n return idx", "def write_label(self, label):\n self._write_line('label ' + label) # TODO generate unique labels?", "def num_labels(self):\n return len(self.get_labels())", "def num_labels(self):\n return len(self.get_labels())", "def HasLabel(self, label: 'unsigned char') -> \"bool\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUC3_HasLabel(self, label)", "def setLabelColumn(self, v):\n return self._set(labelColumn=v)", "def label_thin(self, orig_label):\n pil_thin = thin(orig_label)\n # Keep the original label and set non-thinning label as 0\n orig_label[~pil_thin] = 0\n\n return orig_label", "def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetNumberOfLabels(self)", "def count_labels(labels, num_classes):\n return np.array([\n np.bincount(segment_labels, minlength=num_classes) for _, segment_labels in labels\n ])", "def GetCount(self, label: 'unsigned short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetCount(self, label)", "def pixwidth(self):\n return self._labelWidth * self.transform.scale[0]", "def ball_num(self):\n counter = 0\n for i in range(0, 100):\n if self.cells[i].is_ball:\n counter += 1\n return int(counter)", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def GetNumberOfLabels(self) -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_GetNumberOfLabels(self)", "def get_class_index(self, label):\n assert label in CLASSES\n return CLASSES.index(label)" ]
[ "0.6160278", "0.6008114", "0.5806169", "0.54976237", "0.5418939", "0.5353223", "0.5351463", "0.5350682", "0.52923065", "0.5270436", "0.52658045", "0.52610487", "0.52374464", "0.5227169", "0.5211575", "0.5198103", "0.51884943", "0.5185271", "0.51499337", "0.5137268", "0.51093817", "0.5106107", "0.50990295", "0.50824285", "0.50658983", "0.50600773", "0.5056616", "0.50445646", "0.50445646", "0.50445646", "0.50445646", "0.5033529", "0.50222903", "0.50202173", "0.5017895", "0.50098825", "0.5007166", "0.49970192", "0.4978274", "0.49773204", "0.49695504", "0.49679917", "0.4955755", "0.4955275", "0.49520043", "0.4947303", "0.49472904", "0.49368736", "0.4934296", "0.49282813", "0.49237967", "0.4922517", "0.49167615", "0.49145034", "0.490606", "0.48991212", "0.4883825", "0.4878768", "0.4865999", "0.4851549", "0.48465797", "0.48422846", "0.48346037", "0.4832075", "0.4832075", "0.4825552", "0.4822315", "0.48187065", "0.48118228", "0.48075598", "0.4802788", "0.47960752", "0.4782403", "0.47807822", "0.47790334", "0.476623", "0.47569513", "0.47520646", "0.47304195", "0.47246313", "0.47198984", "0.47196054", "0.4716514", "0.4712343", "0.4704402", "0.4703957", "0.47026098", "0.47012234", "0.47012234", "0.47002444", "0.46933478", "0.4688987", "0.46885964", "0.46850362", "0.46847236", "0.46812642", "0.46776733", "0.466928", "0.46654913", "0.46645752" ]
0.6514316
0
Move all files of this column to the corresponding directory, if this column is not labeled to be ignored. Returns number of files moved.
def move(self, dry_run: bool) -> int:
    if self.label == 'ignore':
        return 0
    file_counter = 0
    for crop in self._content:
        if not dry_run:
            crop.move_to(self.label)
        file_counter += 1
    return file_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_disk(self, dry_run: bool) -> int:\n file_counter = 0\n for k, col in self._cols.items():\n self._moved_cols.append(k)\n file_counter += col.move(dry_run=dry_run)\n return file_counter", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def move_file(self, ctx):\n pass", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n 
self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def movedir(self):\n pass", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def move_from_temp_directory(self):", "def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def move_backups(self, name, source, destination, regex):\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n match = pattern.match(entry)\n if match is None:\n continue\n if name == match.group(1):\n self.logger.debug('Archiving %s', entry)\n path = os.path.join(source, entry)\n result = self.os_rename(path, os.path.join(destination, entry))\n if result != 0:\n return result\n return 0", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)", "def move(self, # pylint: 
disable=too-many-locals,too-many-branches,too-many-statements\n narg=None, **kw):\n cwd = self.thisdir\n kw.setdefault('cycle', self.fm.settings['wrap_scroll'])\n direction = Direction(kw)\n if 'left' in direction or direction.left() > 0:\n steps = direction.left()\n if narg is not None:\n steps *= narg\n directory = os.path.join(*(['..'] * steps))\n self.thistab.enter_dir(directory)\n self.change_mode('normal')\n\n if not cwd or not cwd.accessible or not cwd.content_loaded:\n return\n\n if 'right' in direction:\n mode = 0\n if narg is not None:\n mode = narg\n tfile = self.thisfile\n selection = self.thistab.get_selection()\n if not self.thistab.enter_dir(tfile) and selection:\n result = self.execute_file(selection, mode=mode)\n if result in (False, ASK_COMMAND):\n self.open_console('open_with ')\n elif direction.vertical() and cwd.files:\n pos_new = direction.move(\n direction=direction.down(),\n override=narg,\n maximum=len(cwd),\n current=cwd.pointer,\n pagesize=self.ui.browser.hei)\n cwd.move(to=pos_new)\n if self.mode == 'visual':\n pos_start = min(self._visual_pos_start, (len(cwd.files) - 1))\n self._visual_move_cycles += direction.move_cycles()\n\n # Haven't cycled\n if self._visual_move_cycles == 0:\n targets = set(cwd.files[min(pos_start, pos_new):(max(pos_start, pos_new) + 1)])\n # Cycled down once\n elif self._visual_move_cycles == 1:\n if pos_new < pos_start:\n targets = set(cwd.files[:(pos_new + 1)] + cwd.files[pos_start:])\n else:\n targets = set(cwd.files)\n # Cycled up once\n elif self._visual_move_cycles == -1:\n if pos_new > pos_start:\n targets = set(cwd.files[:(pos_start + 1)] + cwd.files[pos_new:])\n else:\n targets = set(cwd.files)\n # Cycled more than once\n else:\n targets = set(cwd.files)\n\n # The current selection\n current = set(cwd.marked_items)\n # Set theory anyone?\n if self._visual_reverse:\n for fobj in targets & current:\n cwd.mark_item(fobj, False)\n for fobj in self._previous_selection - current - targets:\n cwd.mark_item(fobj, True)\n else:\n for fobj in targets - current:\n cwd.mark_item(fobj, True)\n for fobj in current - self._previous_selection - targets:\n cwd.mark_item(fobj, False)\n if self.ui.pager.visible:\n self.display_file()", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def convert_dir(self) -> int:\n old_classifiers: List[Classifier] = self.get_entities_by_entity_type(\n self.pack.classifiers, FileType.OLD_CLASSIFIER\n )\n intersection_fields = self.get_classifiers_schema_intersection_fields()\n for old_classifier in old_classifiers:\n self.create_classifier_from_old_classifier(\n old_classifier, intersection_fields\n )\n self.create_mapper_from_old_classifier(old_classifier)\n\n return 0", "def moveFiles(self, fids, pid):\n\n f = self.getFileInfo(fids[0])\n if not f or f.package == pid:\n return False\n if not self.getPackageInfo(pid):\n raise PackageDoesNotExists(pid)\n\n # TODO move real files\n\n self.db.moveFiles(f.package, fids, pid)\n\n return True", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def on_moved(self, event):\n\n # build the relative source and destination paths\n source_path = event.src_path.replace(self.root_path, \".\")\n destination_path = event.dest_path.replace(self.root_path, '.')\n is_directory = event.is_directory\n\n # propagate the moved event if server connection is established\n if self.protocol.connected:\n self.protocol.send_move_event(is_directory, source_path, destination_path)\n else:\n logging.info(\"Connection with server has not been established, changes will not be propagated.\")", "def putDir(self, inlocaldir, inirodsdir):\n num=0\n utilities.log.info('putDir: Local tree {} into iRODS tree {}'.format(inlocaldir, inirodsdir))\n for root, dirnames, filenames in os.walk(inlocaldir):\n irodsdir = self.assembleIRODScollectionName(root, inlocaldir, inirodsdir)\n irodsColl = self.createSubCollection(newcollection=irodsdir)\n num += self.putFile(root, irodsColl, filenames)\n utilities.log.info('Copied a total of {} files to 
iRODS'.format(num))\n utilities.log.info('Finished copying dir {} to {} '.format(inlocaldir,inirodsdir))\n return num", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def move_files(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n data_files = project_obj.files.all()\n\n for data in data_files:\n working_dir = get_sequencedir(project_obj)\n create_dir(working_dir)\n path = data.file.name.split('/')[-1]\n end_path = os.path.join(working_dir, path)\n\n if file_exists(end_path):\n print(\"File: \", end_path, \" already found. No need to copy.\")\n else:\n try:\n print(\"Copying from %s to %s\" % (data.file.name, end_path))\n shutil.copyfile(data.file.name, end_path)\n # if somehow the user deleted the database files, they are told to restart the database\n except FileNotFoundError:\n print(\"Protected database files have been deleted by the user. Restart the database to continue.\")", "def _renameDir(self) -> None:\n try:\n path = self._currPath.joinpath(self._editItemNameBefore)\n nameAfter = self._editItem.text()\n pathTo = self._currPath.joinpath(nameAfter)\n path.rename(pathTo)\n self._listDirectories()\n renamedItem = self._model.findItems(nameAfter)\n index = self._model.indexFromItem(renamedItem[0])\n self._mainFileView.scrollTo(index)\n self._mainFileView.setCurrentIndex(index)\n except FileExistsError:\n self._statusBar.showMessage('File/folder with that name already exists!', 3000)\n self._listDirectories()", "def test_move_nulltgzfile(self):\n dbf = self.createDummyDBF('emptytar.tgz')\n\n real_ans = (os.path.join(self.td, 'emptytar.tgz'),\n os.path.join(self.td, 'L1', 'emptytar.tgz'))\n self.assertFalse(os.path.isdir(os.path.join(self.td, 'L1')))\n # Method return may not be helpful but this is it for now\n self.assertEqual(real_ans, dbf.move())\n self.assertTrue(os.path.isdir(os.path.join(self.td, 'L1')))", "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n 
elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! 
Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def test_6e_move_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.moving_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare moving data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"move_file\"] % (GST.gs_file_paths[\"file_to_move_to_folder_source_path\"], GST.gs_file_paths[\"move_to_folder_target_path\"])\n try:\n self.send_request(function, \"move_file()\")\n except Exception as e:\n raise MoveException(\"Failed to move the data between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise MoveException(\"Failed to move the data between folders. \\n\" + response)", "def move_files(self, download_path):\n if self.file_list is None:\n self._set_file_list()\n\n for individual_file in self.file_list:\n source_path = os.path.join(self.base_dir, individual_file)\n dest_path = os.path.join(download_path, individual_file)\n # We don't move files that don't exist\n if not os.path.exists(source_path):\n continue\n\n # Make sure the destination directory exists\n if not os.path.exists(os.path.dirname(dest_path)):\n os.makedirs(os.path.dirname(dest_path))\n if self.to_copy:\n shutil.copy(source_path, dest_path)\n else:\n os.rename(source_path, dest_path)\n return", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def batch_mover(pattern, directory=None):\n if directory is None:\n directory = Path().cwd()\n\n for i in os.scandir(directory):\n if file_check(pattern, i.name):\n pass\n # shutil.move(i.name, yeah we gotta change a lot here", "def mv(self, src: int, dest: int) -> bool:\n url = 'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs)\n if src not in parent:\n 
parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True", "def moveBigFiles(self):\n if not self.bigFilesArea:\n self.logger.info('Moving of big files to a separate volume has not been requested.')\n return\n\n self.logger.info('Moving of big files to a separate volume is requested. Scanning...')\n \n if not os.path.exists(self.bigFilesArea):\n m = 'Cannot shift big files onto inexistent volume: %s' % self.bigFilesArea\n self.logger.error(m)\n return\n \n bigFiles = self.getBigFiles()\n\n if not [val for val in bigFiles.values() if val]:\n self.logger.info('No big files were found, returning.')\n return\n \n placeToDump = createBigFileIO(self.site, self.bigFilesArea, self.workDirs, self.isNightly).getJobDumpLocation(self)\n if not placeToDump:\n m = 'Unable to retrieve location of big files volume. Not moving big files.'\n self.logger.warning(m)\n return\n\n # We have files to move, let's move them\n for bigFileBaseDir, bigFiles in bigFiles.items():\n for bigFile in bigFiles:\n src = bigFile # file\n dst = placeToDump # directory\n self.moveBigFile(src, dst)\n # If big file origin is results path, replace with a soft link\n # to separate big file volume.\n if bigFileBaseDir == self.resPath:\n self.makeReplacementKeepFile(bigFile, placeToDump)", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def movenotfb2(self, input_folder_path, trash_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving not fb2 files')\n if os.listdir(input_folder_path):\n if any([x[-4:] != '.fb2' for x in os.listdir(input_folder_path)]):\n for file_name in os.listdir(input_folder_path):\n if file_name[-4:] != '.fb2':\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(trash_folder_path, file_name))\n logg.writing_log(conn, 'All files with incorrect format are moved to trash folder')\n else:\n logg.writing_log(conn, 'All files in the input folder are correct')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def move_files(probs):\r\n path = '../brain_tiny_dataset_class/png/'\r\n for _, _, files in os.walk(path):\r\n for file in files:\r\n # Reads the ID\r\n id = file[3:-4]\r\n try:\r\n # Reads dictionary of probabilities\r\n result = probs[id]\r\n # Moves pictures in 2 folders\r\n if result['epidural'] > 0 or result['intraparenchymal'] > 0 \\\r\n or result['intraventricular'] > 0 or result['subarachnoid'] > 0 \\\r\n or result['subdural'] > 0:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/hemorrhage/' + file)\r\n else:\r\n shutil.move(path + file, '../brain_tiny_dataset_class/healthy/' + file)\r\n except KeyError:\r\n continue", "def rename_files(dir): #Copy files to rootdir, and rename (according to extension)\n\n #total file count (for WSJ should be 500)\n total = 0\n\n #COPY and 
RENAME\n print \"\\n==========================\\nCopying files from {} to {}...\\n\".format(originalDir, rootdir)\n\n for subdir, dirs, files in os.walk(dir):\n for file in files:\n if ((file != \".DS_Store\") and (file != \"CVS\") and (file[-4:] == (\".ref\"))):\n total += 1\n origName = file #saving original name for printing\n\n shutil.copy(subdir + \"/\" + file, rootdir + str(total) + extension)\n\n //os.rename(rootdir + file, rootdir + str(total) + extension)\n\n print \"\\tCopied file: {} to \\n\\t\\t {}\".format(file, rootdir)\n print \"\\t\\t\\t and renamed it to: {} \".format((str(total) + extension))\n\n print \"\\n============ Total files copied and renamed: %d ==============\\n\" % total", "def move_next_image(self):\r\n self.index += 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n \r\n # if 'OCT_V2' in sorting_string:\r\n # cat_string = 'Unlabelled'\r\n # else:\r\n # cat_string = \r\n \r\n for label in labels:\r\n if label not in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index])\r\n else:\r\n self.master.quit()", "def move_files(file: str, destination: str):\n\n try:\n result = _process_files(\"mv\", \"-v\", file, destination)\n except FileNotFoundError:\n print(\"ERROR: '{}' does not exist.\".format(file))\n except FolderNotFoundError:\n print(\n \"ERROR: '{}' destination does not exist.\".format(destination)\n )\n except InsufficientRightsError:\n print(\"ERROR: Insufficient rights to destination '{}'.\".format(\n destination)\n )\n else:\n print(result)", "def test_move_badtgzfile(self):\n dbf = self.createDummyDBF('badtar.tgz')\n\n real_ans = (os.path.join(self.td, 'badtar.tgz'),\n os.path.join(self.td, 'L1', 'badtar.tgz'))\n self.assertFalse(os.path.isdir(os.path.join(self.td, 'L1')))\n # Method return may not be helpful but this is it for now\n self.assertEqual(real_ans, dbf.move())\n self.assertTrue(os.path.isdir(os.path.join(self.td, 'L1')))", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def process_IN_ISDIR(self, event):", "def _rename_ondisk(self):\n if not self.has_moved or not self.renames_remaining:\n return\n\n try:\n os.rename(self.rename_phase_src, self.rename_phase_dst)\n except Exception:\n sys.stderr.write(\"Failed to renamed '%s' to '%s'\\n\" %\n (self.rename_phase_src,\n self.rename_phase_dst))\n raise\n\n self._rename_phase += 1", "def move_delete(dir_path, filename):\n # Get path, name from filename\n path, name = os.path.split(filename)\n # Normalize with destination considerations\n nf = os.path.join(dir_path, increment_file_number(dir_path, 
name))\n move_file(filename, nf)", "def move_files_with_extension(self, extension: str):\n\n while True:\n files_with_extension = self.collect_files_with_extensions(extension)\n print(files_with_extension)\n folders_containing = set(\n [\n os.path.basename(os.path.dirname(file))\n for file in files_with_extension\n ]\n )\n directory = input(\n f\"Files with '{extension}' extension are scattered in your folders:\\n\"\n f\" {', '.join(folders_containing)}\\n\"\n f\"Where do you want to put them?\\n\"\n f\"({', '.join(self.possibilities.keys())})\\n\"\n )\n if directory in self.possibilities:\n self.move_files(files_with_extension, directory)\n break\n else:\n print(\"Invalid Input\")", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n 
copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def add_to_split_numbered(rec_dir, target, label):\n for file_name in os.listdir(rec_dir):\n path = os.path.join(rec_dir, file_name)\n if (os.path.isfile(path)):\n count = 0\n if os.path.isdir(os.path.join(target, str(label))):\n count = len([f for f in os.listdir(os.path.join(target, str(label)))])\n else:\n os.makedirs(os.path.join(target, str(label)))\n shutil.copy(path, os.path.join(target, str(label), str(count)))", "def move_to(self, file_name, to_dir, change_name_to=None):\n raise NotImplementedError", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def move_prev_image(self):\r\n self.index -= 1\r\n progress_string = \"%d/%d\" % (self.index+1, self.n_paths)\r\n self.progress_label.configure(text=progress_string)\r\n \r\n #sorting_string = df.sorted_in_folder[self.index].split(os.sep)[-2] #shows the last folder in the filepath before the file\r\n sorting_string = self.df.sorted_in_folder[self.index].split(\"/\")[-2]\r\n self.sorting_label.configure(text=(\"In folder: %s\" % (sorting_string)))\r\n\r\n #Add Current Label\r\n if 'OCT_V2' in sorting_string:\r\n cat_string = 'Unlabelled'\r\n else:\r\n cat_string = sorting_string\r\n \r\n self.cat_label.configure(text = ('Current Category : %s' %(cat_string)))\r\n \r\n display_name = \"Name = %s\" % (self.file_names[self.index])\r\n self.name_label.configure(text = display_name)\r\n \r\n if self.index < self.n_paths:\r\n self.set_image(self.df.sorted_in_folder[self.index]) # change path to be out of df\r\n else:\r\n self.master.quit()", "def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))", "def move_tbimported_in_finished(self, data):\r\n conf = self.func.config_info()\r\n folder_name = self.bid_folder_name() \r\n\r\n if \"ProofreaderStatus\" in list(data.keys()):\r\n if data[\"ProofreaderStatus\"] == \"FINISHED\":\r\n files = os.listdir(conf[\"path_to_batches_tbimported\"])\r\n if folder_name in files:\r\n src = os.path.join(conf[\"path_to_batches_tbimported\"], folder_name)\r\n dst = os.path.join(conf[\"path_to_batches_finished\"], folder_name)\r\n self.func.move_folder(src, dst)\r\n\r\n if not self.func.folder_exists(dst):\r\n raise Exception(\"Folder {} not moved in '6 FINISHED'!\".format(folder_name))\r\n else:\r\n raise Exception(\"Folder {} not found in '5 TO BE IMPORTED'!\".format(folder_name))", "def remove_current_logs_and_mv_comp_files(to_move_files, files_to_be_moved):\n [os.remove(f\"{file_name}\") for file_name in to_move_files]\n [shutil.move(os.path.join(LOGS_PATH, file_name), DESTINATION) for file_name in files_to_be_moved]", "def perform_action(self):\n errors = ErrorList()\n dest = self.cleaned_data['destination_folder']\n for item in self.cleaned_data['items']:\n path = os.path.join(self.file_dir, item)\n try:\n utility.move_items([path], self.dest_dir)\n except FileExistsError:\n errors.append(format_html(\n 'Item named <i>{}</i> already exists in 
<i>{}</i>',\n item, dest))\n except OSError:\n if not os.path.exists(path):\n errors.append(format_html(\n 'Item named <i>{}</i> does not exist', item))\n else:\n errors.append(format_html(\n 'Unable to move <i>{}</i> into <i>{}</i>', item, dest))\n return 'Your items have been moved', errors", "def unnest_directory(source_path: Path) -> dict[str, int]:\n counter = {\n 'moved': 0,\n 'deleted': 0,\n 'failed_move': 0,\n 'failed_delete': 0\n }\n # loop through everything in the source directory\n for path in source_path.iterdir():\n if path.is_dir():\n new_counts = unnest_directory(path) # recurse!\n counter = update_counters(counter, new_counts)\n for file in path.iterdir():\n is_successful = move_file_up_one_level(file)\n if is_successful:\n counter['moved'] += 1\n else:\n counter['failed_move'] += 1\n # delete empty directories\n if is_directory_empty(path):\n path.rmdir()\n counter['deleted'] += 1\n else:\n counter['failed_delete'] += 1\n return counter", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def move_fast5_files(args):\n # Create pandas dataframe with x columns.\n fast5_df = pd.DataFrame(columns=['fast5_file', 'subfolder', 'mv_command'])\n\n fast5_df['fast5_file'] = [fast5_file for fast5_file in os.listdir(READS_DIR) if fast5_file.endswith(\".fast5\")]\n fast5_df['subfolder'] = [standardise_int_length(int(i / 4000)) for i in xrange(len(fast5_df))]\n fast5_df['mv_command'] = [\"mv %s %s/\" % (fast5_file, subfolder)\n for fast5_file, subfolder in izip(fast5_df.fast5_file, fast5_df.subfolder)]\n\n subdirectories = fast5_df.subfolder.unique().tolist()\n print(subdirectories)\n for subdirectory in subdirectories:\n # Create directory\n if os.path.isdir(subdirectory):\n # If directory already exists, make sure nothing is inside\n if len(os.listdir(subdirectory)) > 0:\n sys.exit(\"Directory '%s' exists with files inside\" % subdirectory)\n else:\n os.mkdir(subdirectory)\n\n processes = (subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for cmd in fast5_df.mv_command.tolist())\n\n # We use the islice command to split our list of mv commands into five smaller lists.\n running_processes = list(itertools.islice(processes, args.num_threads))\n while running_processes:\n for i, process in enumerate(running_processes):\n if process.poll() is not None: # Means that the process is complete!\n stdout, stderr = process.communicate() # Get the output of the completed process\n if not stderr == \"\": # Print stderr if it exists.\n print stderr\n running_processes[i] = next(processes, None)\n # Run the next number in the list.\n if running_processes[i] is None: # No more commands waiting to be processed.\n del running_processes[i] # Not a valid process.\n break\n\n return subdirectories", "def MovieScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in movtypes:\r\n for filename 
in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(moviePath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Movies'", "def mv_file(file_name: str, path: str) -> None:\n global number_of_files\n if file_name.startswith(\".\"):\n pass\n else:\n for extensions in file_formats_list:\n if file_.endswith(extensions):\n shutil.move(desktop + \"/\" + file_, path)\n print(f\"moving {colored(file_name, 'yellow')} to {path}\")\n number_of_files += 1\n else:\n pass", "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))", "def move_calc_files(ase_obj, new_label):\n old_label = ase_obj.calc.label\n home, scratch = get_active_dirs()\n\n home_exts, scratch_exts = ['.com'], ['.log', '.chk', '.fchk']\n\n ssh = remote.connect_server(ssh=True)\n\n commands = []\n for ext in home_exts:\n command = ''.join(['mv', home, old_label, ext, ',', home, new_label, ext])\n commands.append(command)\n for ext in scratch_exts:\n command = ''.join(['mv', scratch, old_label, ext, ',', scratch, new_label, ext])\n commands.append(command)\n\n for command in commands:\n i,o,e = ssh.exec_command(command)\n\n ssh.close()", "def move(self, direction):\n head = self.snake[0]\n delta = self.dirs[direction]\n nextMove = [head[0] + delta[0], head[1] + delta[1]]\n if not self.isValidMove(nextMove):\n return -1\n\n if self.food and nextMove == self.food[0]:\n self.food.popleft()\n else:\n self.snake.pop()\n\n self.snake.appendleft(nextMove)\n\n return len(self.snake) - 1", "def supportRecursiveMove(self, destPath):\r\n return True", "def supportRecursiveMove(self, destPath):\r\n return True", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def applyDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if srcExt in exts:\n if not os.path.exists(destDir):\n os.makedirs(destDir)\n shutil.copyfile(srcPath,destPath)\n if self.progress: \n self.cumSize += os.path.getsize(srcPath)\n self.progress(self.cumSize,_('Copying Files...'))\n elif os.path.isdir(srcPath):\n self.applyDir(srcPath,destPath,exts)", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def move(self, n):\n return self.file.seek(n, 0)", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return self.rest_client.POST(url, params, headers)", "def 
findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def move(self):\n self.pos += self.direc\n self.nearest_node = self.pixel_to_node()", "def walk_dir(self, path = '/srv/www/mod_intf/interface_mod_sec/rules_dir/tmp'):\n for root, dirs, files in os.walk('/srv/www/mod_intf/interface_mod_sec/rules_dir/tmp'):\n for each_file in files:\n print root+\"/\"+ each_file\n self.move_files_to_db(path_file =root+\"/\"+each_file, file_name=each_file)\n return True", "def moveFile(sourceFullPath,targetDir):\n\n thisFunc = inspect.currentframe().f_code.co_name\n try:\n shutil.move(sourceFullPath,targetDir)\n return True\n except Exception as e:\n print(f\"{thisFunc} issue: {e}\")\n return False", "def move(self, destination, **kwargs):\n assert _os.path.exists(self.__str__()) == True\n _shutil.move(self.__str__(), destination, **kwargs)", "def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Music'", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def move_file(source, destination):\n shutil.move(source, destination)", "def move_files_checked(fname_fout, extensions, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Check if all requested files are present\n for ext in extensions:\n cur_file = fname + ext\n if not os.path.isfile(cur_file):\n return False\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n\n # Move files\n for ext in extensions:\n cur_file = fname + ext\n os.renames(cur_file, os.path.join(dest_dir, cur_file))\n return True", "def Move(args):\n\n parser = argparse.ArgumentParser(usage='mv [Options] sources... 
dest',\n description=Move.__doc__)\n parser.add_argument(\n '-v', '--verbose', dest='verbose', action='store_true',\n default=False,\n help='verbose output.')\n parser.add_argument(\n '-f', '--force', dest='force', action='store_true',\n default=False,\n help='force, do not error it files already exist.')\n parser.add_argument('srcs', nargs='+')\n parser.add_argument('dest')\n\n options = parser.parse_args(args)\n\n if options.verbose:\n print('mv %s %s' % (' '.join(options.srcs), options.dest))\n\n for src in options.srcs:\n MovePath(options, src, options.dest)\n return 0", "def alternativeSeperation(path=\"data\"):\n path = os.path.join(path, \"val\")\n val_df = pd.read_csv(os.path.join(path, \"val_annotations.txt\"), delimiter=\"\\t\",\n header=None, index_col=0)\n val_labels = val_df.to_dict()[1]\n\n for image, label in val_labels.items():\n label_path = os.path.join(path, label)\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n shutil.move(os.path.join(os.path.join(path, \"images\"), image), label_path)", "def move(self, *args, **kw):\n return self.execute_action('move', *args, **kw)", "def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)", "def to_dir_changed(self):\n text = self.to_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set destination_path = (?) where id is 1', text)", "def sizeDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if srcExt in exts:\n self.totSize += os.path.getsize(srcPath)\n elif os.path.isdir(srcPath):\n self.sizeDir(srcPath,destPath,exts)" ]
[ "0.6540349", "0.6040318", "0.59826165", "0.5923412", "0.5619923", "0.55904424", "0.5581633", "0.54979753", "0.54583585", "0.5416308", "0.53824425", "0.53258795", "0.5291548", "0.52602667", "0.52368546", "0.5232204", "0.52239007", "0.52128977", "0.52018017", "0.5200375", "0.51892644", "0.51365745", "0.5114978", "0.51113755", "0.5096242", "0.50931275", "0.5090273", "0.5087816", "0.50804347", "0.5079122", "0.5079122", "0.5072665", "0.50592625", "0.505564", "0.5050308", "0.5038649", "0.5038503", "0.50007355", "0.49947736", "0.49812803", "0.49675322", "0.49667317", "0.49562716", "0.4942932", "0.4938355", "0.4937236", "0.49364868", "0.49298418", "0.49271426", "0.4925713", "0.49218768", "0.491097", "0.4910286", "0.49093306", "0.48965943", "0.48912084", "0.48897886", "0.48853633", "0.4884934", "0.48749712", "0.4852298", "0.48510677", "0.48492625", "0.484683", "0.48450503", "0.4844983", "0.48444414", "0.4835462", "0.48269024", "0.480989", "0.47995365", "0.47857064", "0.4783666", "0.4770557", "0.4759219", "0.47539616", "0.47436816", "0.47419885", "0.47366598", "0.47366598", "0.47347826", "0.47306597", "0.47264874", "0.470792", "0.47006294", "0.4691943", "0.46840227", "0.46832466", "0.46748745", "0.46731403", "0.46673024", "0.4655456", "0.46457922", "0.4644612", "0.4641195", "0.46327573", "0.46294874", "0.46109796", "0.46086487", "0.46067476" ]
0.6531521
1
Undo all former file movements.
def move_back(self) -> None:\n    if self.label == 'ignore':\n        return\n    for crop in self._content:\n        crop.move_back()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))", "def undo():", "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo(self):\n for command in reversed(self.commands):\n command.undo()", "def undo(self):\n self._check_undo_prerequisites()\n self._decrement_history_pointer()\n self._replay_history()", "def undo(self) :\n \n raise NotImplementedError()", "def __undo(self):\n self.__undo_controller.undo()", "def undo(self):\n self.cnvImgTest.undoLast()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def _undo_action(self):\n pass", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def undo(self):\n if self.__undo is None: # if we can not undo anymore we raise an error\n raise ControllerException(\"Error!!! Can't undo anymore!!!\\n\")\n else: # otherwise we simply do the swap from the undo list once more\n self.__scramble.swap(self.__undo[0], self.__undo[1], self.__undo[2], self.__undo[3])\n # self.__scramble.inc()\n self.__undo = None # undo becomes None because we don't want the user to do multiple undo operations", "def undo(self):\n\n if not self.can_undo():\n print(\"error: trying to undo\")\n return\n\n func = self.undo_gen(self.undo_act())\n func()\n self.position -= 1", "def onUndo(self, event):\r\n\t\tself.ActionHistory.Undo()", "def undo_settings(self):\r\n cF.undo_settings()", "def undoChanges(self):\n Objects.undoChanges(self)\n self.draw()", "def undo(self):\r\n\r\n if self.done.size() > 0:\r\n command = self.done.pop()\r\n if command[0] == 'add':\r\n uncommand = (('del'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.delete(uncommand[1],\r\n False)\r\n if command[0] == 'del':\r\n uncommand = (('add'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.addnew(uncommand[2],\r\n uncommand[3],\r\n False)\r\n if command[0] == 'move':\r\n uncommand = (('move'),\r\n command[2],\r\n command[1])\r\n self.move(uncommand[1],\r\n uncommand[2],\r\n False)\r\n self.undone.add(uncommand)", "def abort(self):\n for command in reversed(self.commands):\n command.undo()", "def onUndo(self):\n pass", "def undo(self):\n self.setIndex(self._index-1)", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def restore_last_undo_point(self):\n self.unload()", "def reset(self):\n self.source.seek(0)\n self.target.seek(0)", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def undo_move(self):\n # general idea:\n # store the state of the board 
in a stack before every successful attempted move \n # when this is called, set the current board equal to the top state in the stack\n # print(\"Undo\")\n # print(self)\n # if len(self.board_states) != 0:\n if self.moves != 0:\n self.moves -= 1\n self.stock = []\n self.wp = []\n self.foundations = []\n self.tableaus = []\n self.stock, self.wp, self.foundations, self.tableaus = self.board_states.pop()\n self.init_move_dict()", "def undo(self):\n if self._snapshot_index >= 0:\n snapshot = self._snapshots[self._snapshot_index]\n for chunk_location in snapshot:\n dimension, cx, cz = chunk_location\n chunk = self._unserialise_chunk(dimension, cx, cz, -1)\n self._chunk_cache[chunk_location] = chunk\n self._snapshot_index -= 1", "def __editUndo(self):\n self.activeWindow().undo()", "def reset(self):\n self.undo_stack = Stack(self.undo_stack_size)\n self.redo_stack[:] = []\n self.not_undoable_action = False\n self.undo_in_progress = False", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def undo_move(self, n=1):\n self.state = self.move_history[-n - 1]\n self.positions = self.copy_board(self.state[1])\n # delete all moves between the current state and the restored state\n del self.move_history[-n:]", "def flushUndo(*args, **kwargs)->None:\n pass", "def undo(*args, **kwargs)->None:\n pass", "def revert_pristine(self):\n self.revert_all()\n self.svn_update()\n\n status = self.svn_status()\n if not status.unversionned:\n return\n # delete unversionned files\n for entry in status.unversionned:\n path = entry.path\n if os.path.isdir(path) and not os.path.islink(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n if self.svn_update() == 0:\n raise Error('Failed to reset workspace !')", "def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]", "def undo_transaction(self):\n transaction = self.context\n entries = transaction.entries()\n\n # check if we can undo\n if not transaction.canUndoOrReverse():\n raise AccessControl_Unauthorized('No permission to create transactionentries, or there are no entries to reverse')\n \n # force a remove from the balances and update the references\n for transactionEntry in entries:\n transactionEntry.removeTransactionEntryFromAccount()\n\n # remove transaction\n transaction.getTransactionFolder().manage_delObjects(ids=transaction.getId())", "def UndoChanges(self):\n if (len(self.alignmentHistory) > 1):\n self.alignmentHistory.pop()\n self.alignment = self.alignmentHistory[-1][:,:]\n self.Show(self.displayedColumn)\n else:\n self.AlertMessage('Nothing to undo.', 'low')", "def pre_revert(self):", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"", "def rollback(self):\n self.stream.seek(0)", "def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0", "def revert(self, *args, **kwargs):", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def undo(self):\n if not self.undo_stack:\n return\n self.begin_not_undoable_action()\n self.undo_in_progress = 
True\n undo_action = self.undo_stack.pop()\n self.redo_stack.append(undo_action)\n if isinstance(undo_action, self.insertclass):\n self._undo_insert(undo_action)\n elif isinstance(undo_action, self.deleteclass):\n self._undo_delete(undo_action)\n else:\n self._handle_undo(undo_action)\n self.end_not_undoable_action()\n self.undo_in_progress = False", "def undo(self, num=1):\n for i in range(num):\n super().undo()", "def undo(self):\n if (0 == len(self._undoStack)):\n raise ValueError(\"Nothing to undo\")\n else:\n self._redoStack.append(self.gameState())\n\n lastGameState = self._undoStack.pop()\n self.counter = lastGameState[\"counter\"]\n self.wonRounds = lastGameState[\"wonRounds\"]\n self.wonGames = lastGameState[\"wonGames\"]\n self.currentMaxPoints = lastGameState[\"currentMaxPoints\"]\n self.sidesChanged = lastGameState[\"sidesChanged\"]\n self.playerPositions = lastGameState[\"playerPositions\"]\n self.servePosition = lastGameState[\"servePosition\"]", "def restore(self, clean=False):\n\n for origfilename in self.filenames[:]:\n if not origfilename.endswith(\".\"+self.BACKUP_EXTENSION):\n continue\n filename = origfilename.strip(\".\"+self.BACKUP_EXTENSION)\n shutil.copy(origfilename, filename)\n self.filenames.append(filename)\n if clean:\n os.remove(origfilename)", "def undo(self):\r\n previous = self.memory.pop()\r\n if not isinstance(previous, task2.ListADT):\r\n raise TypeError(\"Did not expect any other object in memory\")\r\n if previous[0] == \"d\":\r\n index = previous[1]\r\n for i in range(len(previous)-1, 1, -1):\r\n self.text_lines.insert(index, previous[i])\r\n elif previous[0] == \"i\":\r\n start = previous[1]\r\n for j in range(previous[2]):\r\n self.text_lines.delete(start)\r\n else:\r\n raise ValueError(\"Did not expect any other action other than delete or insert\")", "def clean_files(self):\n self.filenames.clear()", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def __redo(self):\n self.__undo_controller.redo()", "def reset(self):\n self._cmd_line = 0\n self._file_line = 0", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def wipe(self):", "def wipe(self):", "def rewind(self):\n self.run_command('rewind')", "def rewind():", "def rewind(self):\n self.seek(0)", "def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)", "def undo(self, outer_instance):\n pass", "def clean_up():\n for action in reversed(undo_actions):\n try:\n action()\n except Exception, exc:\n sys.stderr.write(\"BAD CLEANUP: Call to %s failed\\n\"\n % action.func_name)\n sys.stderr.write(\" %s\\n\" % exc)", "def reset(self):\n self.prev_obj1_position = None\n self.prev_obj2_position = None", "def rewind(self):\n self.seek(0)", "def reset(self):\n self.fscore_history = []", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def rollback(self):\n pass", "def undo():\n\n # pressing undo twice restores the original value \n global current_value, operations\n \n # solution: since there are only 2 values stored, swap\n operations[0], operations[1] = 
operations[1], operations[0]\n current_value = operations[-1]", "def _undo(self, action, data):\n if self.undobuffer is None:\n return\n if action == \"rot\":\n angle, degPAU = data\n self._rotate(-angle*degPAU/self._degreesPerAU)\n dummy = self.undobuffer.pop()\n elif action == \"stamp\":\n stitem = data[0]\n self.clearstamp(stitem)\n elif action == \"go\":\n self._undogoto(data)\n elif action in [\"wri\", \"dot\"]:\n item = data[0]\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"dofill\":\n item = data[0]\n self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),\n fill=\"\", outline=\"\")\n elif action == \"beginfill\":\n item = data[0]\n self._fillitem = self._fillpath = None\n if item in self.items:\n self.screen._delete(item)\n self.items.remove(item)\n elif action == \"pen\":\n TPen.pen(self, data[0])\n self.undobuffer.pop()", "def undo(self):\n LOG.debug(\"In the undo method, will attempt to restore\")\n\n # validate detected nothing to do for this, nothing was done\n # for execute, so simply return\n if self.no_op:\n return\n\n if not self.source_dev or not self.target_dev:\n return\n LOG.debug(\"The source dictionary is: %s\", self.source_dict_restore)\n LOG.debug(\"The target dictionary is: %s\", self.target_dict_restore)\n\n # In scenario where no source IP Address...\n if self.source_dict_restore:\n self.commandex.send_ifcfg(self.source_dev,\n self.source_dict_restore)\n\n # May have failed because the ifcfg didn't even exist, nothing\n # to roll back then\n if self.target_dict_restore:\n self.commandex.send_ifcfg(self.target_dev,\n self.target_dict_restore)", "def c_undo(self):\r\n try:\r\n self.canvas.delete(self.canvas.find_all()[-1])\r\n self.update()\r\n return True\r\n except: return False", "def rollback(self):\n self._rollback = True", "def reset(self):\n # from pathlib import Path\n # import pickle as pkl\n # path_traj = Path.home() / 'TmrlData' / 'reward' / 'traj.pkl'\n # with open(path_traj, 'wb') as file_traj:\n # pkl.dump(self.traj, file_traj)\n\n self.cur_idx = 0\n self.step_counter = 0\n self.failure_counter = 0\n\n # self.traj = []", "def undo(self, event=None):\n if not self.segs == []:\n self.requestSegByDct((self.segs[-1].getDct() + 2) % 4)", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()\n self.reset_snippet()", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def post_revert(self):", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def revertToNormal(self, revertEffectFiles = True):\n for j in enumerate(self.inputFilesAll):\n # Load the backups of msb/luabnd files\n print(\"[Unrandomize] Reverting msb and luabnd files \" + str(j[0]) + \"/\" + str(len(self.inputFiles)))\n self.restoreBackup(self.MAPSTUDIO + j[1] + '.msb')\n 
self.restoreBackup('event/{0}.emevd{1}'.format(j[1], '.dcx' if self.useDCX else ''))\n \n if not (j[1] == \"m12_00_00_01\"):\n if (self.useDCX):\n self.restoreBackup(self.AISCRIPTS + j[1] + '.luabnd.dcx')\n else:\n self.restoreBackup(self.AISCRIPTS + j[1] + '.luabnd')\n\n if (revertEffectFiles):\n for iFile in self.inputFFXFiles:\n if (iFile != \"NONE\"):\n if (self.useDCX):\n self.restoreBackup(self.FFX_DIR_REMASTERED.format(iFile))\n else:\n self.restoreBackup(self.FFX_DIR.format(iFile))\n\n check_exe.restore_exe()\n\n self.revertParam()", "def rollback(self):\n if len(self.__stack) == 0:\n raise EmptyStackException()\n self.__current_pos = self.__stack[-1][0]\n self.line = self.__stack[-1][1]\n self.linePos = self.__stack[-1][2]\n self.__stack = self.__stack[:-1]", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def clear_redo(self):\r\n self.command_manager.clear_redo()", "def revertInterims(self):\n for interim in self.getInterim():\n interim.revertInterim()", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def reset(self):\n if self._key:\n self._lib.StObjectReset(self._key)\n os.chdir(self._cwd)\n self._layers.clear() # layer: index\n self._substrate = None\n self._experiments.clear() # analyzed experiments\n self._tmpstandards.clear()", "def redo():", "def redo(self):\n for command in self.commands:\n command.redo()", "def reset_old_files():\n commands = [\n 'rm -f {0}/tools/perf/page_sets/url*'.format(CHROMIUM_SRC),\n 'rm -f {0}/tools/perf/page_sets/data/url*'.format(CHROMIUM_SRC),\n 'rm -f ' \\\n '{0}/tools/perf/benchmarks/telemetryBenchmarks.py'.format(CHROMIUM_SRC),\n 'rm -f data/wpr_source/*',\n 'rm -f temp/*',\n 'rm -f data/results.db',\n 'rm -f {0}/data/har/*'.format(PLT_SRC),\n 'rm -f {0}/data/replay/*'.format(PLT_SRC),\n 'rm -f {0}/webpagereplay_logs/*'.format(CHROMIUM_SRC),\n 'rm -f {0}/telemetry/count.db'.format(PLT_SRC),\n ]\n\n for cmd in commands:\n p = Popen(cmd, shell=True)\n p.wait()", "def _revert(self):\n self.release_from_output(\"data\")\n # delete ONA submissions on ONA", "def test_move_to_trash(self):\n os.chdir(\"testimages/\")\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n filename = os.path.abspath(\"image_to_edit.jpg\")\n files = [filename]\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file = os.path.join(self.trashdir, \"image_to_edit.jpg\")\n self.assertTrue(os.path.isfile(trashed_file))\n # Repeat, to check if backing up works\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file1 = os.path.join(self.trashdir, \"image_to_edit.jpg.1\")\n self.assertTrue(os.path.isfile(trashed_file1))\n shutil.copyfile(\"arch_001.jpg\", \"image_to_edit.jpg\")\n fileactions.move_to_trash(files, self.trashdir)\n trashed_file2 = os.path.join(self.trashdir, \"image_to_edit.jpg.2\")\n self.assertTrue(os.path.isfile(trashed_file2))\n # Clear the files\n os.remove(trashed_file)\n os.remove(trashed_file1)", "def reset(self):\n self.continued = False\n self.warned = False\n self.whatifs = None\n self.tablefmt = None\n self.saved = False", "def undoPossibleBarMoves(self):\r\n for num in self.diceNumbers:\r\n if self.currentPlayer == 0:\r\n potentialPoint = num - 1\r\n else:\r\n potentialPoint = num * (-1)\r\n self.points[potentialPoint].setValidMove(False)\r\n self.points[potentialPoint].setBorder(BLACK, 1)", "def 
_unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def reset(self):\n self.previous = None\n self.state = None\n self.args = None\n self.context = None", "def unmakeMove(self, move):", "def redo(self):\n pass", "def rollback(self):\n raise NotImplementedError", "def _move_current_to_previous(self, metadata_role):\n\n # Get the 'current' and 'previous' full file paths for 'metadata_role'\n metadata_filepath = metadata_role + '.txt'\n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filepath)\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filepath)\n\n # Remove the previous path if it exists.\n if os.path.exists(previous_filepath):\n os.remove(previous_filepath)\n\n # Move the current path to the previous path. \n if os.path.exists(current_filepath):\n tuf.util.ensure_parent_dir(previous_filepath)\n os.rename(current_filepath, previous_filepath)", "def undo_act(self):\n\n return self.history[self.position]", "def rewind(f):\n\tf.seek(0)" ]
[ "0.7702279", "0.73885363", "0.7248035", "0.7157842", "0.7157292", "0.70697516", "0.69746536", "0.680541", "0.6793342", "0.67908883", "0.67872065", "0.67608637", "0.66969573", "0.66815704", "0.6664292", "0.6657332", "0.6656268", "0.6570457", "0.6570197", "0.6550941", "0.6458817", "0.64506215", "0.64284825", "0.6408765", "0.63862294", "0.6375784", "0.6324521", "0.6300555", "0.62783074", "0.62578094", "0.6245758", "0.6222257", "0.61745065", "0.6133923", "0.61141664", "0.6104881", "0.6099987", "0.6090112", "0.60838544", "0.6006714", "0.6005956", "0.5992376", "0.59466493", "0.59291536", "0.5918795", "0.589249", "0.5885011", "0.588392", "0.5860584", "0.58523756", "0.5835262", "0.58077013", "0.5796549", "0.5796549", "0.57881415", "0.57754415", "0.57638365", "0.5762815", "0.5735943", "0.5734499", "0.5728563", "0.57131654", "0.57027435", "0.5691954", "0.56888115", "0.56738347", "0.56692195", "0.5664133", "0.5653", "0.5631335", "0.56077105", "0.55943716", "0.5579955", "0.55747265", "0.55532306", "0.55512565", "0.5550339", "0.5542052", "0.5540544", "0.553304", "0.55266535", "0.552067", "0.5504752", "0.55024135", "0.54823667", "0.5469884", "0.54692864", "0.54622054", "0.5457169", "0.544696", "0.54451406", "0.5432429", "0.54193896", "0.54151815", "0.54101235", "0.54004484", "0.53958297", "0.5372089", "0.53720516", "0.53655314" ]
0.5601747
71
Defines and returns a parser for given command line arguments.
def get_args_parser() -> argparse.ArgumentParser:\n    parser = argparse.ArgumentParser(\n        description='Partition the Ecotron-EInsect-2018 dataset into training, validation, and testing sets.',\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n        epilog=__doc__\n    )\n    data_group = parser.add_argument_group('Data input')\n    data_group.add_argument(\n        '--images', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with images'\n    )\n    data_group.add_argument(\n        '--roots', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with root masks for given images'\n    )\n    data_group.add_argument(\n        '--centerlines', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with center line masks for given images'\n    )\n    data_group.add_argument(\n        '--radii', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with radii maps for given images'\n    )\n    data_group.add_argument(\n        '--sin', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with sine maps for given images'\n    )\n    data_group.add_argument(\n        '--cos', type=pathlib.Path, required=True, metavar='DIR',\n        help='directory with cosine maps for given images'\n    )\n    data_group.add_argument(\n        '--crop-width', type=int, required=True, metavar='INT',\n        help='crop width'\n    )\n    split_group = parser.add_argument_group('Split control')\n    split_group.add_argument(\n        '--val-split', type=int, required=True, metavar='INT',\n        help='percentage of data going into validation set'\n    )\n    split_group.add_argument(\n        '--test-split', type=int, required=True, metavar='INT',\n        help='percentage of data going into test set'\n    )\n    parser.add_argument(\n        '-y', '--yes', action='store_true',\n        help='Assume answer "yes" for all questions'\n    )\n    parser.add_argument(\n        '--dry-run', action='store_true',\n        help='Only simulate the process, don\'t actually touch anything'\n    )\n    parser.add_argument(\n        '--vis', action='store_true',\n        help='Create visualization of selection in ./vis (requires matplotlib)'\n    )\n    parser.add_argument(\n        '-r', '--random-seed', metavar='INT', type=int,\n        help='Seed for the random number generator'\n    )\n    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser", "def get_parser():\n p = argparse.ArgumentParser(description='such a good program')\n p.add_argument('infile')\n p.add_argument('outfile')\n return p", "def create_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n add_help=False)\n parser.add_argument(\n '--help', '-h',\n action='store_true',\n dest='help',\n help=\"\"\"show this help message and exit\"\"\")\n parser.add_argument(\n '--verbose', '-v',\n action='count',\n default=0,\n help=\"\"\"Enable verbose output from '%(prog)s'. A second and third\n '-v' increases verbosity.\"\"\")\n parser.add_argument(\n '--sequential',\n action='store_true',\n help=\"\"\"Execute analyzer sequentialy.\"\"\")\n parser.add_argument(\n '--cdb',\n metavar='<file>',\n default=\"compile_commands.json\",\n help=\"\"\"The JSON compilation database.\"\"\")\n return parser", "def get_parser(self):\n parser = ArgumentParser()\n parser.add_argument(\n \"-c\", default='', dest='cmd',\n help=(\"just like python -c or sh -c (pass in a command)\"))\n parser.add_argument(\n \"-e\", \"--exec\", default='', dest='execfile',\n help='a filename to execute')\n parser.add_argument(\n \"-v\", '--version', default=False, dest='version',\n action='store_true',\n help=(\"show version information\"))\n parser.add_argument(\"--shell\", dest=\"shell\",\n default=False, help=\"application shell\",\n action='store_true')\n parser.add_argument(\"--config\", dest='config',\n default=\"\",\n help=\"use config file\")\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser", "def create_parser():\n p = NewParser()\n\n p.add_argument('reference', type=str,\n help = \"Fasta reference file that reads were mapped to.\")\n\n p.add_argument('gff', type=str,\n help = \"GFF file containing reference genome annotations.\")\n\n p.add_argument('vcf', type=str,\n help = \"VCF file to parse.\")\n\n args = p.parse_args(sys.argv[1:])\n return args", "def get_parser():\n parser = argparse.ArgumentParser()\n # parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('files', nargs='+')\n return parser", "def get_parser():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"-s\", \"--sentence\", dest=\"sentence\", help=\"sentence, splitted by ';'\"\n )\n return parser", "def get_parser(self):\n parser = argparse.ArgumentParser(description='Short sample app')\n\n parser.add_argument('-a', action=\"store_true\", default=False)\n parser.add_argument('-b', action=\"store\", dest=\"b\")\n parser.add_argument('-c', action=\"store\", dest=\"c\", type=int)\n return parser", "def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser", "def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = 
training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main", "def get_parser():\r\n parser = argparse.ArgumentParser(description=( # pylint: disable=redefined-outer-name\r\n \"Automatically finds translation errors in all edx-platform *.po files, \"\r\n \"for all languages, unless one or more language(s) is specified to check.\"\r\n ))\r\n\r\n parser.add_argument(\r\n '-l', '--language',\r\n type=str,\r\n nargs='*',\r\n help=\"Specify one or more specific language code(s) to check (eg 'ko_KR').\"\r\n )\r\n\r\n parser.add_argument(\r\n '-e', '--empty',\r\n action='store_true',\r\n help=\"Includes empty translation strings in .prob files.\"\r\n )\r\n\r\n parser.add_argument(\r\n '-v', '--verbose',\r\n action='count', default=0,\r\n help=\"Turns on info-level logging.\"\r\n )\r\n\r\n return parser", "def get_parser():\n # Parent and only parser.\n parser = argparse.ArgumentParser(\n add_help=True,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('mode', action='store',\n choices=range(len(MODES)),\n type=int,\n help='Select mode of file download.\\n'\n ' e.g: 0(rated) or 1(list).')\n parser.add_argument('torr_page', action='store',\n choices=range(len(TORRENTS)),\n type=int,\n help='Select tracking page to download from.\\n'\n ' e.g: 0 to .. ' + str(len(TORRENTS)-1) + '.')\n parser.add_argument('str_search', action='store',\n type=str,\n help='Input torrent string to search.\\n'\n ' e.g: \"String search\"')\n return(parser)", "def create_arg_parser():\n server_modes = ['builtin', 'waitress']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('h', metavar='HOST', help='Server HOST (e.g. \"localhost\")', type=str)\n parser.add_argument('p', metavar='PORT', help='Server PORT (e.g. \"5001\")', type=int)\n parser.add_argument('m', metavar='SERVER_MODE', help=\", \".join(server_modes), choices=server_modes, type=str)\n parser.add_argument('--debug', help=\"Run builtin server in debug mode\", action='store_true', default=False)\n\n return parser", "def get_parser():\n\tparser = argparse.ArgumentParser('preprocessing.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nRun a piepline for one NICER ObsID data. \n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('obsid', type=str, \n\t\thelp='ObsID (e.g., 4012010109)')\t\n\treturn parser", "def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. 
Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser", "def get_parser():\n parser = ArgumentParser(description=\"Script used to generate Freeplane \"\n + \"mindmap files\")\n\n # This is use when people in Linaro aren't using their email address.\n parser.add_argument('--disable-altname', required=False,\n action=\"store_true\", default=False,\n help=\"Use alternative names (from cfg.yaml) to the tree\")\n\n parser.add_argument('--assignee', required=False,\n action=\"store_true\", default=False,\n help=\"Add assignees (from cfg.yaml) to the tree\")\n\n parser.add_argument('-a', '--author', required=False,\n action=\"store_true\", default=False,\n help=\"If set, git statistic only count the commit \"\n + \"from the author\")\n\n parser.add_argument('-p', '--path', required=False, action=\"store\",\n default=\"/home/jyx/devel/optee_projects/reference/linux\",\n help='Full path to the kernel tree')\n\n parser.add_argument('-s', '--since', required=False, action=\"store\",\n default=None,\n help='Used with the git log --since command')\n\n parser.add_argument('-o', '--output', required=False, action=\"store\",\n default=\"linux-kernel.mm\",\n help='Output filename')\n\n parser.add_argument('-v', required=False, action=\"store_true\",\n default=False,\n help='Output some verbose debugging info')\n\n return parser", "def get_parser():\n\n parser = parser.ArgumentParser()\n return parser", "def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser", "def get_parser():\n\tparser = argparse.ArgumentParser(description=\"Twitter 
Searcher\")\n\tparser.add_argument(\"-q\",\n\t\t\t\t\t\t\"--query\",\n\t\t\t\t\t\tdest=\"query\",\n\t\t\t\t\t\thelp=\"Query/Filter\",\n\t\t\t\t\t\tdefault='*')\n\tparser.add_argument(\"-d\",\n\t\t\t\t\t\"--data-dir\",\n\t\t\t\t\tdest=\"city\",\n\t\t\t\t\thelp=\"Output/Data Directory\")\n\treturn parser", "def get_parser():\n module_parser = ArgumentParser(\n formatter_class=ArgumentDefaultsHelpFormatter)\n module_parser.add_argument(\"-i\", dest=\"data_path\", type=str,\n help=\"the location dataset\")\n module_parser.add_argument(\"-o\", dest=\"output_path\", type=str,\n help='base dir for outputs')\n module_parser.add_argument(\"-subdir\", dest=\"subdir\", type=str,\n choices=['test', 'train', 'val', 'all'],\n help='subdir: trn, test, val, or all ...')\n module_parser.add_argument(\"-n\", dest=\"n_train\", type=int,\n help='n: number of images for training')\n module_parser.add_argument(\"-Rx\", dest=\"x_res\", type=int,\n help='x resulution for final img')\n module_parser.add_argument(\"-Ry\", dest=\"y_res\", type=int,\n help='y resolution of final image')\n module_parser.add_argument(\"-d\", dest=\"d\",\n type=int,\n default=0,\n help='debug')\n return module_parser", "def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(description=config.DESCRIPTION)\n parser.add_argument('url_file', metavar='URL_FILE', type=str,\n help=config.HELP_URL_FILE)\n parser.add_argument('-d', metavar='DEST_DIR', dest='destination_dir', default=config.DEFAULT_DESTINATION_DIR, type=str,\n help=config.HELP_DESTINATION_DIR)\n parser.add_argument('-l', metavar='LOG_FILE', dest='log_file', default=config.DEFAULT_LOG_FILE, type=str,\n help=config.HELP_LOG_FILE % config.DEFAULT_LOG_FILE)\n\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(\n description=\"\"\"Start a Classy Vision training job.\n\n This can be used for training on your local machine, using CPU or GPU, and\n for distributed training. This script also supports Tensorboard, Visdom and\n checkpointing.\"\"\"\n )\n\n parser = add_generic_args(parser)\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(usage = \"{prog} [options]\")\n parser.add_argument(\"-n\", \"--nights\", type=str, help=\"nights as comma separated string\")\n parser.add_argument(\"--obstypes\", type=str, default=None, help=\"comma separated list of exposure types to include in \"+\\\n \"the exposure table, e.g. 
science,arc,flat,dark,zero, ...\")\n parser.add_argument(\"-i\", \"--path-to-data\", type=str, default=None, help=\"path to the raw input data\")\n parser.add_argument(\"-o\",\"--exp-table-path\", type=str, default=None, help=\"path to save exposure tables, without monthly subdirectory\")\n parser.add_argument(\"--overwrite-files\", action=\"store_true\", help=\"overwrite existing exposure tables\")\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"print verbose output\")\n\n return parser", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser", "def get_parser():\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-m\", \"--model\",\n dest=\"modelfile\",\n help=\"where is the model file (.tar)?\",\n metavar=\"FILE\",\n type=lambda x: utils.is_valid_file(parser, x),\n required=True)\n parser.add_argument(\"-i\", \"--input\",\n dest=\"inputvec\",\n help=\"\"\"a file which contains an input vector\n [[0.12, 0.312, 1.21 ...]]\"\"\",\n metavar=\"FILE\",\n type=lambda x: utils.is_valid_file(parser, x),\n required=True)\n return parser", "def get_parser():\n parser = ArgumentParser(\n description='phpMyAdmin work reporting tool\\n\\nGenerates list of commits and issues handled in given period.',\n epilog='Credentials can be also stored in ~/.config/phpmyadmin:\\n\\n[github]\\nuser=USER\\ntoken=TOKEN',\n formatter_class=RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n '-u', '--user',\n help='GitHub username, used for both reporting and authentication'\n )\n parser.add_argument(\n '-t', '--token',\n help='GitHub authentication token'\n )\n parser.add_argument(\n '-s', '--start-date',\n type=dateutil.parser.parse,\n default=datetime.now() - timedelta(days=7),\n help='Starting datetime, defaults to 7 days ago'\n )\n parser.add_argument(\n '-e', '--end-date',\n type=dateutil.parser.parse,\n default=datetime.now(),\n help='Ending datetime, defaults to 
current timestamp'\n )\n parser.add_argument(\n '-f', '--format',\n choices=('markdown', ),\n default='markdown',\n help='Output format',\n )\n parser.add_argument(\n '-w', '--weekly',\n action='store_true',\n help='Weekly report not including private repositories'\n )\n parser.add_argument(\n '-W', '--last-week',\n action='store_true',\n help='Create report for last week'\n )\n parser.add_argument(\n '-M', '--last-month',\n action='store_true',\n help='Create report for last month'\n )\n parser.add_argument(\n '--this-week',\n action='store_true',\n help='Create report for this week'\n )\n return parser", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def build_parser ():\n\n parser = argparse.ArgumentParser (description = __doc__)\n\n parser.add_argument (\n '-v', '--verbose', dest='verbose', action='count',\n help='increase output verbosity', default=0\n )\n parser.add_argument (\n '-l', '--live', dest='get_live_data', action='store_true',\n help='get live data from OSM database',\n )\n parser.add_argument (\n '-e', '--edit', action='store_true',\n help='edit the OSM database',\n )\n parser.add_argument (\n '-u', '--user', dest='my_edits', action='store_true',\n help='only report about my edits',\n )\n parser.add_argument (\n '--min-length', dest=\"min_length\", type=float, default=1000.0,\n help='way must be longer than this to get a ref (in m) (default=1000)',\n )\n parser.add_argument (\n '--batch-size', dest=\"batch_size\", type=int, default=10,\n help='apply OSM edits in changesets of this size (default=10)',\n )\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description=\"Update golang.org/x/<name> in vendor folder\")\n parser.add_argument('-q', '--quiet', dest='verbose', action='store_false', help='work quietly')\n parser.add_argument('--revision', help='update deps to this revision', default='')\n parser.add_argument('name', help='name of the golang.org/x/ package. 
Can be empty', default='', nargs='?')\n return parser", "def create_parser():\n now = datetime.datetime.today()\n default_date = \"{}-{}-{}\".format(now.day, now.month, now.year)\n parser = argparse.ArgumentParser(description=\"Git plugin for automatic insertion of @since and @author annotations \"\n \"into *.java source files in a project.\",\n epilog=\"© Avner & Oded\")\n parser.add_argument(\"-v\", \"--version\", help=\"Display the version of this plugin\", action='store_true')\n parser.add_argument(\"--since\", nargs='?', help=\"Add the @since annotations to project\", const=default_date)\n parser.add_argument(\"--author\", nargs='?', help=\"Add the @author annotations to project\", const=getpass.getuser())\n\n return parser", "def get_parser():\n parser = libdot.ArgumentParser(description=__doc__)\n parser.add_argument('paths', nargs='+',\n help='Image files or directories to crush.')\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-p', '--platform', dest='platform', type=str, required=False, default='')\n return parser", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def cmdline_parser():\n\n # http://docs.python.org/dev/howto/argparse.html\n parser = argparse.ArgumentParser(description=__doc__)\n \n parser.add_argument(\"--verbose\",\n action=\"store_true\",\n help=\"Be verbose\")\n parser.add_argument(\"--debug\",\n action=\"store_true\",\n help=\"Enable debugging\")\n parser.add_argument(\"-b\", \"--bam\",\n required=True,\n help=\"Input BAM file matching vcf\")\n parser.add_argument(\"-i\", \"--vcf\",\n help=\"Input VCF file containing variants to analyze\"\n \" (clashes with --var)\")\n parser.add_argument(\"-v\", \"--var\",\n help=\"Report reads for this variant only. 
Format: chr:pos:ref-alt\"\n \" (clashes with --vcf)\")\n default = 0\n parser.add_argument(\"--mq-filter\",\n dest=\"min_mq\",\n type=int,\n default=default,\n help=\"Ignore reads with mapping quality below this value (default=%d)\" % default)\n default = 5\n parser.add_argument(\"--bq-filter\",\n dest=\"min_bq\",\n type=int,\n default=default,\n help=\"Ignore reads with bases below this value (default=%d)\" % default)\n parser.add_argument(\"-a\", \"--use-orphan\",\n action=\"store_true\",\n help=\"Don't ignore orphan-reads / anomalous read-pairs\")\n\n return parser", "def get_parser():\n _program_name = Path(__file__).stem\n example = f''' Example: >> {_program_name} sample.odb\\n '''\n parser = ArgumentParser(description=__doc__.split('..')[0], # Don't include module author part of doc string\n formatter_class=ArgumentDefaultsHelpFormatter, epilog=example, prog=_program_name)\n parser.add_argument(nargs=1,\n dest='input_file',\n type=str,\n help='odb or odbreport file for extracting data',\n metavar='sample.odb')\n parser.add_argument('-o', '--output-file',\n dest='output_file',\n type=str,\n help='file for printing output',\n metavar='sample.h5')\n parser.add_argument('-f', '--output-file-type',\n dest='output_type',\n choices=['yaml', 'json', 'h5'],\n type=str,\n default='h5',\n help='Type of file in which to store output data',\n metavar='h5')\n parser.add_argument('-r', '--odb-report-args',\n dest='odb_report_args',\n type=str,\n help='Arguments to give to the odbreport command. Require the ``option=value`` interface style.',\n metavar='\"step=step1 results\"')\n parser.add_argument('-a', '--abaqus-command',\n dest='abaqus_command',\n type=str,\n default=_settings._default_abaqus_command,\n help='Abaqus command to use',\n metavar='/path/to/abaqus')\n parser.add_argument('-d', '--delete-report-file',\n action=\"store_true\",\n dest='delete_report_file',\n default=False,\n help='Delete after parsing the file created by the odbreport command')\n parser.add_argument('-v', '--verbose',\n action=\"store_true\",\n dest='verbose',\n default=False,\n help='Print all messages')\n return parser", "def get_parser():\n\tparser = argparse.ArgumentParser('tallyup.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nTally up a student score file.\n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('--version', '-v', action='version', version=version,\n\t\thelp='show version of this command.')\n\tparser.add_argument('--csvfile', '-i', type=str, required=True, \n\t\thelp='input csv file.')\n\treturn parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--getErrors\",\n type=str,\n default=None,\n help=\"get error messages - send \\'yes\\' \")\n parser.add_argument(\"--host\",\n type=str,\n default=\"localhost\",\n help=\"Host of redis. Default : localhost\")\n parser.add_argument(\"--port\",\n type=int,\n default=6379,\n help=\"Port of redis. Default : 6379\")\n parser.add_argument(\"--db\",\n type=int,\n default=0,\n help=\"Db of redis. 
Default : 0\")\n parser.add_argument(\"--cleanTemp\",\n type=str,\n default=None,\n help=\"clean trash files from db - send \\'yes\\' \")\n return parser", "def parser_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\", type=str, help=\"yaml configuration file name\")\n return parser.parse_args()", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def get_parser():\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n prog='pv2')\n subparsers = parser.add_subparsers(dest='cmd')\n # subparsers.add_parser('selfcheck',\n # add_help=False,\n # help=\"Self-check of the sst toolkit.\")\n # parser.add_argument('--version',\n # action='version',\n # version=('sst %s' % str(sst.__version__)))\n subparsers.add_parser('eval',\n add_help=False,\n parents=[evaluate.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Evaluate a single image\"))\n subparsers.add_parser('train',\n add_help=False,\n parents=[train.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Train a new model.\"))\n subparsers.add_parser('plot',\n add_help=False,\n parents=[plot.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Plot summary information.\"))\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir',\n help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def _create_parser(self):\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n action='store_true',\n default=False,\n help='Verbose mode (turn on logging.info)')\n\n parser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n default=False,\n help='Debug (turn on logging.debug)')\n\n return parser", "def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. 
Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? (full path please)')\n\n return parser", "def get_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'names',\n help=(\n 'list of name-location pairs '\n '(location can be nat/hhs/cen/state or specific location labels)'))\n parser.add_argument(\n '--first',\n '-f',\n type=int,\n help='first epiweek override')\n parser.add_argument(\n '--last',\n '-l',\n type=int,\n help='last epiweek override')\n parser.add_argument(\n '--epiweek',\n '-w',\n type=int,\n help='epiweek override')\n parser.add_argument(\n '--test',\n '-t',\n default=False,\n action='store_true',\n help='dry run only')\n parser.add_argument(\n '--valid',\n '-v',\n default=False,\n action='store_true',\n help='do not fall back to stable wILI; require unstable wILI')\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--todir', help='destination directory for downloaded images')\n parser.add_argument('logfile', help='apache logfile to extract urls from')\n\n return parser", "def _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=_description,\n add_help=True,\n )\n add_generic_args(parser)\n add_diff_args(parser)\n add_filename_args(parser, [\"base\", \"remote\"])\n\n parser.add_argument(\n '-o', '--output',\n default=None,\n help=\"if supplied, the diff is written to this file. 
\"\n \"Otherwise it is printed to the terminal.\")\n\n return parser", "def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args", "def build_argument_parser():\n\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\"filenames\", nargs=\"*\", help=\"Filenames to check.\")\n return parser", "def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description=\"Tweet Downloader\")\n parser.add_argument(\"-d\",\n \"--data\",\n dest=\"data\",\n help=\"Read data from file or display initial setting\",\n default=False)\n\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(\n description='Converts HTML from file or url to a clean text version')\n parser.add_argument('input', nargs='?', default=None,\n help='Html input either from a file or an url '\n '(default:stdin)')\n parser.add_argument('-o', '--output', type=str,\n help='Output file (default:stdout).')\n parser.add_argument('-e', '--encoding', type=str,\n help='Content encoding for reading and writing files '\n '(default:utf-8)',\n default='utf-8')\n parser.add_argument('-i', '--display-image-captions',\n action='store_true', default=False,\n help='Display image captions (default:false).')\n parser.add_argument('-d', '--deduplicate-image-captions',\n action='store_true', default=False,\n help='Deduplicate image captions (default:false).')\n parser.add_argument('-l', '--display-link-targets',\n action='store_true', default=False,\n help='Display link targets (default:false).')\n parser.add_argument('-a', '--display-anchor-urls',\n action='store_true', default=False,\n help='Deduplicate image captions (default:false).')\n parser.add_argument('--indentation', default='extended',\n help='How to handle indentation (extended or strict;'\n ' default: extended).')\n parser.add_argument('-v', '--version',\n action='store_true', default=False,\n help='display version information')\n return parser", "def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n 
opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser", "def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser", "def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. 
Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser", "def make_parser():\n\n parser = ArgumentParser(description=\"Create dummy sensor stream esque data\")\n parser.add_argument('--tuples-per-emit', '-t', type=int, default=1,\n help='number of tuples to emit at once')\n parser.add_argument('--sensors', '-s', type=int, default=1,\n help='number of sensors to generate')\n\n return parser", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def cmdline_parser():\n parser = argparse.ArgumentParser(description=\"\"\" \"\"\")\n parser.add_argument(\"-g\", \"--gta\",\n help=\"\"\"gta sequences\"\"\",\n dest=\"gta\",\n required=True)\n return parser", "def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n auto_env_var_prefix=\"lookout_\")\n slogging.add_logging_args(parser)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n\n def add_parser(name, help):\n return subparsers.add_parser(\n name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)\n\n list_parser = add_parser(\"list\", \"Print globally available analyzers.\")\n list_parser.set_defaults(handler=list_analyzers)\n\n run_parser = add_parser(\n \"run\", \"Launch a new service with the specified (one or more) analyzers.\")\n run_parser.set_defaults(handler=run_analyzers)\n add_analyzer_arg(run_parser)\n run_parser.add(\"-c\", \"--config\", is_config_file=True,\n help=\"Path to the configuration file with option defaults.\")\n run_parser.add(\"-s\", \"--server\", required=True,\n help=\"Lookout server address, e.g. localhost:1234.\")\n run_parser.add(\"-w\", \"--workers\", type=int, default=1,\n help=\"Number of threads which process Lookout events.\")\n add_model_repository_args(run_parser)\n run_parser.add_argument(\"--request-server\", default=\"auto\",\n help=\"Address of the data retrieval service. \\\"same\\\" means --server.\")\n\n init_parser = add_parser(\"init\", \"Initialize the model repository.\")\n init_parser.set_defaults(handler=init_repo)\n add_model_repository_args(init_parser)\n\n tool_parser = add_parser(\"tool\", \"Invoke the tooling of a given analyzer.\")\n tool_parser.set_defaults(handler=run_analyzer_tool)\n tool_parser.add(\"analyzer\", help=\"Fully qualified package name with an analyzer.\")\n tool_parser.add(\"args\", nargs=argparse.REMAINDER)\n\n package_parser = add_parser(\n \"package\",\n \"Package several analyzers to a Docker container and write a sample Docker Compose config \"\n \"for Lookout.\")\n package_parser.set_defaults(handler=package_cmdline_entry)\n add_analyzer_arg(package_parser)\n package_parser.add(\"-w\", \"--workdir\", help=\"Generate files in this directory.\",\n default=tempfile.mkdtemp(prefix=\"lookout_package_\"))\n package_parser.add(\"--requirements\", help=\"Path to a custom requirements.txt\")\n package_parser.add(\"-r\", \"--repo\", help=\"GitHub repository name to watch. 
\"\n \"Example: \\\"src-d/lookout\\\".\",\n required=True)\n package_parser.add(\"-u\", \"--user\", help=\"GitHub user name which will send review comments.\",\n required=True)\n paturl = \"https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/\" # noqa\n package_parser.add(\"-t\", \"--token\", help=\"GitHub token for -u/--user. See \" + paturl,\n required=True)\n package_parser.add(\"-y\", \"--yes\", help=\"Run the commands in the end.\",\n action=\"store_true\")\n package_parser.add(\"-n\", \"--no\", help=\"Do not run the commands in the end.\",\n action=\"store_true\")\n return parser", "def create_parser():\n\n parser = argparse.ArgumentParser(description='Extract unique sensors')\n parser.add_argument('--file_name', help='File to extract unique sensors from.')\n parser.add_argument(\n '--url',\n help='A reference to SensorList.txt that specifies its location on a computer'\n 'network.'\n )\n parser.add_argument('--kat_sensor', required=True, help='Name of unique sensor')\n parser.add_argument('-v', metavar='verbosity', type=int, default=2, help='Logging'\n 'verbosity: 0 -critical, 1- error, 2 -warning, 3 -info, 4 -debug')\n\n args = parser.parse_args()\n return args", "def create_parser():\n parser = argparse.ArgumentParser(description='Watching for files containing magictext')\n parser.add_argument('--ext', help='File extensions to filter on, default=.txt', default='.txt')\n parser.add_argument('--poll', help=\"Polling interval in seconds, default=1.0\", type=float, default=1.0)\n parser.add_argument('directory', help='Directory to watch.')\n parser.add_argument('magictext', help='Text to search for within matching files.')\n return parser", "def make_parser():\n\n parser = argparse.ArgumentParser(description='Inference engine.')\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n subparsers.required = True\n solver_subparser = subparsers.add_parser('run')\n solver_subparser.add_argument(\n '-v', '--verbose', help='enable verbose mode.', action='store_true'\n )\n solver_subparser.add_argument(\n '-d', '--debug', help='enable debug mode.', action='store_true'\n )\n solver_subparser.add_argument(\n 'filename', type=str,\n help='filename containing the instructions to process.'\n )\n return parser", "def create_cli_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", nargs=\"?\", help=\"path to yaml configuration file\")\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n\n # required args\n parser.add_argument('--project_id',\n help='Project id for project containing BQ data',\n default=KEY_FILE,\n type=str,\n required=True)\n\n # data and model args\n parser.add_argument('--training_budget',\n help='Training budget in hours',\n default=1,\n type=int)\n parser.add_argument('--key_file',\n help='JSON key file for API access',\n default=KEY_FILE,\n type=str)\n parser.add_argument('--location',\n help='GCP region to run',\n default=LOCATION,\n type=str)\n parser.add_argument('--automl_dataset',\n help='Name of AutoML dataset',\n default=AUTOML_DATASET,\n type=str)\n parser.add_argument('--automl_model',\n help='Name of AutoML model',\n default=AUTOML_MODEL,\n type=str)\n parser.add_argument('--bq_dataset',\n help='BigQuery dataset to import from',\n default=BQ_DATASET,\n type=str)\n parser.add_argument('--bq_table',\n help='BigQuery table to import from',\n default=BQ_TABLE,\n type=str)\n parser.add_argument('--batch_gcs_input',\n help='GCS URI for batch predict 
CSV',\n default=BATCH_GCS_INPUT,\n type=str)\n parser.add_argument('--batch_gcs_output',\n help='GCS URI for batch predict output',\n default=BATCH_GCS_OUTPUT,\n type=str)\n return parser", "def get_parser():\n if sys.version_info[0] < 3:\n # Using a version of Python < 3.\n parser = ArgumentParser(version=VERSION) # pylint: disable=E1123\n else:\n parser = ArgumentParser()\n parser.add_argument('--version', action='version', version=VERSION)\n\n subparsers = parser.add_subparsers(\n title='actions', help='Types of zappa commands',\n dest='command')\n\n parser_update_stack = subparsers.add_parser(\n 'update', help='Update a zappa deploy')\n parser_update_stack.add_argument(\n '--name', required=True,\n help='Name of the deployment (dev, prod, etc.)')\n\n parser_create_stack = subparsers.add_parser(\n 'deploy', help='Create a zappa deploy')\n parser_create_stack.add_argument(\n '--name', required=True,\n help='Name of the deployment (dev, prod, etc.)')\n\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser", "def create_arg_parser():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n '-f',\n '--file',\n required=True,\n help='Name of clean data file'\n )\n return arg_parser", "def setup_parser():\n parser = HelpfulParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('infile', type=str, help=\"input data file\")\n\n parser.add_argument('-u', '--usage', action=\"help\",\n help=\"show this help message and exit\")\n parser.add_argument('-h', '--host', metavar='HOST', type=str,\n default='localhost', help='Server hostname')\n parser.add_argument('-p', '--port', metavar='PORT', type=int,\n default='3000', help='Server port')\n parser.add_argument('-U', '--user', metavar='USER', type=str,\n default=None, help='Username')\n parser.add_argument('-P', '--passwd', metavar='PW', type=str,\n default=None, help='Password')\n parser.add_argument('-n', '--nspace', metavar='NS', type=str,\n default='test', help='Namespace')\n parser.add_argument('-s', '--set', metavar='SET', type=str,\n default='osm', help='Set name')\n return parser", "def _CreateParser():\n parser = commandline.ArgumentParser(description=__doc__, caching=True)\n\n # TODO(rcui): Have this use the UI-V2 format of having source and target\n # device be specified as positional arguments.\n parser.add_argument('--force', action='store_true', default=False,\n help='Skip all prompts (i.e., for disabling of rootfs '\n 'verification). 
This may result in the target '\n 'machine being rebooted.')\n sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)\n parser.add_argument('--board', default=sdk_board_env,\n help=\"The board the Chrome build is targeted for. When \"\n \"in a 'cros chrome-sdk' shell, defaults to the SDK \"\n \"board.\")\n parser.add_argument('--build-dir', type='path',\n help='The directory with Chrome build artifacts to '\n 'deploy from. Typically of format '\n '<chrome_root>/out/Debug. When this option is used, '\n 'the GYP_DEFINES environment variable must be set.')\n parser.add_argument('--target-dir', type='path',\n default=None,\n help='Target directory on device to deploy Chrome into.')\n parser.add_argument('-g', '--gs-path', type='gs_path',\n help='GS path that contains the chrome to deploy.')\n parser.add_argument('--nostartui', action='store_false', dest='startui',\n default=True,\n help=\"Don't restart the ui daemon after deployment.\")\n parser.add_argument('--nostrip', action='store_false', dest='dostrip',\n default=True,\n help=\"Don't strip binaries during deployment. Warning: \"\n 'the resulting binaries will be very large!')\n parser.add_argument('-p', '--port', type=int, default=remote.DEFAULT_SSH_PORT,\n help='Port of the target device to connect to.')\n parser.add_argument('-t', '--to',\n help='The IP address of the CrOS device to deploy to.')\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Show more debug output.')\n parser.add_argument('--mount-dir', type='path', default=None,\n help='Deploy Chrome in target directory and bind it '\n 'to the directory specified by this flag.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n parser.add_argument('--mount', action='store_true', default=False,\n help='Deploy Chrome to default target directory and bind '\n 'it to the default mount directory.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n\n group = parser.add_argument_group('Advanced Options')\n group.add_argument('-l', '--local-pkg-path', type='path',\n help='Path to local chrome prebuilt package to deploy.')\n group.add_argument('--sloppy', action='store_true', default=False,\n help='Ignore when mandatory artifacts are missing.')\n group.add_argument('--staging-flags', default=None, type=ValidateGypDefines,\n help=('Extra flags to control staging. Valid flags are - '\n '%s' % ', '.join(chrome_util.STAGING_FLAGS)))\n # TODO(stevenjb): Remove --strict entirely once removed from the ebuild.\n group.add_argument('--strict', action='store_true', default=False,\n help='Deprecated. Default behavior is \"strict\". Use '\n '--sloppy to omit warnings for missing optional '\n 'files.')\n group.add_argument('--strip-flags', default=None,\n help=\"Flags to call the 'strip' binutil tool with. \"\n \"Overrides the default arguments.\")\n group.add_argument('--ping', action='store_true', default=False,\n help='Ping the device before connection attempt.')\n group.add_argument('--mash', action='store_true', default=False,\n help='Copy additional files for mus+ash. Will not fit in '\n 'the default target-dir.')\n\n group = parser.add_argument_group(\n 'Metadata Overrides (Advanced)',\n description='Provide all of these overrides in order to remove '\n 'dependencies on metadata.json existence.')\n group.add_argument('--target-tc', action='store', default=None,\n help='Override target toolchain name, e.g. 
'\n 'x86_64-cros-linux-gnu')\n group.add_argument('--toolchain-url', action='store', default=None,\n help='Override toolchain url format pattern, e.g. '\n '2014/04/%%(target)s-2014.04.23.220740.tar.xz')\n\n # GYP_DEFINES that Chrome was built with. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GYP_DEFINES\n # enviroment variable. WILL BE DEPRECATED.\n parser.add_argument('--gyp-defines', default=None, type=ValidateGypDefines,\n help=argparse.SUPPRESS)\n\n # GN_ARGS (args.gn) used to build Chrome. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GN_ARGS env variable.\n # CURRENLY IGNORED, ADDED FOR FORWARD COMPATABILITY.\n parser.add_argument('--gn-args', default=None, type=ValidateGnArgs,\n help=argparse.SUPPRESS)\n\n # Path of an empty directory to stage chrome artifacts to. Defaults to a\n # temporary directory that is removed when the script finishes. If the path\n # is specified, then it will not be removed.\n parser.add_argument('--staging-dir', type='path', default=None,\n help=argparse.SUPPRESS)\n # Only prepare the staging directory, and skip deploying to the device.\n parser.add_argument('--staging-only', action='store_true', default=False,\n help=argparse.SUPPRESS)\n # Path to a binutil 'strip' tool to strip binaries with. The passed-in path\n # is used as-is, and not normalized. Used by the Chrome ebuild to skip\n # fetching the SDK toolchain.\n parser.add_argument('--strip-bin', default=None, help=argparse.SUPPRESS)\n return parser", "def build_parser(self, parser: ArgumentParser) -> None:", "def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps", "def setup_parser():\n\n psr_desc=\"cfdi engine service interface\"\n psr_epi=\"select a config profile to specify defaults\"\n\n psr = argparse.ArgumentParser(\n description=psr_desc, epilog=psr_epi)\n\n psr.add_argument('-nmp', action='store_true', dest='nmp',\n help='unique process approach (useful in development)')\n\n psr.add_argument('-d', action='store_true', dest='debug',\n help='print debug information')\n\n psr.add_argument('-c', '--config', action='store',\n dest='config',\n help='load an specific config profile')\n\n psr.add_argument('-p', '--port', action='store',\n dest='port',\n help='launches service on specific port')\n\n return psr.parse_args()", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.5,\n help=\"Probability threshold for detections filtering\"\n \"(0.5 by default)\")\n return parser", "def get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=f\"mobile_modem_exporter version {__version__}. Exports signal quality information for mobile modems. See the manpage or ReadTheDocs for more info.\"\n )\n\n parser.add_argument(\n \"PROMPATH\",\n type=str,\n help=\"The path to the prometheus node_exporter textfile collector file to write output to.\",\n )\n\n parser.add_argument(\n \"SERIALDEVICE\",\n nargs=\"+\",\n type=str,\n help=\"The path to a serial device to get signal quality from. Can be specified multiple times.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=\"DEBUG\",\n help=\"Debug mode. Equal to setting --log-level=DEBUG.\",\n default=argparse.SUPPRESS,\n )\n\n parser.add_argument(\n \"-l\",\n \"--log-level\",\n dest=\"loglevel\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Logging level. One of DEBUG, INFO, WARNING, ERROR, CRITICAL. Defaults to INFO.\",\n default=\"INFO\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--sleep\",\n type=int,\n nargs=\"?\",\n help=\"Sleep this many seconds between runs, default: %(default)s\",\n default=10,\n )\n\n parser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=\"WARNING\",\n help=\"Quiet mode. No output at all if no errors are encountered. 
Equal to setting --log-level=WARNING.\",\n default=argparse.SUPPRESS,\n )\n\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=f\"%(prog)s version {__version__}\",\n help=\"Show mobile_modem_exporter version and exit.\",\n )\n\n return parser", "def build_parser():\n parser = argparse.ArgumentParser(usage='$ python recentfeed.py http://domain.com/rss/',\n description='''Takes a list of URLs passed as args.\n Returns the items published today unless otherwise specified.''',\n epilog='')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\")\n parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0, action=\"count\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str)\n parser.add_argument(\"urls\", action=\"append\", nargs=\"*\")\n return parser", "def _create_parser():\n parser = ArgumentParser(description=\"A CLI that sends messages to an Azure event hub.\")\n\n parser.add_argument(\"--connection-string\", type=str, required=True,\n help=\"The Azure event hub connection string\")\n\n parser.add_argument(\"--name\", type=str, required=True,\n help=\"The Azure event hub name\")\n\n parser.add_argument(\"--interval\", type=int, required=False,\n help=\"The number of seconds to wait between sends. Defaults to 10 seconds.\")\n\n parser.add_argument(\"--what-if\", type=bool, required=False,\n help=\"Run the program without sending messages to the Event Hub. \"\n \"The app will log what would have been sent to the Event Hub.\")\n\n return parser", "def build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=True, type=str,\n help=\"Path to an xml file with a trained model.\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to image or video file\")\n parser.add_argument(\"-l\", \"--cpu_extension\", required=False, type=str,\n default=None,\n help=\"MKLDNN (CPU)-targeted custom layers.\"\n \"Absolute path to a shared library with the\"\n \"kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", type=str, default=\"CPU\",\n help=\"Specify the target device to infer on: \"\n \"CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device \"\n \"specified (CPU by default)\")\n parser.add_argument(\"-pt\", \"--prob_threshold\", type=float, default=0.3,\n help=\"Probability threshold for detections filtering\"\n \"(0.3 by default)\")\n return parser", "def get_cmd_line_parser(version=None, *args, **kwargs):\n parser = argparse.ArgumentParser(*args, **kwargs)\n if version:\n parser.add_argument(\n '--version', help='Print the version and exit.', action='version',\n version='%(prog)s {}'.format(version))\n DebugAction.add_parser_argument(parser)\n VerboseAction.add_parser_argument(parser)\n\n return parser", "def argParser():\n parser = ArgumentParser(description=('Downloads problems from Project Euler'\n ' and saves copies locally.'))\n parser.add_argument('-s', '--start', type=int, default=1,\n help='The problem number to start the downloads at, default 1.')\n parser.add_argument('-e', '--end', type=int, default=None,\n help='The problem number to end the downloads at, default None.')\n return parser", "def parse_arguments():\n parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n dest=\"verbosity\",\n choices=(\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"CRITICAL\"),\n default=\"ERROR\",\n help=\"Verbosity/Log level. Defaults to ERROR\",\n )\n parser.add_argument(\n \"-l\", \"--logfile\", dest=\"logfile\", help=\"Store log to this file.\"\n )\n parser.add_argument(\n \"--username\",\n dest=\"username\",\n required=True,\n help=\"GitHub username.\",\n )\n parser.add_argument(\n \"--pat\",\n dest=\"pat\",\n required=True,\n help=\"GitHub PAT.\",\n )\n return parser", "def makeParser():\n parser = argparse.ArgumentParser(\n description=(\n \"Print a JSON object containing reference to read \"\n \"distances extracted from a SAM file.\"\n )\n )\n\n parser.add_argument(\n \"--samFile\",\n action=\"append\",\n required=True,\n help=\"The SAM file(s) to load. May be repeated.\",\n )\n\n parser.add_argument(\n \"--minMatchingReads\",\n type=int,\n help=(\n \"The minimum number of reads that must match a reference for it \"\n \"to be included.\"\n ),\n )\n\n parser.add_argument(\n \"--scoreTag\",\n help=(\n \"The score tag to use for the alignment score. If not given, \"\n \"1 will be used to indicate that a read matched a reference \"\n \"(non-matches are not included). The default is no score tag, \"\n 'which is not that useful. A good choice is \"AS\", for the '\n \"alignment score, but that has to be present in the SAM file, \"\n \"which means that the aligner (bowtie2, bwa, etc. has to have \"\n \"produced such a tag.\"\n ),\n )\n\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Print extra information.\"\n )\n\n return parser", "def create_parser(self, prog_name, subcommand):\n return OptionParser(prog=prog_name,\n usage=self.usage(subcommand),\n version=self.get_version(),\n option_list=self.option_list)", "def make_parser():\n parser = argparse.ArgumentParser(description='Parse Wiki Page')\n parser.add_argument('wikipage',\n help='the name of the wiki page to parse')\n parser.add_argument('output_file_name', nargs='?',\n help='the name of the file to upload/write to')\n parser.add_argument('-r', '--redirect', dest='redirect',\n help='the name of the remote page to redirect to')\n parser.add_argument('--s3',action='store_true',\n help='upload file to S3? 
(Default = False)')\n parser.add_argument('--dryrun',action='store_true')\n #ToDo: add arguments --dryrun and --tofile? --verbose? --s3 --category\n return parser", "def init_parser():\n parser = OptionParser()\n parser.add_option(\"-n\", \"--interactive\", action=\"store_true\", help=\"run in interactive (non-daemon) mode\")\n parser.add_option(\"-r\", \"--run\", action=\"store_true\", help=\"starts process identified by -app parameter\")\n parser.add_option(\"-k\", \"--kill\", action=\"store_true\", help=\"kill process identified by -app parameter\")\n parser.add_option(\"-a\", \"--app\", action=\"store\", help=\"application to start (process name)\")\n parser.add_option(\"-q\", \"--query\", action=\"store_true\", help=\"query application's state\")\n parser.add_option(\"-i\", \"--install_ve\", action=\"store_true\", help=\"install a virtualenv for the runtime to use\")\n parser.add_option(\"-s\", \"--shell\", action=\"store_true\", help=\"run an ipython shell within the virtualenv\")\n parser.add_option(\"-t\", \"--tests\", action=\"store_true\", help=\"run tests\")\n parser.add_option(\"-x\", \"--xunit\", action=\"store_true\", help=\"run tests with coverage and xunit output for Jenkins\")\n parser.add_option(\"-z\", \"--analyze\", action=\"store_true\", help=\"run pylint on project\")\n parser.add_option(\"-l\", \"--list\", action=\"store_true\", help=\"list available applications\")\n parser.add_option(\"-o\", \"--outfile\", action=\"store\", help=\"save results from a report to a file\")\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Encode ')\n parser.add_argument('rgi_out', type=str, help='Folder containing RGI output')\n parser.add_argument('ast_tsv', type=str, help='Filepath to AST TSV')\n parser.add_argument('--perfect', dest='perfect_only', default=False,\n action='store_true', help='Only use perfect RGI results')\n return parser", "def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser", "def make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history 
or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. -qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def cmdline_parser(cls):\n\n # Add arguments that user can specify and put info you can discuss what each options are \n parser = argparse.ArgumentParser(description=\"Produces a plot of a computed potential energy surface.\")\n parser.add_argument('-i', '--input', type=str, default='input.dat', help=\"Name of the input file to be read in\")\n parser.add_argument('-o', '--output', type=str, default='pes.pdf', help=\"Name of the output file to be created.\")\n\n # Store all the arguments from argparse into class variabes\n args = parser.parse_args()\n cls.infile_name = args.input\n cls.outfile_name = args.output\n\n return None", "def get_parser():\n # parse parameters\n parser = argparse.ArgumentParser(description=\"Evaluate sentences with RTTL\")\n\n # main parameters\n parser.add_argument(\"--dump_path\", type=str, default=\"./dumped/\", help=\"Experiment dump path\")\n parser.add_argument(\"--exp_name\", type=str, default=\"\", help=\"Experiment name\")\n parser.add_argument(\"--exp_id\", type=str, default=\"\", help=\"Experiment ID\")\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"Number of sentences per batch\")\n\n # model / output paths\n parser.add_argument(\"--model_path\", type=str, default=\"\", help=\"Model path\")\n parser.add_argument(\"--output_path\", type=str, default=\"\", help=\"Output path for scores\")\n parser.add_argument(\"--input_path\", type=str, default=\"\", help=\"Input path for source sentences\")\n\n # parser.add_argument(\"--max_vocab\", type=int, default=-1, help=\"Maximum vocabulary size (-1 to disable)\")\n # parser.add_argument(\"--min_count\", 
type=int, default=0, help=\"Minimum vocabulary count\")\n\n # source language / target language\n parser.add_argument(\"--src_lang\", type=str, default=\"\", help=\"Source language\")\n parser.add_argument(\"--tgt_lang\", type=str, default=\"\", help=\"Target language\")\n\n return parser", "def parse() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--config\",\n \"-c\",\n default=\"qwauto.cfg\",\n help=\"Config file. Defaults to qwauto.cfg.\",\n )\n return parser.parse_args()" ]
[ "0.7608149", "0.75529045", "0.7468674", "0.7456568", "0.7440838", "0.7423374", "0.7418182", "0.7364865", "0.7351196", "0.7348984", "0.73461956", "0.7296354", "0.72922224", "0.72769743", "0.7264185", "0.7261001", "0.72606117", "0.7259605", "0.72396505", "0.7237962", "0.7237962", "0.7228536", "0.7228536", "0.7217271", "0.7209794", "0.7209791", "0.720152", "0.71837586", "0.71748567", "0.7171061", "0.716123", "0.7160293", "0.71598417", "0.71557313", "0.71446735", "0.7140574", "0.71365", "0.7132725", "0.71281886", "0.7120523", "0.71187145", "0.7118517", "0.7111028", "0.7110445", "0.71023333", "0.70945144", "0.7093135", "0.7083882", "0.70808214", "0.70780015", "0.7072352", "0.7061662", "0.70591015", "0.7045481", "0.70448625", "0.7037906", "0.7029236", "0.7027411", "0.70268387", "0.70264643", "0.6997543", "0.69960123", "0.6978938", "0.69724727", "0.6964311", "0.69609183", "0.6958134", "0.6943388", "0.69393116", "0.69390166", "0.69279456", "0.69228476", "0.69215906", "0.6916116", "0.69127375", "0.6908005", "0.690769", "0.6903063", "0.6901097", "0.6886711", "0.6882429", "0.6882429", "0.6882429", "0.68797064", "0.6879066", "0.68757874", "0.68744946", "0.6874464", "0.68730146", "0.6871271", "0.6867213", "0.6866756", "0.6864173", "0.68640834", "0.68632597", "0.6852751", "0.6849868", "0.6840402", "0.6837431", "0.68367857", "0.6834035" ]
0.0
-1
Fetch Forex datasets. Fetches the ECB Forex and Coindesk Bitcoin datasets. More info at
def fetch(start=date(2015, 1, 1), end=date.today(), currency_1='USD', currency_2='EUR'):
    if currency_1 == 'BTC':
        X = _load_bitcoin(start=start, end=end, currency=currency_2)
        descr = 'BTC-' + str(currency_2)
    elif currency_2 == 'BTC':
        X = _load_bitcoin(start=start, end=end, currency=currency_1)
        descr = 'BTC-' + str(currency_1)
    else:
        X = _load_forex(start=start, end=end, currency_1=currency_1, currency_2=currency_2)
        descr = str(currency_1) + '-' + str(currency_2)
    descr = descr + start.strftime('%Y-%m-%d') + '-' + end.strftime('%Y-%m-%d')
    return Bunch(data=X, target=None, data_test=None, target_test=None, inner_cv=None,
                 outer_cv=None, DESCR=descr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch(tickers: List[str], limit:Optional[int]=None):\n srs = [tck.split(\".\")[1] for tck in tickers]\n resp = requests.get(URL, verify=False)\n if resp.ok:\n df = pd.read_csv(StringIO(resp.text), delimiter=\";\", decimal=\",\")\n else:\n logger.error(f\"Data from {resp.url} not availbe at the moment\")\n print(\"Data not available\")\n df[\"dates\"] = df.loc[:, [\"CO_ANO\", \"CO_MES\"]].apply(lambda x: dt(x[0], x[1], 1), axis=1)\n df[\"series\"] = df.loc[:, [\"TIPO\", \"TIPO_INDICE\"]].apply(lambda x: f\"{x[0]}_{x[1]}\", axis=1)\n dff = df.pivot(index=\"dates\", columns=\"series\", values=\"INDICE\").loc[:, srs]\n df_final = dff if limit is None else dff.tail(limit)\n for col in df_final:\n for ind in df_final.index:\n add_obs(f\"COMEX.{col}\", ind, df_final.loc[ind, col])", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def download_data():\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n FacebookAdsApi.init(config.app_id(),\n config.app_secret(),\n config.access_token())\n ad_accounts = _get_ad_accounts()\n target_accounts = list(filter(None, config.target_accounts().split(',')))\n if len(target_accounts) > 0:\n logging.info('the app can see %s accounts but the configuration specified only %s target accounts: %s', len(ad_accounts), len(target_accounts), ', '.join(target_accounts))\n ad_accounts = [ad_account for ad_account in ad_accounts if ad_account['account_id'] in config.target_accounts()]\n logging.info('after filtering %s accounts will be downloaded: %s', len(target_accounts), ', '.join(target_accounts))\n download_data_sets(ad_accounts)", "def download_data_sets(ad_accounts: [adaccount.AdAccount]):\n download_account_structure(ad_accounts)\n download_ad_performance(ad_accounts)", "def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": 
\"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def fetch_census_data(self, states):\n print('Fetching census data')\n for table in CensusTable.objects.all():\n api = self.get_series(table.series)\n for variable in table.variables.all():\n estimate = '{}_{}'.format(\n table.code,\n variable.code\n )\n print('>> Fetching {} {} {}'.format(\n table.year,\n table.series,\n estimate\n ))\n for state in tqdm(states):\n self.get_county_estimates_by_state(\n api=api,\n table=table,\n variable=variable,\n estimate=estimate,\n state=state,\n )", "def download_all(): #@save\n for name in DATA_HUB:\n download(name)", "def _retrieve_data(self, log, progressbar, files):\n # ESGF frequently doesn't work. Until I get a document from them\n # that specifies a reliable API, I'm giving up.\n msg = \"ESGF has become too unreliable, so it's temporarily unsupported.\"\n raise NotImplementedError(msg)\n# login_successful = self._authenticator.login()\n# if not login_successful:\n# self._app.logger.warn(\"Failed to login.\")\n# session = self._authenticator.session\n\n temp_ds = []\n url_length = len(files)\n session = None\n\n # Add two to the progress bar. One for just starting, and another\n # for when it's all finished. Without these extra, the user can be\n # looking at a blank progress bar for the whole time, since _clean()\n # takes so long.\n progressbar.start(2*url_length)\n for i, remotefile in files:\n\n # The remotefile is just the filename, which is nicer for display.\n # Need the full url.\n url = self._url_from_file(remotefile)\n if session is None and self._authenticator.login(url):\n session = self._authenticator.session\n \n if session is not None:\n xdataset = xr.open_dataset(url,\n decode_cf=False,\n engine='pydap',\n session=session)\n msg = \"Cleaning: {0}.\".format(remotefile)\n# # Normalize it.\n# # FIX ME: Consider moving this to another place. This\n# # operation is the biggest bottleneck of this searching and\n# # retrieving data.\n self._clean(x)\n\n temp_ds.append(xdataset)\n msg = \"Retained: {0}\".format(filename)\n log.debug(msg) \n progressbar.update(msg)\n \n else:\n msg = \"Login failed.\"\n print msg\n log.debug(msg)\n progressbar.update(msg)\n\n # Don't stay logged on.\n self._authenticator.logout()\n\n # Return the list of xarray Dataset objects. 
The Data_repospecset data\n # structure can't hold the datasets thus far collected because, in\n # general, their coordinates will be defined on different lattices.\n return temp_ds", "def download_datasets():\n if not os.path.exists(\"__data__/cornell/movie_conversations.txt\") \\\n or not os.path.exists(\"__data__/cornell/movie_lines.txt\"):\n subprocess.call(['scripts/download_cornell.sh'])\n if not os.path.isdir('__data__/opensubs'):\n subprocess.call(['scripts/download_opensubs.sh'])", "def _fetch_data(self, samples):\n pass", "def _fetch_data(self):\n pass", "def fetch_email_tickets(data_home=None, subset='train', categories=None,\n shuffle=True, random_state=42,\n download_if_missing=True):\n\n data_home = get_data_home(data_home=data_home)\n cache_path = _pkl_filepath(data_home, CACHE_NAME)\n email_tickets_home = os.path.join(data_home, \"email_tickets_home\")\n print(data_home)\n print(cache_path)\n print(email_tickets_home)\n cache = None\n if os.path.exists(cache_path):\n try:\n with open(cache_path, 'rb') as f:\n compressed_content = f.read()\n uncompressed_content = codecs.decode(\n compressed_content, 'zlib_codec')\n cache = pickle.loads(uncompressed_content)\n except Exception as e:\n print(80 * '_')\n print('Cache loading failed')\n print(80 * '_')\n print(e)\n\n if cache is None:\n if download_if_missing:\n logger.info(\"Downloading email tickets dataset. \"\n \"This may take a few minutes.\")\n cache = download_email_tickets(target_dir=email_tickets_home,\n cache_path=cache_path)\n else:\n raise IOError('Email tickets dataset not found')\n\n if subset in ('train', 'test'):\n data = cache[subset]\n elif subset == 'all':\n data_lst = list()\n target = list()\n filenames = list()\n for subset in ('train', 'test'):\n data = cache[subset]\n data_lst.extend(data.data)\n target.extend(data.target)\n filenames.extend(data.filenames)\n\n data.data = data_lst\n data.target = np.array(target)\n data.filenames = np.array(filenames)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n\n data.description = 'the email tickets dataset'\n\n # if 'headers' in remove:\n # data.data = [strip_header(text) for text in data.data]\n # if 'footers' in remove:\n # data.data = [strip_footer(text) for text in data.data]\n # if 'quotes' in remove:\n # data.data = [strip_quoting(text) for text in data.data]\n\n if categories is not None:\n # print(data.target_names)\n labels = [(data.target_names.index(cat), cat) for cat in categories]\n # Sort the categories to have the ordering of the labels\n labels.sort()\n labels, categories = zip(*labels)\n mask = np.in1d(data.target, labels)\n data.filenames = data.filenames[mask]\n data.target = data.target[mask]\n # searchsorted to have continuous labels\n data.target = np.searchsorted(labels, data.target)\n data.target_names = list(categories)\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[mask]\n data.data = data_lst.tolist()\n\n if shuffle:\n random_state = check_random_state(random_state)\n indices = np.arange(data.target.shape[0])\n random_state.shuffle(indices)\n data.filenames = data.filenames[indices]\n data.target = data.target[indices]\n # Use an object array to shuffle: avoids memory copy\n data_lst = np.array(data.data, dtype=object)\n data_lst = data_lst[indices]\n data.data = data_lst.tolist()\n\n return data", "def get_dataset(FOLD, AR_PERCENTAGE, d_type='yelp', AUTHOR='inf', POST='inf'):\n global AR_TYPE\n\n # dataset = 
loader.load(d_type, AUTHOR, POST)\n first_dataset = loader.unimportant_load(AUTHOR, POST * FOLD, AR_TYPE)\n datasets = first_dataset.fold_to(FOLD)\n \n for i in range(0, len(datasets)):\n dataset = datasets[i]\n dataset.divide_ar_ir(AR_PERCENTAGE)\n texts = []\n\n # check if we have this dataset already calculated.\n \n ir_filename = 'processed/' + get_ir_identifier(d_type, i, AUTHOR, POST)\n ar_filename = 'processed/' + get_ar_identifier(d_type, i, AUTHOR, POST)\n\n ir_features = None\n if os.path.isfile(ir_filename):\n print '@get: we have the file', ir_filename, 'and going to load it.'\n with open(ir_filename, 'rb') as fp:\n ir_features = pickle.load(fp)\n \n ar_features = None\n if os.path.isfile(ar_filename):\n print '@get: we have the file', ar_filename, 'and going to load it.'\n with open(ar_filename, 'rb') as fp:\n ar_features = pickle.load(fp)\n\n\n if ir_features is not None:\n for author in dataset.authors:\n dataset.features[author][-1] = ir_features[author]\n\n if ar_features is not None:\n for author in dataset.authors:\n dataset.features[author][:-1] = ar_features[author]\n\n for author in dataset.authors:\n if ar_features is None:\n texts.extend(dataset.get_ars(author))\n if ir_features is None: \n texts.append(dataset.get_ir(author))\n\n print '@getting_features, #dataset'#, index_fold\n pool = Pool(processes=NUMBER_OF_CORES)\n it = pool.imap(get_dataset_features, texts)\n pool.close()\n pool.join()\n\n print '@getting_features FINISHED, adding features to dictionary'\n for author in dataset.authors:\n # for each ar + ir, get back the features\n if ar_features is None:\n for i in range(0, dataset.get_ar_size(author)):\n dataset.put_feature(author, i, it.next())\n if ir_features is None:\n dataset.put_feature(author, dataset.get_ar_size(author), it.next())\n\n if ir_features is None:\n print '@get: we DONOT have the file', ir_filename, 'is going to be created and saved.'\n with open(ir_filename, 'wb') as fp:\n tmp = dict()\n for key, value in dataset.features.iteritems():\n tmp[key] = value[-1]\n pickle.dump(tmp, fp)\n\n if ar_features is None:\n print '@get: we DONOT have the file', ar_filename, 'is going to be created and saved.'\n with open(ar_filename, 'wb') as fp:\n tmp = defaultdict(list)\n for key, value in dataset.features.iteritems():\n tmp[key] = value[:-1]\n pickle.dump(tmp, fp)\n\n return datasets", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def download_models_and_data():\n\n for file in DATA_FILES:\n download_file(file[\"url\"], file[\"path\"])", "def download(self, verbose):\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/\")\n # Vaccinations\n v_rec_cols = [\n \"date\", \"location\", \"iso_code\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\"]\n v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)\n v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=[\"location\", \"vaccines\"])\n v_df = v_rec_df.merge(v_loc_df, how=\"left\", on=\"location\")\n # Tests\n pcr_rec_cols = [\"ISO code\", \"Date\", \"Daily change in cumulative total\", \"Cumulative total\"]\n pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)\n pcr_df = pcr_df.rename(columns={\"ISO code\": \"iso_code\", \"Date\": \"date\"})\n pcr_df[\"cumsum\"] = pcr_df.groupby(\"iso_code\")[\"Daily change in cumulative total\"].cumsum()\n pcr_df = pcr_df.assign(tests=lambda x: x[\"Cumulative total\"].fillna(x[\"cumsum\"]))\n # Combine data (vaccinations/tests)\n df = 
v_df.set_index([\"iso_code\", \"date\"])\n df = df.combine_first(pcr_df.set_index([\"iso_code\", \"date\"]).loc[:, [\"tests\"]])\n df = df.reset_index()\n # Location (country/province)\n df[\"location\"] = df[\"location\"].replace(\n {\n # COG\n \"Congo\": \"Republic of the Congo\",\n }\n )\n df = df.loc[~df[\"iso_code\"].str.contains(\"OWID_\")]\n df[\"location\"] = df.groupby(\"iso_code\")[\"location\"].bfill()\n df.loc[df[\"location\"] == df[\"iso_code\"], \"location\"] = None\n df.loc[df[\"location\"].isna(), \"location\"] = df.loc[df[\"location\"].isna(), \"iso_code\"].apply(\n lambda x: coco.convert(x, to=\"name_short\", not_found=None))\n df[self.PROVINCE] = self.UNKNOWN\n return df", "async def fetch_deposit_withdraw_fees(self, codes: Optional[List[str]] = None, params={}):\n await self.load_markets()\n assets = await self.publicGetWalletAssets(params)\n #\n # [\n # {\n # asset: 'XBT',\n # currency: 'XBt',\n # majorCurrency: 'XBT',\n # name: 'Bitcoin',\n # currencyType: 'Crypto',\n # scale: '8',\n # enabled: True,\n # isMarginCurrency: True,\n # minDepositAmount: '10000',\n # minWithdrawalAmount: '1000',\n # maxWithdrawalAmount: '100000000000000',\n # networks: [\n # {\n # asset: 'btc',\n # tokenAddress: '',\n # depositEnabled: True,\n # withdrawalEnabled: True,\n # withdrawalFee: '20000',\n # minFee: '20000',\n # maxFee: '10000000'\n # }\n # ]\n # },\n # ...\n # ]\n #\n return self.parse_deposit_withdraw_fees(assets, codes, 'asset')", "def fetch_trading_fees(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalance(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # ...\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # ...\n # }\n # }\n #\n fees = self.safe_value(response, 'Fees', {})\n keys = list(fees.keys())\n result = {}\n for i in range(0, len(keys)):\n marketId = keys[i]\n symbol = self.safe_symbol(marketId)\n fee = self.safe_value(fees, marketId)\n makerString = self.safe_string(fee, 'FeeMaker')\n takerString = self.safe_string(fee, 'FeeTaker')\n maker = self.parse_number(Precise.string_div(makerString, '100'))\n taker = self.parse_number(Precise.string_div(takerString, '100'))\n result[symbol] = {\n 'info': fee,\n 'symbol': symbol,\n 'taker': taker,\n 'maker': maker,\n 'percentage': True,\n 'tierBased': True,\n }\n return result", "def fetch_data(self):", "def ez_get_datasets(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_datasets\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def download(self, verbose):\n\n # Download datasets\n if verbose:\n print(\"Retrieving datasets from COVID-19 Open Data by Google Cloud Platform https://github.com/GoogleCloudPlatform/covid-19-open-data\")\n # Index\n i_cols = [\"location_key\", \"country_name\", \"subregion1_name\", \"subregion2_name\", 
\"iso_3166_1_alpha_3\"]\n i_df = pd.read_csv(self.URL_I, usecols=i_cols)\n # Mobility\n m_df = pd.read_csv(self.URL_M)\n m_df = (m_df.set_index([\"date\", \"location_key\"]) + 100).reset_index()\n # Combine data\n df = m_df.merge(i_df, how=\"left\", on=\"location_key\")\n # Location (country/province)\n df = df.loc[df[\"subregion2_name\"].isna()]\n df[self.PROVINCE] = df[\"subregion1_name\"].fillna(self.UNKNOWN).apply(unidecode)\n df[\"country_name\"] = df[\"country_name\"].replace(\n {\n # CIV\n \"Ivory Coast\": \"Cote d'Ivoire\",\n }\n )\n return df", "def download_coco_dataset():\n # Create file structure\n os.makedirs(os.path.join(\"data\", \"coco\", \"train\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"dev\"), exist_ok=True)\n os.makedirs(os.path.join(\"data\", \"coco\", \"test\"), exist_ok=True)\n # Download the train, dev and test datasets\n print(\"Downloading COCO dataset.\")\n url = \"http://images.cocodataset.org/zips/train2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"train2014.zip\"))\n url = \"http://images.cocodataset.org/zips/val2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"val2014.zip\"))\n url = \"http://images.cocodataset.org/zips/test2014.zip\"\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, os.path.join(\"data\", \"coco\", \"test2014.zip\"))\n print(\"Done downloading COCO dataset.\")\n # Unzip the files\n print(\"Extracting COCO dataset.\")\n # Extract Train dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"train2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"train2014\"),\n os.path.join(\"data\", \"coco\", \"train\", \"dummy\"),\n )\n # Extract Validation dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"val2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"val2014\"),\n os.path.join(\"data\", \"coco\", \"dev\", \"dummy\"),\n )\n # Extract Test dataset\n zip_ref = zipfile.ZipFile(os.path.join(\"data\", \"coco\", \"test2014.zip\", \"r\"))\n zip_ref.extractall(os.path.join(\"data\", \"coco\"))\n shutil.move(\n os.path.join(\"data\", \"coco\", \"test2014\"),\n os.path.join(\"data\", \"coco\", \"test\", \"dummy\"),\n )\n print(\"Done extracting COCO dataset.\")", "def get_datasets():\n data = request.get_json()\n return jsonify(result=Tree.datasets(data['field']))", "def get_data(self,tickers,alpha,max_retries = 5):\n data = []\n skipped = []\n for ticker in tqdm(tickers,desc = \"Acquiring data\"):\n try:\n company = Company(ticker,alpha = alpha)\n data.append(company)\n except Exception as e:\n time.sleep(10)\n skipped.append(ticker)\n\n for i in range(max_retries):\n new_data,skipped = self.get_data(skipped,alpha = alpha,max_retries = 0)\n data.extend(new_data)\n if len(skipped) == 0:\n break\n\n return data,skipped", "def fxempire1(site):\n url = \"https://www.fxempire.com/api/v1/en/markets/list\"\n headers = {\n \"authority\": \"www.fxempire.com\",\n \"method\": \"GET\",\n \"path\": \"/api/v1/en/markets/list\",\n \"scheme\": \"https\",\n \"accept\": (\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,\"\n + \"image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": 
\"max-age=0\",\n \"dnt\": \"1\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\"\n + \" (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()\n data = {}\n for item in ret[\"forex\"]:\n if item:\n try:\n pair = item[\"name\"].replace(\"/\", \":\")\n price = item[\"value\"]\n data[pair] = float(price)\n except:\n pass\n for item in ret[\"commodities\"]:\n try:\n if item[\"symbol\"] in [\"XAUUSD\", \"XAGUSD\"]:\n pair = \"USD:\" + item[\"symbol\"].replace(\"USD\", \"\")\n price = 1 / float(item[\"value\"])\n data[pair] = price\n except:\n pass\n data = {k: v for k, v in data.items() if \"RUB\" not in k} # RUBLE is stale\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def download_fermi_crab_3fhl():\n download_data_files(FILENAMES_FERMI_3FHL_CRAB)", "def fetch_wallet_balances(wallets, fiat, **modes):\n price_fetch = set([x[0] for x in wallets])\n balances = {}\n prices = {}\n\n fetch_length = len(wallets) + len(price_fetch)\n\n if not modes.get('async', False):\n # synchronous fetching\n for crypto in price_fetch:\n prices[crypto] = get_current_price(crypto, fiat, report_services=True, **modes)\n\n for crypto, address in wallets:\n balances[address] = get_address_balance(crypto, address.strip(), **modes)\n\n else:\n # asynchronous fetching\n if modes.get('verbose', False):\n print(\"Need to make\", fetch_length, \"external calls\")\n\n with futures.ThreadPoolExecutor(max_workers=int(fetch_length / 2)) as executor:\n future_to_key = dict(\n (executor.submit(\n get_current_price, crypto, fiat, report_services=True, **modes\n ), crypto) for crypto in price_fetch\n )\n\n future_to_key.update(dict(\n (executor.submit(\n get_address_balance, crypto, address.strip(), **modes\n ), address) for crypto, address in wallets\n ))\n\n done, not_done = futures.wait(future_to_key, return_when=futures.FIRST_EXCEPTION)\n if len(not_done) > 0:\n raise not_done.pop().exception()\n\n for future in done:\n key = future_to_key[future]\n if len(key) > 5: # this will break if a crypto symbol is longer than 5 chars.\n which = balances\n else:\n which = prices\n\n res = future.result()\n which[key] = res\n\n ret = []\n\n for crypto, address in wallets:\n crypto_value = balances[address]\n sources, fiat_price = prices[crypto]\n ret.append({\n 'crypto': crypto,\n 'address': address,\n 'crypto_value': crypto_value,\n 'fiat_value': crypto_value * fiat_price,\n 'conversion_price': fiat_price,\n 'price_source': sources[0].name\n })\n\n return ret", "def fetch_fuel_data():\n\treturn requests.get('http://www.fueleconomy.gov/ws/rest/fuelprices').text", "def get_datasets(config: ModelSettings, df: pd.DataFrame):\n train_filenames = df.loc[df.train_data == 1, \"filename\"].values\n val_filenames = df.loc[df.val_data == 1, \"filename\"].values\n test_filenames = df.loc[df.test_data == 1, \"filename\"].values\n\n train_zspacings = df.loc[df.train_data == 1, \"pixel_spacingz\"].values\n val_zspacings = df.loc[df.val_data == 1, \"pixel_spacingz\"].values\n test_zspacings = df.loc[df.test_data == 1, \"pixel_spacingz\"].values\n\n train_dataset = BPRDataset(\n 
data_path=config.data_path,\n filenames=train_filenames,\n z_spacings=train_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-train\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n val_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=val_filenames,\n z_spacings=val_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-val\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n test_dataset = BPRDataset(\n data_path=config.data_path,\n filenames=test_filenames,\n z_spacings=test_zspacings,\n landmark_path=config.landmark_path,\n landmark_sheet_name=\"landmarks-test\",\n random_seed=config.random_seed,\n custom_transform=config.custom_transform,\n albumentation_transform=config.albumentation_transform,\n equidistance_range=config.equidistance_range,\n num_slices=config.num_slices,\n )\n\n return train_dataset, val_dataset, test_dataset", "def get_datasets(request):\n from seed.models import obj_to_dict\n org = Organization.objects.get(pk=request.GET.get('organization_id'))\n datasets = []\n for d in ImportRecord.objects.filter(super_organization=org):\n importfiles = [obj_to_dict(f) for f in d.files]\n dataset = obj_to_dict(d)\n dataset['importfiles'] = importfiles\n if d.last_modified_by:\n dataset['last_modified_by'] = d.last_modified_by.email\n dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(\n import_file__in=d.files,\n canonicalbuilding__active=True,\n ).count()\n dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)\n datasets.append(dataset)\n\n return {\n 'status': 'success',\n 'datasets': datasets,\n }", "def get_data(datadir):\n return sklearn.datasets.fetch_california_housing(\n datadir,\n return_X_y=True)", "def fetch_tides(start_date,end_date,cache_dir,\n station=None,label=None,\n days_per_request='5D',cache_only=False):\n if station is not None:\n station_meta=find_station(station=station,cache_dir=cache_dir)\n else:\n assert label is not None,\"Specify one of station or label\"\n station_meta=find_station(label=label,cache_dir=cache_dir)\n if station_meta is not None:\n station=station_meta['stationReference']\n \n fmt_date=lambda d: utils.to_datetime(d).strftime(\"%Y-%m-%d\")\n\n datasets=[]\n\n for interval_start,interval_end in periods(start_date,end_date,days_per_request):\n if cache_dir is not None:\n begin_str=utils.to_datetime(interval_start).strftime('%Y-%m-%d')\n end_str =utils.to_datetime(interval_end).strftime('%Y-%m-%d')\n\n cache_fn=os.path.join(cache_dir,\n 'uk_tides',\n \"%s_%s_%sa.nc\"%(station,\n begin_str,\n end_str))\n else:\n cache_fn=None\n\n ds=None\n log.debug(\"cache_fn: %s\"%cache_fn)\n \n if (cache_fn is not None) and os.path.exists(cache_fn):\n log.debug(\"Cached %s -- %s\"%(interval_start,interval_end))\n ds=xr.open_dataset(cache_fn)\n elif cache_only:\n continue\n if (not cache_only) and (ds is None):\n log.info(\"Fetching %s -- %s\"%(interval_start,interval_end))\n\n params=dict(startdate=fmt_date(interval_start),\n enddate=fmt_date(interval_end),\n _limit=2000)\n\n url=f\"{root}/id/stations/{station}/readings\"\n \n req=requests.get(url,params=params)\n try:\n data=req.json()\n except ValueError: # 
thrown by json parsing\n log.warning(\"Likely server error retrieving JSON data from environment.data.gov.uk\")\n data=dict(error=dict(message=\"Likely server error\"))\n break\n\n if 'error' in data:\n msg=data['error']['message']\n if \"No data was found\" in msg:\n # station does not have this data for this time.\n log.warning(\"No data found for this period\")\n else:\n # Regardless, if there was an error we got no data.\n log.warning(\"Unknown error - got no data back.\")\n log.debug(data)\n \n log.debug(\"URL was %s\"%(req.url))\n continue\n\n ds=json_to_ds(data,params)\n\n if ds is not None:\n if cache_fn is not None:\n dname=os.path.dirname(cache_fn)\n if not os.path.exists(dname):\n os.makedirs(dname)\n ds.to_netcdf(cache_fn)\n else:\n continue\n # seems these don't come in order\n ds=ds.sortby(ds.time)\n\n if len(datasets)>0:\n # avoid duplicates in case they overlap\n ds=ds.isel(time=ds.time.values>datasets[-1].time.values[-1])\n datasets.append(ds)\n\n if len(datasets)==0:\n # could try to construct zero-length dataset, but that sounds like a pain\n # at the moment.\n return None \n\n if len(datasets)>1:\n dataset=xr.concat( datasets, dim='time')\n else:\n dataset=datasets[0].copy(deep=True)\n # better not to leave these lying around open\n for d in datasets:\n d.close()\n\n # add in metadata\n dataset['lat']=(), station_meta['lat']\n dataset['lon']=(), station_meta['long']\n dataset['label']=(), station_meta['label']\n dataset['station']=(),station_meta['stationReference']\n dataset['value'].attrs['units']=station_meta['measures'][0]['unitName']\n\n return dataset", "def get_datasets(business_data_file, enter_data_file, politics_data_file, sport_data_file, tech_data_file):\n # Load data from files\n business_examples = list(open(business_data_file, \"r\").readlines())\n business_examples = [s.strip() for s in business_examples]\n enter_examples = list(open(enter_data_file, \"r\").readlines())\n enter_examples = [s.strip() for s in enter_examples]\n politics_examples = list(open(politics_data_file, \"r\").readlines())\n politics_examples = [s.strip() for s in politics_examples]\n sport_examples = list(open(sport_data_file, \"r\").readlines())\n sport_examples = [s.strip() for s in sport_examples]\n tech_examples = list(open(tech_data_file, \"r\").readlines())\n tech_examples = [s.strip() for s in tech_examples]\n\n datasets = dict()\n datasets['data'] = business_examples + enter_examples + politics_examples + sport_examples + tech_examples\n target = [0 for x in business_examples] + [1 for x in enter_examples] + [2 for x in politics_examples] + [3 for x in sport_examples] + [4 for x in tech_examples]\n datasets['target'] = target\n datasets['target_names'] = ['business_examples', 'enter_examples', 'politics_examples', 'sport_examples', 'tech_examples']\n return datasets", "def download_dataset(self):\n dataset_name = ADE20K_URL.split(\"/\")[-1].split(\".\")[0]\n req = urllib.request.Request(ADE20K_URL, method=\"HEAD\")\n size_file = urllib.request.urlopen(req).headers[\"Content-Length\"]\n download = \"n\"\n while download != \"y\":\n if not self.yes_all:\n download = input(f\"You are about to download {dataset_name} ({size_file} bytes) to the temporary folder {self.tmp_path}. Do you want to continue? 
[y/n] \\n\")\n if self.yes_all or download == \"y\":\n logger.info(f\"Downloading dataset {dataset_name} at {ADE20K_URL} to temporary folder {self.tmp_path}...\")\n zip_path, hdrs = urllib.request.urlretrieve(ADE20K_URL, f\"{self.tmp_path}/{dataset_name}.zip\")\n logger.info(f\"Extracting {zip_path} to temporary folder {self.tmp_path}...\")\n with zipfile.ZipFile(f\"{zip_path}\", 'r') as z:\n z.extractall(f\"{self.tmp_path}\")\n self.input_data_path = zip_path[:-4]\n break\n elif download == \"n\":\n logger.error(f\"Cannot pursue without downloading the dataset.\")\n sys.exit()\n else:\n logger.error(\"Please enter a valid answer (y or n).\")", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been saved.')\n else:\n log.info('USDA data file already exists.')", "def _datasets_request(self, method, dataset_id=None, versions_request=False, version_id=None, state=None):\n path = ['datasets']\n if dataset_id is not None:\n path.append(dataset_id)\n if versions_request or version_id is not None:\n if dataset_id is not None:\n path.append('versions')\n else:\n Mixpanel.LOGGER.warning('dataset_id parameter required to make a request to /versions')\n return\n if version_id is not None:\n path.append(str(version_id))\n assert self.token, 'token required for /datasets API calls!'\n arguments = {'token': self.token}\n if state is not None:\n arguments['state'] = json.dumps(state)\n response = self.request(self.BETA_IMPORT_API, path, arguments, method=method)\n Mixpanel.LOGGER.debug('Response: ' + response)\n json_response = json.loads(response)\n if 'error' not in response:\n if 'data' in response:\n data = json_response['data']\n if data is None:\n return True\n else:\n return data\n else:\n Mixpanel.LOGGER.warning(\"Response doesn't contain a data key, got: \" + str(response))\n return False\n else:\n Mixpanel.LOGGER.warning('Dataset operation error: ' + response['error'])\n return False", "def download_dataset(self):\n raise NotImplementedError", "def fetch_scil_b0():\n zipname = 'datasets_multi-site_all_companies'\n url = 'http://scil.dinf.usherbrooke.ca/wp-content/data/'\n uraw = url + zipname + '.zip'\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, zipname)\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading SCIL b=0 datasets from multiple sites and multiple companies (9.2MB)...')\n opener = urlopen(uraw)\n open(folder+'.zip', 'wb').write(opener.read())\n\n print('Unziping '+folder+'.zip ...')\n zip = zipfile.ZipFile(folder+'.zip', 'r')\n zip.extractall(dipy_home)\n\n print('Done.')\n print('Files copied in folder %s' % dipy_home)\n else:\n print('Dataset already in place. 
If you want to fetch again please first remove folder %s ' % dipy_home)", "def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def download_france_data():\n start = time.time()\n oc19_file = \"opencovid19-fr-chiffres-cles.csv\"\n gouv_file = \"data-gouv-fr-chiffres-cles.csv\"\n oc19_url = \"https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv\"\n gouv_url = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n # run requests to download and save the data\n myfile = requests.get(oc19_url)\n with open(oc19_file, \"wb\") as f:\n f.write(myfile.content)\n file = requests.get(gouv_url)\n with open(gouv_file, \"wb\") as f:\n f.write(file.content)\n # Load both csv into pandas\n data = pd.read_csv(oc19_file)\n data_gouv = pd.read_csv(gouv_file)\n # Fill in some of the metadata that is not present in the government data\n data_gouv[\"granularite\"] = \"pays\"\n data_gouv[\"maille_code\"] = \"FRA\"\n data_gouv[\"maille_nom\"] = \"France\"\n data[\"source_nom\"] = \"Santé publique France Data\"\n data_gouv[\"source_url\"] = \"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617\"\n data_gouv.rename(DATA_GOUV_2_OPEN, axis=\"columns\", inplace=True)\n end = time.time()\n print(\"Time spent on download_france_data: {0:.5f} s.\".format(end - start)) \n return pd.concat((data, data_gouv), join=\"outer\")", "def get_all_data():\n \n # open the data stored in a file called \"data.json\"\n try:\n fp = open(\"data/data.json\")\n response = simplejson.load(fp)\n # but if that file does not exist, download the data from fusiontables\n except IOError:\n logging.info(\"failed to load file\")\n service = build('fusiontables', 'v1', developerKey=API_KEY)\n query = \"SELECT * FROM \" + TABLE_ID + \" WHERE Animal_Type = 'DOG'\"\n response = service.query().sql(sql=query).execute()\n \n return response", "def download_entire_dataset(dataset_name, num_data, labels, method, cache_dir):\n\n print('Downloading {}...'.format(dataset_name))\n preprocessor = preprocess_method_dict[method]()\n\n # Select the first `num_data` samples from the dataset.\n target_index = numpy.arange(num_data) if num_data >= 0 else None\n dataset_parts = D.molnet.get_molnet_dataset(dataset_name, preprocessor,\n labels=labels,\n target_index=target_index)\n dataset_parts = dataset_parts['dataset']\n\n # Cache the downloaded dataset.\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n for i, part in enumerate(['train', 'valid', 'test']):\n filename = dataset_part_filename(part, num_data)\n path = os.path.join(cache_dir, filename)\n NumpyTupleDataset.save(path, dataset_parts[i])\n return dataset_parts", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def fxcm(site):\n timestamp = int(time.time() * 1000) - 1000\n url = f\"https://ratesjson.fxcm.com/DataDisplayer?t={timestamp}\"\n headers = {\n \"authority\": \"www.fxcm.com\",\n \"method\": \"GET\",\n \"path\": \"/api/v1/en/markets/list\",\n \"scheme\": \"https\",\n \"accept\": (\n 
\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,\"\n + \"image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"dnt\": \"1\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, \"\n + \"like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n # fails during some hours of day\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).text\n # print (ret)\n data = (\n ret.replace(\" \", \"\")\n .replace('null({\"Rates\":', \"\")\n .replace(\",}]});\", \"}]\")\n .replace(\",}\", \"}\")\n )\n # print(data)\n # {\"Symbol\":\"CHFJPY\",\"Bid\":\"1.1\",\"Ask\":\"1.2\",\"Spread\":\"0.1\",\"ProductType\":\"1\",}\n raw = json_loads(data)\n data = {}\n for item in raw:\n symbol = item[\"Symbol\"]\n if symbol.isupper() and (len(symbol) == 6):\n symbol = symbol[:3] + \":\" + symbol[-3:]\n data[symbol] = (float(item[\"Ask\"]) + float(item[\"Bid\"])) / 2\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")", "def download(dataset_name,dataset_url):\n directory = \"tmp\"\n if not os.path.exists(os.path.join(directory,dataset_name)):\n os.makedirs(os.path.join(directory,dataset_name))\n for url, filename in get_all_data(dataset_url):\n if not os.path.exists(os.path.join(directory,dataset_name,filename)):\n print(\"Downloading \"+filename+\":\",)\n ul.urlretrieve(url,os.path.join(directory,dataset_name,filename),reporthook)\n unzip_ecco_tcp_xmls(os.path.join(directory, dataset_name), os.path.join(directory, dataset_name + \"_unzipped\"))\n shutil.rmtree(os.path.join(directory, dataset_name))\n shutil.move(os.path.join(directory, dataset_name + \"_unzipped\"), os.path.join(directory, dataset_name))\n headers_to_csv(directory, dataset_name)\n corpus_to_csv(directory, dataset_name)\n erase_all_files_with_extension(directory, dataset_name, \".hdr\")\n erase_all_files_with_extension(directory, dataset_name, \".xml\")", "def _download_all(update_path=True, verbose=None):\n\n # iterate over dataset\n for ds in dataset_list:\n # call download\n ds().download(update_path=True, verbose=verbose, accept=True)", "def get(datasets_identifiers, identifier_type='hid', history_id=None, retrieve_datatype=None):\n history_id = history_id or os.environ['HISTORY_ID']\n # The object version of bioblend is to slow in retrieving all datasets from a history\n # fallback to the non-object path\n gi = get_galaxy_connection(history_id=history_id, obj=False)\n file_path_all = []\n datatypes_all = []\n\n if type(datasets_identifiers) is not list:\n datasets_identifiers = [datasets_identifiers]\n\n if identifier_type == \"regex\":\n datasets_identifiers = find_matching_history_ids(datasets_identifiers)\n identifier_type = \"hid\"\n\n\n for dataset_id in datasets_identifiers:\n file_path = '/import/%s' % dataset_id\n log.debug('Downloading gx=%s history=%s dataset=%s', gi, history_id, dataset_id)\n # Cache the file requests. E.g. 
in the example of someone doing something\n # silly like a get() for a Galaxy file in a for-loop, wouldn't want to\n # re-download every time and add that overhead.\n if not os.path.exists(file_path):\n hc = HistoryClient(gi)\n dc = DatasetClient(gi)\n history = hc.show_history(history_id, contents=True)\n datasets = {ds[identifier_type]: ds['id'] for ds in history}\n if retrieve_datatype:\n datatypes_all.append({ds[identifier_type]: ds['extension'] for ds in history})\n if identifier_type == 'hid':\n dataset_id = int(dataset_id)\n dc.download_dataset(datasets[dataset_id], file_path=file_path, use_default_filename=False)\n else:\n hc = HistoryClient(gi)\n dc = DatasetClient(gi)\n history = hc.show_history(history_id, contents=True)\n datatypes_all.append({ds[identifier_type]: ds['extension'] for ds in history})\n log.debug('Cached, not re-downloading')\n\n file_path_all.append(file_path)\n\n ## First path if only one item given, otherwise all paths.\n ## Should not break compatibility.\n if retrieve_datatype:\n if len(file_path_all) == 1:\n dataset_number = int(file_path_all[0].strip().split(\"/\")[-1])\n return file_path_all, datatypes_all[0][dataset_number]\n else:\n datatype_multi = dict()\n for i in file_path_all:\n dataset_number = int(i.strip().split(\"/\")[-1])\n datatype_multi[dataset_number] = datatypes_all[0][dataset_number]\n return file_path_all, datatype_multi\n else:\n return file_path_all[0] if len(file_path_all) == 1 else file_path_all", "def get_datasets() -> List[Dataset]:\n\n amzn = Dataset(\n id='amzn', name='Amazon Reviews', language='en',\n description=\"This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\")\n\n cnn = Dataset(\n id='cnn_dailymail', name='CNN/ DailyMail', language='en',\n description='The well-known CNN/ DailyMail data set for text summarization (version 3.0.0). The data has been fetched via HuggingFace Datasets')\n\n swisstext = Dataset(\n id='swisstext', name='SwissText 2019', language='de',\n description='The dataset was published for the SwissText conference 2019. ')\n\n return [amzn, cnn, swisstext]", "def fetch_series(tickers: List[str]) -> List[dict]:\n with requests.Session() as session:\n c = suds.client.Client(\n 'https://www3.bcb.gov.br/sgspub/JSP/sgsgeral/FachadaWSSGS.wsdl',\n transport=suds_requests.RequestsTransport(session))\n \n def _fetch(tck):\n try:\n resp = c.service.getUltimoValorVO(tck)\n if resp is not None:\n return _process_info(resp)\n except:\n tcks_off.append(tck)\n\n with executor() as e:\n ls = list(e.map(_fetch, tickers))\n return ls", "def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! 
Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def _datasets(self):\n return self._flat_data._datasets", "def download_data(key):\n #\"K*%4t3VK0ab%gn\"\n \n # this is a list of all the columns in the busniness database, we only want to grab\n # the ones that are not commented out, for various reasons.\n columns_business = [\n 'business_id',\n # 'name',\n # 'address',\n #'city',\n 'state',\n #'postal_code',\n 'latitude',\n 'longitude',\n # 'stars',\n 'review_count',\n 'attributes_goodforkids',\n #'categories', further processing\n 'is_open',\n # 'hours_monday',\n # 'hours_tuesday',\n # 'hours_wednesday',\n # 'hours_thursday',\n # 'hours_friday',\n # 'hours_saturday',\n # 'hours_sunday',\n 'attributes_restaurantsreservations',\n # 'attributes_goodformeal',\n # 'attributes_businessparking',\n 'attributes_caters',\n 'attributes_noiselevel',\n 'attributes_restaurantstableservice',\n 'attributes_restaurantstakeout',\n 'attributes_restaurantspricerange2',\n 'attributes_outdoorseating',\n 'attributes_bikeparking',\n # 'attributes_ambience',\n 'attributes_hastv',\n 'attributes_wifi',\n 'attributes_alcohol',\n 'attributes_restaurantsattire',\n 'attributes_restaurantsgoodforgroups',\n 'attributes_restaurantsdelivery',\n 'attributes_businessacceptscreditcards',\n 'attributes_businessacceptsbitcoin',\n #'attributes_byappointmentonly',\n #'attributes_acceptsinsurance',\n # 'attributes_music',\n 'attributes_goodfordancing',\n 'attributes_coatcheck',\n 'attributes_happyhour',\n # 'attributes_bestnights',\n 'attributes_wheelchairaccessible',\n 'attributes_dogsallowed',\n # 'attributes_byobcorkage',\n 'attributes_drivethru',\n 'attributes_smoking',\n #'attributes_agesallowed',\n #'attributes_hairspecializesin',\n #'attributes_corkage',\n #'attributes_byob',\n #'attributes_dietaryrestrictions', further processing\n #'attributes_open24hours',\n #'attributes_restaurantscounterservice',\n 'restaurant',\n 'meanfunny',\n 'meanuseful',\n 'avgwordcount',\n 'maxwordcount',\n 'minwordcount',\n 'avgfunnywordcount',\n 'maxfunnywordcount',\n 'avgusefulwordcount',\n 'maxusefulwordcount',\n 'medianwordcount',\n 'upperquartilewordcount',\n 'lowerquartilewordcount',\n 'target']\n \n #this sets up the query we will make to the data base, the upper and lower number of review cut off\n #points, and the columns we want\n list_as_string =', '.join(columns_business)\n u_cutoff=87\n l_cutoff=10\n \n # this connects to the database, gets our data into a dataframe and closes the connection\n con = pg.connect(database=\"postgres\", user=\"flatiron_user_1\", password=key, host=\"34.74.239.44\", port=\"5432\")\n cur = con.cursor()\n \n cur.execute(f\"SELECT {list_as_string} FROM business WHERE restaurant IS true AND REVIEW_COUNT < {u_cutoff} AND REVIEW_COUNT > {l_cutoff}\")\n business_data=cur.fetchall()\n business_data=pd.DataFrame(business_data)\n business_data.columns=columns_business\n return business_data", "def fetch_all(data_sources):\n\n #import pdb;pdb.set_trace()\n retrieved_data_map = {}\n\n for key, datasource in 
data_sources.items():\n retrieved_data_map[key] = datasource.fetch_data()\n\n return retrieved_data_map", "def __get_all_data(self,tickr):\n self.__csvurl=f\"https://query1.finance.yahoo.com/v7/finance/download/{tickr}?period1=1092873600&period2={int(datetime.now().timestamp())}&interval=1d&events=history&includeAdjustedClose=true\"\n s=get_historic_data(self.__csvurl)\n\n \"\"\"you should not be able to access dataframe from outside the class\"\"\"\n df=pd.read_csv(io.StringIO(s.decode('utf-8')))\n df=df.dropna()\n df_columns=['Date','High','Low','Close','Adj Close']\n\n if not set(df_columns).issubset(df.columns):\n raise ValueError(f\"One or more columns are missing {df_columns}\")\n\n if len(df.index)<5:\n raise ValueError(f\"Cannot calculate EMA 5\")\n\n if len(df.index)<20:\n raise ValueError(f\"Cannot calculate SMA 20\")\n\n \"\"\"set date as index (required for filtering,sorting,grouping etc etc\"\"\"\n df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m-%d')\n\n df.set_index(['Date'], inplace=True)\n\n\n return df", "def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n # \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)", "def download_data(dataset_name=None):\r\n\r\n dr = data_resources[dataset_name]\r\n if not authorize_download(dataset_name):\r\n raise Exception(\"Permission to download data set denied.\")\r\n\r\n if dr.has_key('suffices'):\r\n for url, files, suffices in zip(dr['urls'], dr['files'], dr['suffices']):\r\n for file, suffix in zip(files, suffices):\r\n download_url(os.path.join(url,file), dataset_name, dataset_name, suffix=suffix)\r\n else:\r\n for url, files in zip(dr['urls'], dr['files']):\r\n for file in files:\r\n download_url(os.path.join(url,file), dataset_name, dataset_name)\r\n return True", "def pull_usafacts_data(base_url: str, metric: str, logger: Logger, cache: str=None) -> pd.DataFrame:\n # Read data\n df = fetch(base_url.format(metric=metric), cache)\n date_cols = [i for i in df.columns if i.startswith(\"2\")]\n logger.info(\"data retrieved from source\",\n metric=metric,\n num_rows=df.shape[0],\n num_cols=df.shape[1],\n min_date=min(date_cols),\n max_date=max(date_cols),\n checksum=hashlib.sha256(pd.util.hash_pandas_object(df).values).hexdigest())\n df.columns = [i.lower() for i in df.columns]\n # Clean commas in 
count fields in case the input file included them\n df[df.columns[4:]] = df[df.columns[4:]].applymap(\n lambda x: int(x.replace(\",\", \"\")) if isinstance(x, str) else x)\n # Check missing FIPS\n null_mask = pd.isnull(df[\"countyfips\"])\n assert null_mask.sum() == 0\n\n unexpected_columns = [x for x in df.columns if \"Unnamed\" in x]\n unexpected_columns.extend(DROP_COLUMNS)\n\n # Assign Grand Princess Cruise Ship a special FIPS 90000\n # df.loc[df[\"FIPS\"] == 6000, \"FIPS\"] = 90000\n # df.loc[df[\"FIPS\"] == 6000, \"stateFIPS\"] = 90\n\n # Ignore Grand Princess Cruise Ship and Wade Hampton Census Area in AK\n df = df[\n (df[\"countyfips\"] != 6000)\n & (df[\"countyfips\"] != 2270)\n ]\n\n # Change FIPS from 0 to XX000 for statewise unallocated cases/deaths\n unassigned_index = (df[\"countyfips\"] == 0)\n df.loc[unassigned_index, \"countyfips\"] = df[\"statefips\"].loc[unassigned_index].values * 1000\n\n # Conform FIPS\n df[\"fips\"] = df[\"countyfips\"].apply(lambda x: f\"{int(x):05d}\")\n\n\n\n # Drop unnecessary columns (state is pre-encoded in fips)\n try:\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n except KeyError as e:\n raise ValueError(\n \"Tried to drop non-existent columns. The dataset \"\n \"schema may have changed. Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Check that columns are either FIPS or dates\n try:\n columns = list(df.columns)\n columns.remove(\"fips\")\n # Detects whether there is a non-date string column -- not perfect\n # USAFacts has used both / and -, so account for both cases.\n _ = [int(x.replace(\"/\", \"\").replace(\"-\", \"\")) for x in columns]\n except ValueError as e:\n raise ValueError(\n \"Detected unexpected column(s) \"\n \"after dropping DROP_COLUMNS. The dataset \"\n \"schema may have changed. 
Please investigate and \"\n \"amend DROP_COLUMNS.\"\n ) from e\n # Reshape dataframe\n df = df.melt(\n id_vars=[\"fips\"],\n var_name=\"timestamp\",\n value_name=\"cumulative_counts\",\n )\n # timestamp: str -> datetime\n df[\"timestamp\"] = pd.to_datetime(df[\"timestamp\"])\n # Add a dummy first row here on day before first day\n min_ts = min(df[\"timestamp\"])\n df_dummy = df.loc[df[\"timestamp\"] == min_ts].copy()\n df_dummy.loc[:, \"timestamp\"] = min_ts - pd.Timedelta(days=1)\n df_dummy.loc[:, \"cumulative_counts\"] = 0\n df = pd.concat([df_dummy, df])\n # Obtain new_counts\n df.sort_values([\"fips\", \"timestamp\"], inplace=True)\n df[\"new_counts\"] = df[\"cumulative_counts\"].diff() # 1st discrete difference\n # Handle edge cases where we diffed across fips\n mask = df[\"fips\"] != df[\"fips\"].shift(1)\n df.loc[mask, \"new_counts\"] = np.nan\n df.reset_index(inplace=True, drop=True)\n\n # Final sanity checks\n days_by_fips = df.groupby(\"fips\").count()[\"cumulative_counts\"].unique()\n unique_days = df[\"timestamp\"].unique()\n # each FIPS has same number of rows\n if (len(days_by_fips) > 1) or (days_by_fips[0] != len(unique_days)):\n raise ValueError(\"Differing number of days by fips\")\n min_timestamp = min(unique_days)\n max_timestamp = max(unique_days)\n n_days = (max_timestamp - min_timestamp) / np.timedelta64(1, \"D\") + 1\n if n_days != len(unique_days):\n raise ValueError(\n f\"Not every day between {min_timestamp} and \"\n \"{max_timestamp} is represented.\"\n )\n return df.loc[\n df[\"timestamp\"] >= min_ts,\n [ # Reorder\n \"fips\",\n \"timestamp\",\n \"new_counts\",\n \"cumulative_counts\",\n ],\n ]", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = 
model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n 
keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "async def get_datasets(db_pool, query_parameters, include_dataset):\n hit_datasets = []\n miss_datasets = []\n response = []\n dataset_ids = query_parameters[-2]\n\n # Fetch datasets where the variant is found\n hit_datasets = await fetch_resulting_datasets(db_pool, query_parameters)\n LOG.debug(f\"hit_datasets: {hit_datasets}\")\n\n # If the response has to include the datasets where the variant is not found, \n # we want to fetch info about them and shape them to be shown\n if include_dataset in ['ALL', 'MISS']:\n list_all = list(map(int, dataset_ids.split(\",\")))\n LOG.debug(f\"list_all: {list_all}\")\n list_hits = [dict[\"internalId\"] for dict in hit_datasets]\n LOG.debug(f\"list_hits: {list_hits}\")\n accessible_missing = [int(x) for x in list_all if x not in list_hits]\n LOG.debug(f\"accessible_missing: {accessible_missing}\")\n miss_datasets = await fetch_resulting_datasets(db_pool, query_parameters, misses=True, accessible_missing=accessible_missing)\n response = hit_datasets + miss_datasets\n return response", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def datasets(apiKey, payload):\r\n if apiKey is None and os.path.exists(KEY_FILE):\r\n apiKey = _get_saved_key(apiKey)\r\n url = '{}/datasets'.format(USGS_API_ENDPOINT)\r\n payload = {\r\n \"jsonRequest\": payloads.datasets(apiKey, **payload)\r\n }\r\n logger.debug(\"API call URL: {}\".format(url))\r\n logger.debug(\"API call payload: {}\".format(payload))\r\n response = requests.post(url, payload).json()\r\n logger.debug(\"Received response:\\n{}\".format(json.dumps(response, indent=4)))\r\n _catch_usgs_error(response)\r\n\r\n return response", "def main():\n\n data = get_data(URL)\n\n if not data:\n raise ValueError('No data to process')\n\n datacenters = [\n Datacenter(key, value)\n for key, value in data.items()\n ]\n\n pass # the rest of your logic here", "def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url)", "def fetch_cifar100_efficient_kd_dataloaders(args):\n\n loaders = {}\n for mode in [\"train\", \"test\"]:\n dataset = CachedKDDataset(mode=mode)\n loaders[mode] = \\\n torch.utils.data.DataLoader(\n dataset,\n batch_size=args.batch_size,\n shuffle=(mode == \"train\"),\n num_workers=4,\n collate_fn=dict_collate\n )\n\n return loaders[\"train\"], loaders[\"test\"]", "def _get_data_in_api(url: str) -> list:\n\n try:\n resp = requests.request('GET', url, timeout=10)\n resp.raise_for_status\n\n return Froxy._data_filter(resp.text)\n\n except (\n requests.ConnectionError,\n requests.ConnectTimeout,\n 
requests.HTTPError,\n requests.ReadTimeout\n ) as err:\n sys.exit(err)", "def get_datasets(FIELDS='all'):\n dsinfostr = fork_and_get_output(\"zfs list -H -o {0}\".format(FIELDS).split())\n header = get_zfs_ds_header()\n dsinfo = dsinfostr.splitlines()\n dsobjs = []\n for dsstr in dsinfo:\n dsobjs.append(DataZFS(dsstr, header, 'dataset'))\n return dsobjs", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def get(log, session, args):\n url = \"{}datasets/{}\".format(\n http.get_api_url(args.url, args.project),\n args.id)\n log.debug('GET: {}'.format(url))\n response_json = http.get(session, url)\n log.print_json(response_json, \"dataset\", \"get\")", "async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', [])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def covid_fetch():\n #Sets the structure of the data retrieved from the API\n cases_and_deaths = {\n \"date\": \"date\",\n \"areaName\": \"areaName\",\n \"areaCode\": \"areaCode\",\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\n \"newDeathsByDeathDate\": \"newDeathsByDeathDate\",\n \"cumDeathsByDeathDate\": \"cumDeathsByDeathDate\"\n }\n #Sets the filter for the API using config.json\n covid_nation = ['areaType=nation']\n nation = 'areaName=' + str(config_fetcher(\"covid_region\"))\n covid_nation.append(nation)\n\n #Gets API latest data\n covid_api = Cov19API(\n filters = covid_nation,\n structure = cases_and_deaths,\n )\n #Gets data in form of dictionary\n covid_json = covid_api.get_json()\n #Gets timestamp for last update\n 
covid_timestamp = covid_api.last_update\n #Assign data to variables\n covid_data = covid_json['data'] #This formats the data as a list, while I want a dictionary, hence the next line.\n return covid_data", "def fetch_bank_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"bank-marketing\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index().astype(np.float64)\n column_map = {\n \"v1\": \"age\",\n \"v2\": \"job\",\n \"v3\": \"marital\",\n \"v4\": \"education\",\n \"v5\": \"default\",\n \"v6\": \"balance\",\n \"v7\": \"housing\",\n \"v8\": \"loan\",\n \"v9\": \"contact\",\n \"v10\": \"day\",\n \"v11\": \"month\",\n \"v12\": \"duration\",\n \"v13\": \"campaign\",\n \"v14\": \"pdays\",\n \"v15\": \"previous\",\n \"v16\": \"poutcome\",\n }\n if preprocess:\n\n def map_col(col):\n if col.find(\"_\") == -1:\n return column_map[col]\n prefix, suffix = col.split(\"_\")\n return column_map[prefix] + \"_\" + suffix\n\n orig_X.columns = [map_col(col) for col in orig_X.columns]\n age = pd.Series(orig_X[\"age\"] >= 25, dtype=np.float64)\n encoded_X = orig_X.assign(age=age)\n encoded_y = pd.Series(orig_y == 0, dtype=np.float64, name=orig_y.name)\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"age\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, encoded_y, fairness_info\n else:\n orig_X.columns = [column_map[col] for col in orig_X.columns]\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"age\", \"reference_group\": [[25, 1000]]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def downloadDatasets(datasets: Iterable) -> Generator[tuple, None, None]:\n\n for ds in datasets:\n with urllib.request.urlopen(ds) as response:\n\n with tempfile.NamedTemporaryFile(delete=False) as tmp_file:\n shutil.copyfileobj(response, tmp_file)\n\n yield (response.url, tmp_file.name)", "def train(self, force=False):\n return self._fetch_base_data(force)", "def fetchEquityData(from_date,to_date, useCache=False):\n PARAMS = INSIDER_TRADING_PARAM.copy()\n PARAMS[\"from_date\"] = from_date\n PARAMS[\"to_date\"] = to_date\n\n file_path = f\"data/{from_date}__{to_date}.json\"\n\n print(f\"\\nGetting Data from {from_date} to {to_date}\")\n\n data = None\n if os.path.isfile(file_path) and useCache:\n print(\"Using cached Data\")\n with open(file_path, \"r\") as f:\n data = json.load(f)\n else:\n sess = requests.Session()\n print(\"Intializing\")\n r = sess.get(\"https://www.nseindia.com/companies-listing/corporate-filings-insider-trading\", headers=HEADERS)\n print(\"Init Done\")\n\n print(\"Fetching Data From API\")\n res = sess.get(INSIDER_TRADING_BASE_URL, params=PARAMS, headers=HEADERS)\n print(\"Fetch Done\")\n print(res.url)\n if res.status_code == 200:\n data = res.json()[\"data\"]\n if not data:\n return None\n \n if useCache:\n print(\"Saving data for future use\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\") as f:\n json.dump(data,f)\n else:\n return None\n\n df = pd.DataFrame.from_dict(data)\n df = df[df[\"secType\"] == \"Equity Shares\"]\n \n numberColumns = ['secAcq','befAcqSharesNo', 'befAcqSharesPer', 'secVal','afterAcqSharesNo', 'afterAcqSharesPer']\n df[numberColumns] = df[numberColumns].apply(pd.to_numeric, errors=\"coerce\")\n\n dataTimeColumns = ['acqfromDt','acqtoDt','intimDt']\n df[dataTimeColumns] = 
df[dataTimeColumns].apply(pd.to_datetime, errors=\"coerce\")\n\n df = df[['symbol', 'company', 'acqName', 'secType', 'secAcq', 'tdpTransactionType',\n 'personCategory', 'befAcqSharesNo', 'befAcqSharesPer', 'secVal',\n 'afterAcqSharesNo', 'afterAcqSharesPer', 'acqfromDt', 'acqtoDt',\n 'intimDt', 'acqMode']]\n\n return df", "def _getDataSetForFCSFileSample(self):\n\n # Get the dataset for current FCS file sample\n dataSets = searchService.getDataSet(self._entityId)\n if dataSets is None:\n self._message = \"Could not retrieve datasets for \" \\\n \"FCS file with identifier \" + self._entityId + \"!\"\n self._logger.error(self._message)\n else:\n dataSets = [dataSets]\n\n # Return\n return dataSets", "def download(args):\n with_dataset(args, Dataset._download)", "def _get_financials_by_chunk(self, args):\n (istart, iend) = args\n comp_index = self.components.index\n # download financials\n browser=webdriver.Chrome()\n for sym in comp_index[istart:iend]:\n print('Chunk %s-%s: downloading financial data for %s' %(comp_index[istart], comp_index[iend], sym))\n stock = Symbol(sym)\n if 'Exchange' in self.components.columns:\n exch = self.components['Exchange'][sym]\n if type(exch) == pd.Series:\n # unexpected duplicates, e.g. AMOV\n exch = exch.iloc[0]\n if type(exch) == str:\n stock.exch = exch\n stock.get_financials(browser=browser)\n stock.save_financial_data()\n browser.quit()\n return", "def portfolio_download_data(tickers: List[str], dates: List[str],\n time_step: str) -> None:\n\n try:\n function_name: str = portfolio_download_data.__name__\n download_data_tools \\\n .function_header_print_data(function_name, tickers, dates,\n time_step)\n\n init_year = int(dates[0].split('-')[0])\n init_month = int(dates[0].split('-')[1])\n fin_year = int(dates[1].split('-')[0])\n fin_month = int(dates[1].split('-')[1])\n last_day = monthrange(fin_year, fin_month)[1]\n\n init_date: dt = dt(year=init_year, month=init_month, day=1)\n fin_date: dt = dt(year=fin_year, month=fin_month, day=last_day)\n\n # Not all the periods can be combined with the time steps.\n raw_data: pd.DataFrame = \\\n yf.download(tickers=tickers, start=init_date, end=fin_date,\n interval=time_step)['Adj Close']\n # Order DataFrame columns by sector\n raw_data = raw_data[tickers]\n\n if raw_data.isnull().values.any():\n # Remove stocks that do not have data from the initial date\n raw_data = raw_data.dropna(axis=1, thresh=len(raw_data) - 10) \\\n .fillna(method='ffill')\n\n download_data_tools.save_data(raw_data, dates, time_step)\n\n except AssertionError as error:\n print('No data')\n print(error)", "def get_taxii(self, args: Dict[str, Any]):\n taxii_data = []\n save_fetch_time = None\n count = 0\n try:\n for data in self.fetch(args.get('begin'), args.get('end'), args.get('collection')):\n response = self.parse_to_json(data)\n\n if response.get('indicators') or False:\n content = response.get('indicators')\n elif response.get('ttps') or False:\n content = response.get('ttps').get('ttps')\n else:\n raise ValueError(\"Last fetch time retrieval failed.\")\n\n for eachone in content:\n save_fetch_time = parser.parse(eachone['timestamp']).replace(tzinfo=pytz.UTC).strftime(\n DATETIME_FORMAT)\n\n taxii_data.append(response)\n\n count += 1\n if count == arg_to_number(args.get('limit', 1)):\n break\n except Exception as e:\n demisto.error(\"Failed to fetch feed details, exception:{}\".format(e))\n raise e\n\n return taxii_data, save_fetch_time", "def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n 
to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)", "def fetch_data(self, begin=None, end=None, delete_rawdata=None):\n\n if delete_rawdata is None:\n delete_rawdata = self.delete_rawdata\n\n for source in self.sources.keys():\n if source in self.staticsources:\n continue\n src = self.sources[source]\n print '[INFO] Download data for source ' + source\n src.download_and_resample(begin=begin, end=end,\n shapefile=self.shapefile,\n delete_rawdata=delete_rawdata)\n\n print '[SUCCESS] Download and resampling complete!'", "def set_training_fetches(self, fetched):\n self_fetched = fetched[self._name]\n self._loss = self_fetched['loss']\n\n names = ['loss', 'output', 'target', 'degraded']\n\n if self._hparams.use_batch_transformer:\n names = names + ['bt_input', 'bt_output']\n\n self._dual.set_fetches(fetched, names)\n\n if self._summary_op is not None:\n self._summary_values = fetched[self._name]['summaries']", "def load_api_data(prnt=False, subsectie=None):\n x_coordinates = []\n y_coordinates = []\n aantal = []\n volumes = []\n adresses = []\n buurt = []\n\n link = 'https://api.data.amsterdam.nl/vsd/afvalclusters'\n\n try: # Online scraping is preferred\n while link is not None: # This is the case on the last page of the API\n if prnt: # Can be used for some kind of monitoring of progres\n print(link)\n response = requests.get(link)\n output = response.json()\n for result in output['results']:\n # Als het cluster nog actief is\n if result['cluster_datum_einde_cluster'] is None:\n x_coordinates.append(str(result['cluster_geometrie']\n ['coordinates'][0]))\n y_coordinates.append(str(result['cluster_geometrie']\n ['coordinates'][1]))\n aantal.append(result['cluster_fractie_aantal'])\n volumes.append(result['cluster_fractie_volume'])\n adresses.append(result['bag_adres_openbare_ruimte_naam'])\n buurt.append(result['gbd_buurt_code'])\n\n link = output['_links']['next']['href'] # Link for next page\n\n df_clusters = pd.DataFrame([x_coordinates, y_coordinates, aantal,\n volumes, adresses, buurt]).T\n df_clusters = df_clusters.rename(columns={0: 'cluster_x',\n 1: 'cluster_y',\n 2: 'aantal_per_fractie',\n 3: 'volume_per_fractie',\n 4: 'street_name',\n 5: 'buurt'})\n except OSError: # backup is online scraping is not working\n df_api = pd.read_csv('../Data/afval_cluster.csv', delimiter=';')\n df_api = df_api[df_api['cluster_datum_einde_cluster'].isna()]\n df_api = df_api[['cluster_geometrie', 'cluster_fractie_aantal',\n 'cluster_fractie_volume',\n 'bag_adres_openbare_ruimte_naam', 'gbd_buurt_code']]\n df_api['cluster_x'] = df_api['cluster_geometrie'] \\\n .apply(lambda x: x.split('(')[1].split(' ')[0])\n df_api['cluster_y'] = df_api['cluster_geometrie'] \\\n .apply(lambda x: x.split()[1][:-1])\n df_clusters = df_api.drop(['cluster_geometrie'], axis=1) \\\n .rename(columns={'cluster_fractie_aantal': 'aantal_per_fractie',\n 'cluster_fractie_volume': 'volume_per_fractie',\n 'bag_adres_openbare_ruimte_naam': 'street_name',\n 'gbd_buurt_code': 'buurt'})\n\n # Transform coordinates of clusters to ints, as this helps easing join\n df_clusters['cluster_x'] = df_clusters['cluster_x'].astype('float')\\\n .round(0).astype('int')\n df_clusters['cluster_y'] = df_clusters['cluster_y'].astype('float')\\\n .round(0).astype('int')\n df_clusters['wijk'] = 
df_clusters['buurt'].str[:3]\n df_clusters['stadsdeel'] = df_clusters['buurt'].str[0]\n\n if subsectie:\n df_clusters = df_clusters[df_clusters['stadsdeel']\n .isin(list(subsectie))]\n return df_clusters", "def fetch_dataset(data_root_dir):\n pattern = \"winemag_dataset_*.csv\"\n\n file_list = glob.glob(os.path.join(data_root_dir, pattern))\n\n df_list = [pd.read_csv(fname) for fname in file_list]\n\n full_df = pd.concat(df_list)\n\n # give unique row names to all\n full_df.index = range(full_df.shape[0])\n\n print(\"Dataset fetched.\")\n return full_df", "def get_data_without_transactions():\n _, res = DBX.files_download(c.io.FILE_DATA)\n\n dfs = {x: pd.read_excel(io.BytesIO(res.content), sheet_name=x) for x in c.dfs.ALL_FROM_DATA}\n\n return dfs", "def get_tickers():\n\turl = \"https://api.iextrading.com/1.0/ref-data/symbols\"\n\t\n\ttry:\n\t\tresponse = requests.get(url)\n\t\tif str(response.status_code) == \"200\":\n\t\t\tprint(\"[UPDATE]: Downlaoding Tickers from iextrading API\")\n\t\t\tjson_stock_data = response.json()\n\n\t\t\tpd_stock = pandas.DataFrame(json_stock_data)\n\t\t\t# DataFrame Format\n\t\t\t# date iexId isEnabled name symbol type\n\t\t\t# 0 2019-02-12 2 True Agilent Technologies Inc. A cs\n\n\t\t\tprint(\"[SUCCESS]: Downloaded {} symbols from IEX.\".format(len(pd_stock.index)))\n\n\t\t\treturn pd_stock\n\n\t\telse:\n\t\t\tprint(\"[ERROR]: Download from IEX failed.\")\n\t\t\treturn \"ERROR\"\n\texcept Exception as e:\n\t\tprint(\"[ERROR]: {}\".format(e))\n\t\treturn \"ERROR\"", "def _fetch_large():\n # Large training data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"train\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TRAIN.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TRAIN.tar.gz\",\n \"mv SMNI_CMI_TRAIN train\",\n \"find train | grep gz$ | xargs gunzip\",\n ],\n )\n # Large test data:\n resource(\n target=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n url=\"https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz\",\n )\n dependency(\n target=data_path(\"eeg\", \"test\"),\n source=data_path(\"eeg\", \"SMNI_CMI_TEST.tar.gz\"),\n commands=[\n \"tar xzf SMNI_CMI_TEST.tar.gz\",\n \"mv SMNI_CMI_TEST test\",\n \"find test | grep gz$ | xargs gunzip\",\n ],\n )", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(url, seed):\n available_fields = {\n 'boro': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'cd': {'fieldtype': 'C', 'categories': range(1, 19)},\n 'uf1_1': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_2': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_3': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_4': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_5': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_6': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_7': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_8': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_9': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_10': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_11': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_12': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_13': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_14': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_15': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_16': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_17': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_18': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_19': {'fieldtype': 'B', 
'codes': [1, 9, 8]},\n 'uf1_20': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_21': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_22': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'sc24': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc36': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc37': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc38': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc114': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf48': {'fieldtype': 'N'},\n 'sc147': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf11': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'sc149': {'fieldtype': 'B', 'codes': [1, 2, None]},\n 'sc173': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc171': {'fieldtype': 'B', 'codes': [1, 2]},\n 'sc150': {'fieldtype': 'N'},\n 'sc151': {'fieldtype': 'N'},\n 'sc154': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc157': {'fieldtype': 'C', 'categories': [1, 2, 9]},\n 'sc158': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc185': {'fieldtype': 'B', 'codes': [0, 1, 8]},\n 'sc186': {'fieldtype': 'C', 'categories': [2, 3, 4, 5, 9]},\n 'sc197': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc198': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc187': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc188': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc571': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc189': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc190': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc191': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc192': {'fieldtype': 'B', 'codes': [0, 1, 8]},\n 'sc193': {'fieldtype': 'C', 'categories': [2, 3, 9]},\n 'sc194': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc196': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc199': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'rec15': {'fieldtype': 'C', 'categories': range(1, 14)},\n 'sc26': {'fieldtype': 'C', 'categories': [12, 13, 15, 16]},\n 'uf23': {'fieldtype': 'N'},\n 'rec21': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'rec62': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec64': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec54': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'rec53': {'fieldtype': 'N'},\n 'new_csr': {'fieldtype': 'C', 'categories': [1, 2, 5, 12, 20,\n 21, 22, 23, 30, 31,\n 80, 85, 90, 95]}\n }\n selected_fields = [\n # The borough where the apartment is located\n 'boro',\n\n # Building type: public housing, new construction,\n # \"In Rem\" foreclosure, old construction\n 'sc26',\n\n # Number of bedrooms\n 'sc151',\n\n # Dilapidated / Not Dilapidated\n 'rec21',\n\n # Complete plumbing facilities in unit\n 'rec62',\n\n # Complete kitchen facilities in unit\n 'rec64',\n\n # Maintenance deficiencies\n 'rec53',\n\n # Building age\n 'uf23',\n\n # Rent control/stabilization category\n 'new_csr',\n\n # Neighborhood rating\n 'sc196',\n\n # Wheelchair accessibility of unit\n 'sc38',\n\n # Presence of elevator\n 'sc149',\n\n # Building height\n 'uf11',\n\n # Air conditioning\n 'sc197',\n\n # Walkup\n 'sc171',\n ]\n mini_fields = {k: available_fields[k]\n for k in available_fields\n if k in selected_fields}\n y_field = 'uf17'\n # s = requests.get(url).content\n # raw_df = pd.read_csv(StringIO(s.decode('utf-8')))\n raw_df = pd.read_csv('homework2_data.csv')\n valid_renters, validated_features, validated_rents = \\\n preprocess_data(raw_df, mini_fields, y_field)\n X_train, X_test, y_train, y_test = train_test_split(\n validated_features, validated_rents, random_state=seed)\n cats = [k\n for (k, v) in 
mini_fields.items()\n if v[\"fieldtype\"] == \"C\"]\n catnums = [i\n for (i, x) in enumerate([c in cats\n for c in validated_features.columns])\n if x]\n return X_train, X_test, y_train, y_test, catnums, raw_df", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def getData(self):\n\n url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip'\n try:\n file, _ = urlretrieve(url)\n zip_file_object = zipfile.ZipFile(file, 'r')\n first_file = zip_file_object.namelist()[0]\n file = zip_file_object.open(first_file)\n\n file_handler = []\n for row in file:\n file_handler.append(row.decode())\n\n # getting the currency headers into header_list\n header_list = []\n notFound = True\n x = 0\n while notFound:\n if file_handler[x].startswith('Date'):\n header = file_handler[x].split(',')\n for col in header:\n header_list.append(col.strip())\n notFound = False\n x += 1\n self.currencies = list(filter(None, header_list))\n self.currencies.append('EUR')\n self.currencies = self.currencies[1:] # Removing the \"Date\" entry\n\n data = []\n for row in file_handler[x:]:\n if row.startswith('`\\n'):\n break\n else:\n data.append(list(filter(None, [x.replace('\\n', '') for x in row.split(',')]))) # Removing any empty extra columns at the end of each rows\n\n # filling my self.rates with the currency in the format {CURR: {date: rate, ...}, ...}\n for row in data:\n for i in range(len(self.currencies)):\n try:\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: row[i + 1]}\n else:\n self.rates[self.currencies[i]].update({row[0]: row[i + 1]})\n except IndexError:\n # We reached the EUR section\n if self.currencies[i] not in self.rates:\n self.rates[self.currencies[i]] = {row[0]: '1.0000'}\n else:\n self.rates[self.currencies[i]].update({row[0]: '1.0000'})\n\n self.currencies.sort()\n\n except Exception as e:\n print('Failed to process the data')\n print(e)\n finally:\n file.close()", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example 
data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "async def fetch_resulting_datasets(db_pool, query_parameters, misses=False, accessible_missing=None):\n async with db_pool.acquire(timeout=180) as connection:\n datasets = []\n try: \n if misses:\n if accessible_missing:\n query = f\"\"\"SELECT id as \"datasetId\", access_type as \"accessType\", stable_id as \"stableId\"\n FROM beacon_dataset\n WHERE id IN ({create_prepstmt_variables(len(accessible_missing))});\n \"\"\"\n # LOG.debug(f\"QUERY to fetch accessible missing info: {query}\")\n statement = await connection.prepare(query)\n db_response = await statement.fetch(*accessible_missing)\n else:\n return []\n else:\n query = f\"\"\"SELECT * FROM {DB_SCHEMA}.query_data_summary_response({create_prepstmt_variables(13)});\"\"\"\n LOG.debug(f\"QUERY to fetch hits: {query}\")\n statement = await connection.prepare(query)\n db_response = await statement.fetch(*query_parameters) \n\n for record in list(db_response):\n processed = transform_misses(record) if misses else await transform_record(db_pool, record)\n datasets.append(processed)\n return datasets\n except Exception as e:\n raise BeaconServerError(f'Query resulting datasets DB error: {e}')", "def get_all_casks(self):", "def ez_fetch(auth_token, dataset_id, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_fetch\"\n payload = {\n \"dataset_id\": dataset_id,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def download_datasets_csv(url):\n # dataset = pd.read_csv(url, sep='\\t')\n dataset = pd.read_csv(url, sep=\",\")\n dataset.columns = dataset.columns.str.replace(\" \", \"_\")\n return dataset", "def get_edinburgh_bike_counter_data(datapath='data',\n force_download=False):\n r = requests.get('https://data.edinburghopendata.info/dataset/bike-counter-data-set-cluster')\n soup = BeautifulSoup(r.content, 'html.parser')\n\n dfs = []\n for a in soup.find_all('a', attrs={'href': re.compile(\"\\.csv$\")}):\n\n # Download data\n url = a.get('href')\n filename = os.path.basename(url)\n filepath = os.path.join(datapath, filename + '.gz')\n if force_download or not os.path.exists(filepath):\n r = requests.get(url)\n with gzip.open(filepath, 'wb') as f:\n f.write(r.content)\n\n # Read data and set datetime as index\n df = pd.read_csv(filepath, 
index_col='date')\n try: # specify format to speed up datetime parsing\n df.index = pd.to_datetime(df.index, format='%d/%m/%Y')\n except TypeError: # infer format\n df.index = pd.to_datetime(df.index)\n df.index += pd.to_timedelta(df['time'], unit='h')\n\n # Only keep the total number of bikes across all channels\n counter = os.path.splitext(filename)[0]\n df = pd.DataFrame(df.filter(regex='channel', axis=1).sum(axis=1),\n columns=[counter])\n\n # Remove days on which no bikes were counted\n daily = df.resample('D').sum()\n operating_days = daily.loc[daily[counter] > 0].index\n df = df[df.index.to_series().dt.date.isin(\n operating_days.to_series().dt.date)]\n\n dfs.append(df)\n\n data = pd.concat(dfs, axis=1)\n return data" ]
[ "0.63306177", "0.6272984", "0.62559444", "0.62323636", "0.6168453", "0.6152495", "0.5925996", "0.5868122", "0.58555883", "0.5853049", "0.5850573", "0.58333457", "0.5734732", "0.57281804", "0.57072836", "0.56839794", "0.5660408", "0.56425864", "0.562225", "0.56212765", "0.56190795", "0.5614248", "0.556928", "0.55680233", "0.5538897", "0.5515964", "0.5511601", "0.55079406", "0.5506223", "0.5506188", "0.5505916", "0.5497532", "0.54745066", "0.5472709", "0.5469753", "0.54652745", "0.54639673", "0.54629487", "0.5455188", "0.54494095", "0.54467374", "0.5430473", "0.5430364", "0.5429952", "0.5425747", "0.5415785", "0.54097354", "0.54081714", "0.54011554", "0.5400415", "0.53995", "0.5398484", "0.5396319", "0.5391462", "0.538514", "0.53764516", "0.53692394", "0.53681946", "0.53665036", "0.53599614", "0.5346234", "0.53451", "0.53446215", "0.53395784", "0.53373754", "0.5328223", "0.5327771", "0.53221613", "0.5319889", "0.5318374", "0.5308514", "0.5307602", "0.5303081", "0.5300299", "0.5299556", "0.5297892", "0.5295611", "0.5287152", "0.5286378", "0.5286051", "0.5284772", "0.5284179", "0.52816993", "0.5278829", "0.52786064", "0.5278176", "0.5263685", "0.52626765", "0.52614886", "0.5254656", "0.5252849", "0.525227", "0.5249915", "0.5248131", "0.5235519", "0.52329624", "0.5231409", "0.52295166", "0.52237225", "0.5222201" ]
0.5893806
7
Create gauge-type metrics for filesystem replica link lag, with the local filesystem name, replication direction, remote array name, remote filesystem name, and replication status as labels.
def _replica_links_lag(self):
    for f in self.fb.get_filesystem_replica_links():
        self.replica_links_lag.add_metric([f.local_file_system.name,
                                           f.direction,
                                           f.remote.name,
                                           f.remote_file_system.name,
                                           f.status],
                                          -1 if f.lag is None else f.lag)
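The snippet above calls `add_metric` on a gauge family that is created elsewhere in the exporter. Below is a minimal sketch of how that family could be defined and filled with the `prometheus_client` library; the metric name, help text, collector class, and `fb_client` argument are illustrative assumptions and do not come from the source.

```python
# Sketch only: assumes prometheus_client and a FlashBlade-style client object
# exposing get_filesystem_replica_links(); names below are hypothetical.
from prometheus_client.core import GaugeMetricFamily, REGISTRY


class FilesystemReplicaLinkCollector:
    def __init__(self, fb_client):
        # fb_client is an assumed client with get_filesystem_replica_links()
        self.fb = fb_client

    def collect(self):
        # Label names mirror the values passed to add_metric() in the snippet above.
        replica_links_lag = GaugeMetricFamily(
            'filesystem_replica_links_lag_msec',            # assumed metric name
            'Filesystem replica link lag (milliseconds)',   # assumed help text
            labels=['name', 'direction', 'remote_name',
                    'remote_file_system_name', 'status'])
        for f in self.fb.get_filesystem_replica_links():
            replica_links_lag.add_metric(
                [f.local_file_system.name, f.direction, f.remote.name,
                 f.remote_file_system.name, f.status],
                -1 if f.lag is None else f.lag)
        yield replica_links_lag


# Usage (illustrative): register the collector so /metrics includes the gauge.
# REGISTRY.register(FilesystemReplicaLinkCollector(fb_client))
```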
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def recreate_metrics():\n all = monitor_client.list_metric_descriptors(\n project_path, filter_='metric.type=starts_with(\"custom.\")'\n )\n for a in all:\n if \"accumulator\" in str(a) or \"biquery\" in str(a):\n metric_name = monitor_client.metric_descriptor_path(\n settings.PROJECT_ID, a.type\n )\n\n try:\n monitor_client.delete_metric_descriptor(metric_name)\n except Exception as e:\n print(e)\n\n metric_descriptor = {\n \"type\": f\"custom.googleapis.com/{Monitoring.PING}\",\n \"labels\": [\n {\n \"key\": \"operation\",\n \"valueType\": \"STRING\",\n # \"description\": \"Performed operation name\"\n }\n ],\n \"metricKind\": \"GAUGE\",\n \"valueType\": \"DOUBLE\",\n \"unit\": \"items\",\n \"description\": \"Function performed in a loop with hard limit\",\n \"displayName\": \"Repeated Function Execution\",\n }\n\n return monitor_client.create_metric_descriptor(\n settings.PROJECT_ID, metric_descriptor\n )", "def test_gauge(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name, self.gauge_metric_help, self.gauge_metric_type, []\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.gauge_metric_name)\n self.assertEqual(mf.help, self.gauge_metric_help)\n self.assertEqual(mf.type, self.gauge_metric_type)\n\n # Create another and check equal\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Gauge with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Gauge can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)", "def _report_metrics(self, total_bytes, time_delta, num_files):\n # This recreates the gsutil throughput calculation so 
that metrics are 1:1.\n avg_speed = round(float(total_bytes) / float(time_delta))\n report(\n source_scheme=self._source_scheme,\n destination_scheme=self._destination_scheme,\n num_files=num_files,\n size=total_bytes,\n avg_speed=avg_speed,\n disk_io_time=self._calculate_disk_io())", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], 
sys[\"id\"]))", "def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge", "def check_gauge(params, match):\n gauge_no = match.group(1)\n stats_url = USGS_STATS_URL_TEMPLATE % gauge_no\n graph_url = USGS_GRAPH_URL_TEMPLATE % gauge_no\n\n response = requests.get(stats_url)\n last_measurement = response.text.strip().split(\"\\n\")[-1]\n _, _, _, mtime, tz, cfs, _ = re.split('\\s+', last_measurement)\n\n return lambda_response(None, {\n \"text\": \"Last measurement: %s cfs @ %s %s\" % (cfs, mtime, tz),\n \"attachments\": [{ \"image_url\": graph_url }]\n })", "def replication_info():\n\n def _get_last_packet_name(location, pattern):\n try:\n entries = [os.path.join(location, e) for e in os.listdir(location)]\n except OSError as e:\n logging.warning(e)\n return None\n pattern = re.compile(pattern)\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries, reverse=True) # latest first\n return os.path.split(entries[0])[-1] if entries else None\n\n # TODO(roman): Cache this response:\n return jsonify({\n 'last_packet': _get_last_packet_name(\n current_app.config['REPLICATION_PACKETS_DIR'],\n \"replication-[0-9]+.tar.bz2$\"\n ),\n })", "def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))", "def _solaris_balloon_stat(label):", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def generate_latest(registry=Registry):\n\n def sample_line(line, metric_type):\n if line.labels:\n labelstr = '{{{0}}}'.format(','.join(\n ['{0}=\"{1}\"'.format(\n k, v.replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace('\"', r'\\\"'))\n for k, v in sorted(line.labels.items())]))\n else:\n labelstr = ''\n timestamp = ''\n if line.timestamp is not None:\n # Convert to milliseconds.\n timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))\n name = line.name\n if metric_type == 'counter' and name.endswith('_total'):\n name = name[:-6]\n return '{0}{1} {2}{3}\\n'.format(\n name, labelstr, int(line.value), timestamp)\n\n output = []\n for metric in registry.collect():\n try:\n mname = metric.name\n mtype = metric.type\n # Munging from OpenMetrics into Prometheus format.\n if mtype == 'counter':\n mname = mname\n elif mtype == 'info':\n mname = mname + '_info'\n mtype = 'gauge'\n elif mtype == 'stateset':\n mtype = 'gauge'\n elif mtype == 'gaugehistogram':\n # A gauge histogram is really a gauge,\n # but this captures the structure better.\n mtype = 'histogram'\n elif mtype == 'unknown':\n mtype = 'untyped'\n help_str = '# HELP {0} {1}\\n'.format(mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n'))\n if 'Multiprocess' not in help_str:\n continue\n output.append('# HELP {0} {1}\\n'.format(\n mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', 
r'\\n')))\n output.append('# TYPE {0} {1}\\n'.format(mname, mtype))\n\n for s in metric.samples:\n for suffix in ['_created', '_gsum', '_gcount']:\n if s.name == metric.name + suffix:\n break\n else:\n line = sample_line(s, mtype)\n if not line:\n continue\n output.append(line)\n except Exception as exception:\n exception.args = (exception.args or ('',)) + (metric,)\n raise\n\n return ''.join(output).encode('utf-8')", "def __init__(self, replication_num, metric_name_array, metric_collection_types = None, detailed_metric_assembly = False):\n self.replication_num = replication_num\n self.metrics = metric_name_array\n self.metric_collection_types = metric_collection_types # can be a string array elements of which can be one of ('STRING_LIST', 'COUNT_MAX', 'MEAN_STD','MIN','MAX', 'MIN_MAX') \n self.detailed_metric_assembly = detailed_metric_assembly\n self.replication_counter = 0\n self.metric_final_results = {}\n # initialize results array for each metric\n for metric in metric_name_array:\n self.metric_final_results[metric] = []", "def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def create_network_and_stats(\r\n dir_path, map_lines, otu_table_fp, prefs, data, background_color, label_color):\r\n cat_by_sample, sample_by_cat, num_meta, meta_dict, labels, node_labels,\\\r\n label_list = get_sample_info(map_lines)\r\n con_by_sample, node_file, edge_file, red_node_file,\\\r\n red_edge_file, otu_dc, degree_counts, sample_dc, \\\r\n = get_connection_info(otu_table_fp, num_meta, meta_dict)\r\n num_con_cat, num_con = get_num_con_cat(con_by_sample, cat_by_sample)\r\n num_cat = get_num_cat(sample_by_cat, con_by_sample.keys())\r\n dir_path = os.path.join(dir_path, \"otu_network\")\r\n make_table_file(edge_file, labels, dir_path, \"real_edge_table.txt\")\r\n make_table_file(node_file, node_labels, dir_path, \"real_node_table.txt\")\r\n make_table_file(red_edge_file, labels, dir_path,\r\n \"real_reduced_edge_table.txt\")\r\n make_table_file(red_node_file, node_labels, dir_path,\r\n \"real_reduced_node_table.txt\")\r\n make_stats_files(\r\n sample_dc,\r\n otu_dc,\r\n degree_counts,\r\n num_con_cat,\r\n num_con,\r\n num_cat,\r\n cat_by_sample,\r\n dir_path)\r\n if background_color == 'white':\r\n background_color = Color('white', (255, 255, 255))\r\n elif background_color == 'black':\r\n background_color = Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n background_color = data_colors[background_color]\r\n except KeyError:\r\n raise KeyError(\"background_color unknown\")\r\n\r\n if label_color == 'white':\r\n label_color = Color('white', (255, 255, 255))\r\n elif label_color == 'black':\r\n label_color = 
Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n label_color = data_colors[label_color]\r\n except KeyError:\r\n raise KeyError(\"label_color unknown\")\r\n\r\n make_props_files(\r\n labels,\r\n label_list,\r\n dir_path,\r\n data,\r\n background_color,\r\n label_color,\r\n prefs)", "def derive_newrelic_slaves(self):\n if self.has_slave_data is True:\n self.update_metric(\"newrelic/replication_lag\", self.sum_of([\"slave/seconds_behind_master\"]))\n\n # both need to be YES, which is 1\n running = self.sum_of([\"slave/slave_io_running\", \"slave/slave_sql_running\"])\n if running is not None:\n replication_status = 1.0\n if running == 2:\n replication_status = 0.0\n self.update_metric(\"newrelic/replication_status\", replication_status)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", self.sum_of([\"slave/relay_log_pos\"]))\n self.update_metric(\"newrelic/master_log_lag_bytes\", self.diff_of([\"slave/read_master_log_pos\",\n \"slave/exec_master_log_pos\"]))\n else: # This is a hack because the NR UI can't handle it missing for graphs\n self.update_metric(\"newrelic/replication_lag\", 0.0)\n self.update_metric(\"newrelic/replication_status\", 0.0)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", 0.0)\n self.update_metric(\"newrelic/master_log_lag_bytes\", 0.0)", "def update_gauge(self):\n pass # Do nothing", "def feature_dynamic_filesystem(self):\n def flatten_list(structured):\n \"\"\"Flatten nested list.\"\"\"\n flat = []\n for i in structured:\n flat += i\n return flat\n\n # Get file operations and their number\n self.features[\"file_read\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", [])\n self.features[\"files_read\"] = len(self.features[\"file_read\"])\n self.features[\"file_written\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_written\", [])\n self.features[\"files_written\"] = len(self.features[\"file_written\"])\n self.features[\"file_deleted\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", [])\n self.features[\"files_deleted\"] = len(self.features[\"file_deleted\"])\n self.features[\"file_copied\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"files_copied\"] = len(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"file_renamed\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])\n )\n self.features[\"files_renamed\"] = len(self.features[\"file_renamed\"])\n\n # Get other file operations numbers\n self.features[\"files_opened\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", [])\n )\n self.features[\"files_exists\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", [])\n )\n self.features[\"files_failed\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n )\n\n # Get total number of unique touched files\n file_operations = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_written\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", []) + \\\n flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])) + \\\n 
flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_recreated\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n # remove duplicates\n self.features[\"files_operations\"] = len(list(set(file_operations)))", "def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n 
tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def add_gauge_server(self, data, feed_id, server_id, metric_enum):\n metric_id = self._metric_id_gauge_server(feed_id=feed_id, server_id=server_id,\n metric_enum=metric_enum)\n self.add_gauge(data=data, metric_id=metric_id)", "def process_data(base_path, gauge_data_dir, job_name): \n \n # Collect all gauge files and put them into one array\n storm_gauges_files = os.listdir(gauge_data_dir) \n g = numpy.zeros((len(gauges), len(storm_gauges_files))) \n \n #for storm_gauges in storm_gauges_files: \n # data_path = os.join(gauage_data_dir, storm_gauges)\n # for i in range(0, len(gauges)): \n \n for (index, storm_gauges) in enumerate(storm_gauges_file): \n with open(os.path.join(gauge_data_dir ,storm_gauges), 'r') as gauges_data: \n data = numpy.loadtxt(gauges_data, delimiter = ',', skiprows=1) \n g[:, index] = data[:, 1]\n\n return g", "def test_metric_namespace(self):\n self.statsd.namespace = \"foo\"\n self.statsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry('foo.gauge:123.4|g\\n', self.recv(2))", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. 
Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def tracking(main_params, link_params, check_params, manual_log_path=''):\n \n vect_path, csv_path, dest_path, verbose = main_params\n createCSV, forced_matching, search_range, memory, adaptive_stop = link_params\n params = {'r':search_range, 'm':memory}\n check, img_path, size = check_params\n \n # Log path determination\n if manual_log_path:\n log_path = manual_log_path\n else: \n log_path = os.path.join(dest_path, 'log.txt')\n log_txt = ''\n \n # Creation of the dest directory if it doesn't exist\n if not os.path.exists(dest_path):\n os.mkdir(dest_path)\n \n start = time.time()\n previous_step = start\n txt = 'TRACK> Preparing the data...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n \n # Loading graphs\n files = nu.preloadFiles(vect_path, ['.gpickle'], debug=False) # finding all the gpickles\n graphs = [nx.read_gpickle(graph[0]) for graph in files] # creation of a list containing all the graphs, in order\n \n # Creation of the csv if necessary, and loading as DataFrame\n if createCSV:\n nu.createNodesCSVForTracking(files, csv_path, verbose)\n df = pd.read_csv(csv_path)\n \n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Tracking...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n \n # Tracking \n df_track = tp.link(df, search_range, memory=memory, t_column='t', \n adaptive_stop=adaptive_stop)\n\n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Updating graphs...'.format(timer-previous_step)\n log_txt += txt + '\\n'\n \n # Adding nodes ID to the graphs \n graphs = matchAndInsert(df_track, graphs, verbose, forced_matching, \n log_path, log_txt)\n \n timer = time.time()\n end = timer-previous_step\n txt = ('TRACK> ...done in {:.0f} min {:.4f} s.'\n .format(end // 60, end % 60))\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Saving graphs...'.format(timer-previous_step)\n log_txt += txt + '\\n' \n \n # Saving the updated graphs\n desc = 'TRACK> Saving graphs'\n for i, graph in enumerate(tqdm(graphs, 
total=len(graphs), desc=desc,\n unit='graph', disable=not verbose)): \n \n txt = 'TRACK> graph {} / {}'.format(i+1, len(graphs))\n log_txt += txt + '\\n'\n \n # New graph name: [old name]_track_r[search_range]_m[memory].gpickle'\n graph_name = files[i][1] + '_track'\n for key, value in params.items():\n graph_name += '_' + key + str(value)\n \n path = os.path.join(dest_path, graph_name + '.gpickle') \n nx.write_gpickle(graph, path, protocol=2) \n\n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Processing check image...'.format(timer-previous_step)\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n \n # Saving an image to check the results \n if check:\n img = color.gray2rgb(io.imread(img_path)) # loading the image as rgb\n colors = {} # dictionary in which we save the colors by node tag\n \n # Drawing each node with a random color for each node tag\n desc = 'TRACK> drawing graphs nodes'\n for i, graph in enumerate(tqdm(graphs, total=len(graphs), desc=desc,\n unit='graph', disable=not verbose)):\n txt = 'TRACK> drawing nodes from graph {}'.format(files[i][1])\n log_txt += txt + '\\n'\n nu.drawNodesRandomColors(graph, img, size, colors) \n \n txt = 'TRACK> writing image...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n io.imsave(os.path.join(dest_path, 'tracking_check.png'), img)\n\n timer = time.time()\n end = timer-start\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n txt = ('TRACK> DONE in {:.0f} min {:.4f} s.'\n .format(end // 60, end % 60))\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n \n # Writing the log \n with open(log_path, 'a+') as log:\n log.write(log_txt)", "def create_system_metrics(system):\n pass", "def gcp_create_metric_descriptor(project_id: str):\n client = monitoring_v3.MetricServiceClient()\n project_name = client.project_path(project_id)\n\n for desc_type, desc_desc in [\n [\"buildbots_percent_failed\", \"Percentage of failed builds\"],\n [\"buildbots_builds_successful\", \"Number of successful builds in the last 24h.\"],\n [\"buildbots_builds_failed\", \"Number of failed builds in the last 24h.\"],\n [\"buildbots_builds_total\", \"Total number of builds in the last 24h.\"],\n ]:\n\n descriptor = monitoring_v3.types.MetricDescriptor()\n descriptor.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)\n descriptor.metric_kind = (\n monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)\n descriptor.value_type = (\n monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)\n descriptor.description = desc_desc\n descriptor = client.create_metric_descriptor(project_name, descriptor)\n print('Created {}.'.format(descriptor.name))", "def _track_data_statistics(self, info_l, last_info, episode_len,\n all_stats, maxlen_stats):\n maxlen = get_max_episode_len(self.path)\n start = info_l[0]['extras']\n last_ex = last_info['extras']\n\n if 'cable-shape' in self.path or 'cable-line-notarget' in self.path:\n nb_sides = start['nb_sides']\n frac_beads = last_ex['nb_zone'] / last_ex['nb_beads']\n if episode_len == maxlen:\n maxlen_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n maxlen_stats[f'frac_{nb_sides}'].append( frac_beads )\n all_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n all_stats[f'frac_{nb_sides}'].append( frac_beads )\n 
all_stats[f'len_{nb_sides}'].append( episode_len )\n\n elif 'cable-ring' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'cloth-flat' in self.path:\n delta = last_ex['cloth_coverage'] - start['cloth_coverage']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['coverage_delta'].append( delta )\n maxlen_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['coverage_delta'].append( delta )\n all_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n\n elif 'cloth-cover' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n\n elif 'bag-alone-open' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'bag-items-easy' in self.path or 'bag-items-hard' in self.path:\n # For this it'd be interesting to see what task stage we're at.\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n maxlen_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n all_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n\n elif 'bag-color-goal' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n maxlen_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n all_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )\n\n else:\n print(f'For: {self.path}, we are not tracking extra stats.')", "def gauge(self, gauge, value):\n pass", "def getFiletypeFromReplicas(gpfn, replicas_dic):\n\n filetype = \"\"\n for guid in replicas_dic.keys():\n replicas = replicas_dic[guid]\n for replica in replicas:\n if replica.sfn == gpfn:\n filetype = replica.filetype\n break\n\n tolog(\"Will 
use filetype=\\'%s\\' for surl=%s\" % (filetype, gpfn))\n\n return filetype", "def compute_path_metric(self, sw, path, util, time_now, local_contrib):\n pathmetric = 1\n linkmetrics = []\n links = zip(path[:-1], path[1:])\n # calculate available capacity for each link in path\n for link in links:\n u, v = link\n # Use the last-learned-via-sync value for a link\n if (not local_contrib) and 'sync_learned' in self.graph[u][v]:\n used1 = self.graph[u][v]['sync_learned'] + util\n used2 = self.graph[u][v]['used'] + util\n # ['used'] is a strict lower bound for ['sync_learned']\n if used1 > used2: \n used = used1\n logging.debug(\"CS [%s] using sync_learned value 1 [%f]\", str(self.name), used1)\n else:\n used = used2\n logging.debug(\"CS [%s] using sync_learned value 2 [%f]\", str(self.name), used2)\n else:\n logging.debug(\"CS [%s] using tracking value\", str(self.name))\n used = self.graph[u][v]['used'] + util\n\n capacity = self.graph[u][v]['capacity']\n linkmetric = float(used) / capacity\n # If the controller estimates it would oversubscribe this link\n if linkmetric > 1:\n logging.info(\"[%s] MAY be OVERSUBSCRIBED [%f] at switch [%s]\", str(time_now), linkmetric, str(sw))\n break\n else:\n linkmetrics.append(linkmetric)\n\n # We define pathmetric to be the worst link metric in path\n if len(linkmetrics) > 0:\n pathmetric = max(linkmetrics)\n\n funname = sys._getframe().f_code.co_name\n logging.debug(\"[%s] [%s] [%s] [%s]\", funname, str(time_now), str(self),\n str((path, linkmetrics)))\n return (pathmetric, len(links))", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()", "def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException 
as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def get_data():\n current_users = []\n containers = client.containers.list(filters={'name':'jupyter-'})\n users_per_worker.set(len(containers))\n for container in containers:\n username = container.name\n current_users.append(username)\n\n # Get memory data for this user\n try:\n with open('/docker/memory/{}/memory.usage_in_bytes'.format(container.id), 'r') as memfile:\n memory = memfile.read()\n memory = int(memory) / MBFACTOR\n memory_gauge.labels(username).set(memory)\n except Exception as e:\n logger.error(\"Failed to update memory metric. Exception: {}\".format(e))\n\n # Get CPU data for this user\n try:\n with open('/docker/cpu/{}/cpuacct.stat'.format(container.id), 'r') as cpufile:\n user_cpu_line = cpufile.readline().split()\n user_cpu = user_cpu_line[1]\n cpu_gauge_user.labels(username).set(str(user_cpu))\n system_cpu_line = cpufile.readline().split()\n system_cpu = system_cpu_line[1]\n cpu_gauge_system.labels(username).set(str(system_cpu))\n except Exception as e:\n logger.error(\"failed to update CPU metrics. 
Exception: {}\".format(e))\n # Get memory data for this worker\n try:\n with open('/worker/meminfo') as workerfile:\n total_mem_line = workerfile.readline().split()\n total_mem = total_mem_line[1]\n free_mem_line = workerfile.readline().split()\n free_mem = int(free_mem_line[1]) / MBFACTOR\n available_mem_line = workerfile.readline().split()\n available_mem = int(available_mem_line[1]) / MBFACTOR\n worker_mem_free.set(available_mem)\n except Exception as e:\n logger.error(\"Failed to update worker metrics. Exception: {}\".format(e))\n\n # Check if a user shutdown their notebook\n global previous_users\n shutdown_users = list(set(previous_users) - set(current_users))\n for user in shutdown_users:\n memory_gauge.labels(user).set(0)\n\n previous_users = current_users\n\n return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)", "def status():\n used = get_space_used()\n avail = get_space_available()\n allowed = config.download.space_to_use\n print \"Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)\" % \\\n (used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)\n print \"Space available on file system: %.2f GB\" % (avail/1024.0**3)\n\n numwait = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='waiting'\", \\\n fetchone=True)\n numfail = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of requests waiting: %d\" % numwait\n print \"Number of failed requests: %d\" % numfail\n\n numdlactive = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='downloading'\", \\\n fetchone=True)\n numdlfail = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of active downloads: %d\" % numdlactive\n print \"Number of failed downloads: %d\" % numdlfail", "def _SnapMetrics(deadline):\n next_deadline = deadline + frequency_seconds\n callback = partial(_SnapMetrics, next_deadline)\n cls._timeouts[group_key] = IOLoop.current().add_timeout(next_deadline, callback)\n\n sample = meter.sample()\n sample_json = json.dumps(sample)\n new_metric = Metric.Create(group_key, machine_id, deadline, sample_json)\n with util.Barrier(_UploadSuccess, _UploadError) as b:\n retry.CallWithRetryAsync(retry_policy, new_metric.Update, client=client, callback=b.Callback())", "def report(config, path, metrics, n, include_message=False):\n logger.debug(\"Running report command\")\n logger.info(f\"-----------History for {metrics}------------\")\n\n data = []\n metric_metas = []\n\n for metric in metrics:\n operator, key = metric.split(\".\")\n metric = resolve_metric(metric)\n # Set the delta colors depending on the metric type\n if metric.measure == MetricType.AimHigh:\n good_color = 32\n bad_color = 31\n elif metric.measure == MetricType.AimLow:\n good_color = 31\n bad_color = 32\n elif metric.measure == MetricType.Informational:\n good_color = 33\n bad_color = 33\n metric_meta = {\n \"key\": key,\n \"operator\": operator,\n \"good_color\": good_color,\n \"bad_color\": bad_color,\n \"title\": metric.description,\n \"type\": metric.type,\n }\n metric_metas.append(metric_meta)\n\n state = State(config)\n for archiver in state.archivers:\n # We have to do it backwards to get the deltas between releases\n history = state.index[archiver].revisions[:n][::-1]\n last = {}\n for rev in history:\n vals = []\n for meta in metric_metas:\n try:\n logger.debug(\n f\"Fetching metric {meta['key']} for {meta['operator']} in {path}\"\n )\n val = rev.get(config, 
archiver, meta[\"operator\"], path, meta[\"key\"])\n\n last_val = last.get(meta[\"key\"], None)\n # Measure the difference between this value and the last\n if meta[\"type\"] in (int, float):\n if last_val:\n delta = val - last_val\n else:\n delta = 0\n last[meta[\"key\"]] = val\n else:\n # TODO : Measure ranking increases/decreases for str types?\n delta = 0\n\n if delta == 0:\n delta_col = delta\n elif delta < 0:\n delta_col = f\"\\u001b[{meta['good_color']}m{delta:n}\\u001b[0m\"\n else:\n delta_col = f\"\\u001b[{meta['bad_color']}m+{delta:n}\\u001b[0m\"\n\n if meta[\"type\"] in (int, float):\n k = f\"{val:n} ({delta_col})\"\n else:\n k = f\"{val}\"\n except KeyError as e:\n k = f\"Not found {e}\"\n vals.append(k)\n if include_message:\n data.append(\n (\n format_revision(rev.revision.key),\n rev.revision.message[:MAX_MESSAGE_WIDTH],\n rev.revision.author_name,\n format_date(rev.revision.date),\n *vals,\n )\n )\n else:\n data.append(\n (\n format_revision(rev.revision.key),\n rev.revision.author_name,\n format_date(rev.revision.date),\n *vals,\n )\n )\n descriptions = [meta[\"title\"] for meta in metric_metas]\n if include_message:\n headers = (\"Revision\", \"Message\", \"Author\", \"Date\", *descriptions)\n else:\n headers = (\"Revision\", \"Author\", \"Date\", *descriptions)\n print(\n # But it still makes more sense to show the newest at the top, so reverse again\n tabulate.tabulate(\n headers=headers, tabular_data=data[::-1], tablefmt=DEFAULT_GRID_STYLE\n )\n )", "def gauge(\n self,\n stat: str,\n value: int | float,\n rate: float = 1,\n delta: bool = False,\n *,\n tags: Attributes = None,\n back_compat_name: str = \"\",\n ) -> None:\n if _skip_due_to_rate(rate):\n return\n\n if back_compat_name and self.metrics_validator.test(back_compat_name):\n self.metrics_map.set_gauge_value(\n full_name(prefix=self.prefix, name=back_compat_name), value, delta, tags\n )\n\n if self.metrics_validator.test(stat):\n self.metrics_map.set_gauge_value(full_name(prefix=self.prefix, name=stat), value, delta, tags)", "def add_gauge_datasource(self, data, feed_id, server_id, resource_id, metric_enum):\n metric_id = self._metric_id_guage_datasource(feed_id=feed_id, server_id=server_id,\n resource_id=resource_id,\n metric_enum=metric_enum)\n self.add_gauge(data=data, metric_id=metric_id)", "def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()", "def __init__(self):\n super().__init__()\n self.metric = 'GTVOL'", "def global_metadata(paths):\n\n # Weakly group images to partition image set size- crucial optimization step\n if os.path.exists(paths.image_preprocess):\n clumped_paths = json.loads(open(paths.image_preprocess).read())\n else:\n clumped_paths = network.alpha_categorize(paths)\n print(\"Hashed source images\")\n\n with open(paths.image_preprocess, 'w') as json_file:\n json.dump(clumped_paths, json_file)\n\n # Combinatorial image grouping to graph\n image_graph = network.load_graph(paths.image_network_path)\n\n total = len(list(chain(*clumped_paths.values())))\n counter = 0.\n\n for image_paths in clumped_paths.values():\n counter += len(image_paths)\n print(str(int(counter / float(total) * 100)) + \"% complete\")\n\n if len(image_paths) > 1:\n image_grouping = images.load_paths(paths.default_patches, image_paths)\n image_graph = metadata.network.network_images(\n image_grouping, threshold=0, 
network=image_graph)\n else:\n image_graph.add_node(image_paths[0])\n\n metadata.network.save_graph(paths.image_network_path, image_graph)\n print(\"Updated image graph.\")\n\n # Create informational json files for templates and files\n templates.build(paths, image_graph)\n mappings.build(paths, image_graph)\n print(\"Created JSON metadata files.\")", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def list_gauge_definition(self):\n return self._get(path='gauges')", "def _makeGraphs(cnames=(\"cnt\",\"temp\"), finame=\"graf.png\"):\n colors=[\"660000\",\"ff0000\", \"770000\"]\n if len(cnames)==1: finame= cnames[0]+'.png'\n ri= open(\"graph.txt\",\"w\")\n #ri.write(\"graph graf.png --start %d -e %d --step 60 -w 600 \"%\n # (time0, time0+60*60))\n #ri.write(\"graph graf.png -s teatime --step 60 -w 600 \")\n #ri.write(\"graph graf.png -s 17:55 --step 60 -w 600 \") # -10 hours max.\n # time: -s now-10h -s 1:0 -e 4:0\n #ri.write(\"graph graf.png -s now-10h --step 60 -w 600 \")\n ri.write(\"graph \"+finame+\" -s now-2d --step 60 -w 600 \")\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"DEF:%s=%s:%s:AVERAGE \"% (cn, RRDDB, cn))\n ix=ix+1\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"LINE1:%s#%s:%s \"%(cn,colors[ix],cn))\n ix=ix+1\n ri.close()\n os.system(\"rrdtool - <graph.txt\")", "def update_status_metrics(status: EnodebStatus) -> None:\n # Call every second\n metrics_by_stat_key = {\n 'enodeb_connected': metrics.STAT_ENODEB_CONNECTED,\n 'enodeb_configured': metrics.STAT_ENODEB_CONFIGURED,\n 'opstate_enabled': metrics.STAT_OPSTATE_ENABLED,\n 'rf_tx_on': metrics.STAT_RF_TX_ENABLED,\n 'gps_connected': metrics.STAT_GPS_CONNECTED,\n 'ptp_connected': metrics.STAT_PTP_CONNECTED,\n 'mme_connected': metrics.STAT_MME_CONNECTED,\n }\n\n def get_metric_value(enodeb_status, key):\n # Metrics are \"sticky\" when synced to the cloud - if we don't\n # receive a status update from enodeb, set the metric to 0\n # to explicitly indicate that it was not received, otherwise the\n # metrics collector will continue to report the last value\n if key not in enodeb_status:\n return 0\n\n try:\n return int(enodeb_status[key])\n except ValueError:\n logging.error('Could not cast metric value %s to int',\n enodeb_status[key])\n return 0\n\n for stat_key, metric in metrics_by_stat_key.items():\n metric.set(get_metric_value(status, stat_key))", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def put_gauge(self, *_, **__): # pylint: disable=arguments-differ\n pass", "def update_statistics(self):\n 
if self.ser != 0:\n # reload number of samples lavel\n self.label_samples_value.setText(str(self.graf_t.buffer_info()[1]))\n # reload number of waiting chars in serial rx buffer\n self.label_rx_buff_value.setText(str(self.ser.inWaiting()))\n \n self.label_fps_value.setText(str(self.fps*2))\n \n self.fps = 0\n \n if self.pushButton_monitor.isChecked() == 0:\n self.force_update_graph()\n \n if self.label_Est_value.text() != '(>_<)':\n self.label_Est_value.setText('(>_<)')\n else:\n self.label_Est_value.setText('(o_o)')\n \n self.label_T_value.setText(str(self.listWidget_link.count()))\n \n self.timer_statistics.start(STAT_REFRESH)", "def __init__(self):\n super().__init__()\n self.metric = 'GCOERR'", "def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize", "def create_metric(self) -> EvalMetric:\n pass", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dev', required=True)\n parser.add_argument('-w', '--warn', action='append', type=float,\n required=True)\n parser.add_argument('-c', '--crit', action='append', type=float,\n required=True)\n args = parser.parse_args()\n\n # Derive the device type from sysfs\n ssd = dev_is_ssd(args.dev)\n\n # Get the historical and current statistics\n last = get_last(args.dev)\n curr = get_curr(args.dev)\n\n # Save the historical statistics\n set_last(args.dev, curr)\n\n # Handle the first run after startup\n if not last:\n print 'UNKNOWN: history data not available'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Calculate the current latencies for the check period\n read_latency, write_latency = get_latencies(last, curr)\n\n # Select the correct thresholds based on disk type\n try:\n read_crit = args.crit[2] if ssd else args.crit[0]\n write_crit = args.crit[3] if ssd else args.crit[1]\n except IndexError:\n print 'UNKNOWN: SSD detected but no critcal latencies provided'\n sys.exit(NAGIOS_UNKNOWN)\n\n try:\n read_warn = args.warn[2] if ssd else args.warn[0]\n write_warn = args.warn[3] if ssd else args.warn[1]\n except IndexError:\n print 'UNKNOWN: SSD detected but no warning latencies provided'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Calculate the status based on thresholds\n code = NAGIOS_OK\n if read_latency > read_warn or write_latency > write_warn:\n code = NAGIOS_WARNING\n if read_latency > read_crit or write_latency > write_crit:\n code = NAGIOS_CRITICAL\n\n status = ['OK', 'WARNING', 'CRITICAL'][code]\n print ('{0}: read latency {1:.3f}ms, write latency {2:.3f}ms | '\n 'read={1:.3f}ms;{3:.3f};{4:.3f};; '\n 'write={2:.3f}ms;{5:.3f};{6:.3f};;').\\\n format(status, read_latency, write_latency, read_warn, read_crit,\n write_warn, write_crit)\n sys.exit(code)", "def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with 
self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)", "def __init__(self, name, gauge, window = Amount(1, Time.SECONDS), clock = time):\r\n self._clock = clock\r\n self._gauge = gauge\r\n self._samples = []\r\n self._window = window\r\n NamedGauge.__init__(self, '%s_per_%s%s' % (name, window.amount(), window.unit()))", "def __init__(self, basedir, basename, isnap,\n SO_VEL_DISPERSIONS=False,\n SO_BAR_INFO=False,\n WRITE_SUB_IN_SNAP_FORMAT=False,\n id_bytes=8, float_bytes=4):\n \n # Store file name info\n self.basedir = basedir\n self.basename = basename\n self.isnap = isnap\n\n # Read the group catalogue\n datasets = (\"GroupLen\", \"GroupOffset\", \"SubLen\", \"SubOffset\")\n self.cat = GroupCatalogue(basedir, isnap, datasets,\n SO_VEL_DISPERSIONS, SO_BAR_INFO,\n WRITE_SUB_IN_SNAP_FORMAT,\n id_bytes, float_bytes)\n\n # Store file format info\n self.SO_VEL_DISPERSIONS = SO_VEL_DISPERSIONS\n self.SO_BAR_INFO = SO_BAR_INFO\n self.WRITE_SUB_IN_SNAP_FORMAT = WRITE_SUB_IN_SNAP_FORMAT\n self.id_bytes = id_bytes\n self.float_bytes = float_bytes\n\n # Find number of snapshot files in this snapshot\n snap = self.open_snap_file(0)\n self.num_snap_files = snap[\"Header\"].attrs[\"NumFilesPerSnapshot\"]\n self.npart_file = -np.ones((self.num_snap_files,), dtype=np.int64)\n\n # Find total particle number\n nptot = (snap[\"Header\"].attrs[\"NumPart_Total\"].astype(np.int64) + \n (snap[\"Header\"].attrs[\"NumPart_Total_HighWord\"].astype(np.int64) << 32))", "def HandleFiles(variables):\n\n # The template file is the html file into which we will write the\n # data from the stats file, formatted correctly for the gviz_api.\n template_file = open(variables[1], \"r\")\n page_template = template_file.read()\n template_file.close()\n\n # This is the path match pattern for finding stats files amongst\n # all the other files it could be. 
eg: *.stt\n file_pattern = variables[2]\n\n # This is the directory with files that we will use to do the comparison\n # against.\n baseline_dir = variables[3]\n snrs = ''\n filestable = {}\n filestable['dsnr'] = ''\n filestable['drate'] = ''\n filestable['avg'] = ''\n\n # Go through each metric in the list.\n for column in range(1,2):\n\n # Dirs is directories after the baseline to compare to the base.\n dirs = variables[4:len(variables)]\n\n # Find the metric files in the baseline directory.\n dir_list = sorted(fnmatch.filter(os.listdir(baseline_dir), file_pattern))\n\n for metric in ['avg','dsnr','drate']:\n description = {\"file\": (\"string\", \"File\")}\n\n # Go through each directory and add a column header to our description.\n countoverall = {}\n sumoverall = {}\n\n for directory in dirs:\n description[directory] = (\"number\", directory)\n countoverall[directory] = 0\n sumoverall[directory] = 0\n\n # Data holds the data for the visualization, name given comes from\n # gviz_api sample code.\n data = []\n for filename in dir_list:\n row = {'file': splitext(basename(filename))[0] }\n baseline_file_name = baseline_dir + \"/\" + filename\n\n # Read the metric file from each of the directories in our list.\n for directory in dirs:\n metric_file_name = directory + \"/\" + filename\n\n # If there is a metric file in the current directory, open it\n # and calculate its overall difference between it and the baseline\n # directory's metric file.\n if os.path.isfile(metric_file_name):\n overall = FileBetter(baseline_file_name, metric_file_name,\n column, metric)\n row[directory] = overall\n\n sumoverall[directory] += overall\n countoverall[directory] += 1\n\n data.append(row)\n\n # Add the overall numbers.\n row = {\"file\": \"OVERALL\" }\n if countoverall[directory]:\n for directory in dirs:\n row[directory] = sumoverall[directory] / countoverall[directory]\n data.append(row)\n\n # write the tables out\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n\n filestable[metric] = ( filestable[metric] + \"filestable_\" + metric +\n \"[\" + str(column) + \"]=\" + data_table.ToJSon()\n + \"\\n\" )\n\n filestable_avg = filestable['avg']\n filestable_dpsnr = filestable['dsnr']\n filestable_drate = filestable['drate']\n\n # Now we collect all the data for all the graphs. 
First the column\n # headers which will be Datarate and then each directory.\n columns = (\"datarate\",baseline_dir)\n description = {\"datarate\":(\"number\", \"Datarate\")}\n for directory in dirs:\n description[directory] = (\"number\", directory)\n\n description[baseline_dir] = (\"number\", baseline_dir)\n\n snrs = snrs + \"snrs[\" + str(column) + \"] = [\"\n\n # Now collect the data for the graphs, file by file.\n for filename in dir_list:\n\n data = []\n\n # Collect the file in each directory and store all of its metrics\n # in the associated gviz metrics table.\n all_dirs = dirs + [baseline_dir]\n for directory in all_dirs:\n\n metric_file_name = directory + \"/\" + filename\n if not os.path.isfile(metric_file_name):\n continue\n\n # Read and parse the metrics file storing it to the data we'll\n # use for the gviz_api.Datatable.\n metrics = ParseMetricFile(metric_file_name, column)\n for bitrate, metric in metrics:\n data.append({\"datarate\": bitrate, directory: metric})\n\n data_table = gviz_api.DataTable(description)\n data_table.LoadData(data)\n snrs = snrs + \"'\" + data_table.ToJSon(\n columns_order=tuple([\"datarate\",baseline_dir]+dirs)) + \"',\"\n\n snrs = snrs + \"]\\n\"\n\n formatters = \"\"\n for i in range(len(dirs)):\n formatters = \"%s formatter.format(better, %d);\" % (formatters, i+1)\n\n print FillForm(page_template, vars())\n return", "def _log_file_processing_stats(self, known_file_paths):\n # File Path: Path to the file containing the DAG definition\n # PID: PID associated with the process that's processing the file. May\n # be empty.\n # Runtime: If the process is currently running, how long it's been\n # running for in seconds.\n # Last Runtime: If the process ran before, how long did it take to\n # finish in seconds\n # Last Run: When the file finished processing in the previous run.\n headers = [\"File Path\", \"PID\", \"Runtime\", \"# DAGs\", \"# Errors\", \"Last Runtime\", \"Last Run\"]\n\n rows = []\n now = timezone.utcnow()\n for file_path in known_file_paths:\n last_runtime = self.get_last_runtime(file_path)\n num_dags = self.get_last_dag_count(file_path)\n num_errors = self.get_last_error_count(file_path)\n file_name = os.path.basename(file_path)\n file_name = os.path.splitext(file_name)[0].replace(os.sep, \".\")\n\n processor_pid = self.get_pid(file_path)\n processor_start_time = self.get_start_time(file_path)\n runtime = (now - processor_start_time) if processor_start_time else None\n last_run = self.get_last_finish_time(file_path)\n if last_run:\n seconds_ago = (now - last_run).total_seconds()\n Stats.gauge(f\"dag_processing.last_run.seconds_ago.{file_name}\", seconds_ago)\n\n rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))\n\n # Sort by longest last runtime. 
(Can't sort None values in python3)\n rows.sort(key=lambda x: x[3] or 0.0)\n\n formatted_rows = []\n for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:\n formatted_rows.append(\n (\n file_path,\n pid,\n f\"{runtime.total_seconds():.2f}s\" if runtime else None,\n num_dags,\n num_errors,\n f\"{last_runtime:.2f}s\" if last_runtime else None,\n last_run.strftime(\"%Y-%m-%dT%H:%M:%S\") if last_run else None,\n )\n )\n log_str = (\n \"\\n\"\n + \"=\" * 80\n + \"\\n\"\n + \"DAG File Processing Stats\\n\\n\"\n + tabulate(formatted_rows, headers=headers)\n + \"\\n\"\n + \"=\" * 80\n )\n\n self.log.info(log_str)", "def mgf(trange=['2017-03-27', '2017-03-28'],\n datatype='8sec',\n level='l2',\n suffix='',\n get_support_data=False,\n varformat=None,\n varnames=[],\n downloadonly=False,\n notplot=False,\n no_update=False,\n uname=None,\n passwd=None,\n time_clip=False,\n ror=True,\n coord='dsi',\n version=None):\n initial_notplot_flag = False\n if notplot:\n initial_notplot_flag = True\n\n if datatype == '8s' or datatype == '8':\n datatype = '8sec'\n elif datatype == '64':\n datatype = '64hz'\n elif datatype == '128':\n datatype = '128hz'\n elif datatype == '256':\n datatype = '256hz'\n\n prefix = 'erg_mgf_'+level+'_'\n if datatype == '8sec':\n file_res = 3600. * 24\n pathformat = 'satellite/erg/mgf/'+level+'/'+datatype + \\\n '/%Y/%m/erg_mgf_'+level+'_'+datatype+'_%Y%m%d_'\n else:\n file_res = 3600.\n pathformat = 'satellite/erg/mgf/'+level+'/'+datatype + \\\n '/%Y/%m/erg_mgf_'+level+'_'+datatype+'_' + coord + '_%Y%m%d%H_'\n\n if version is None:\n pathformat += 'v??.??.cdf'\n else:\n pathformat += version + '.cdf'\n\n loaded_data = load(pathformat=pathformat, file_res=file_res, trange=trange, level=level, datatype=datatype, prefix=prefix, suffix=suffix, get_support_data=get_support_data,\n varformat=varformat, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd)\n\n if (loaded_data is None) or (loaded_data == []):\n return loaded_data\n\n if (len(loaded_data) > 0) and ror:\n\n try:\n if isinstance(loaded_data, list):\n if downloadonly:\n cdf_file = cdflib.CDF(loaded_data[-1])\n gatt = cdf_file.globalattsget()\n else:\n gatt = get_data(loaded_data[-1], metadata=True)['CDF']['GATT']\n elif isinstance(loaded_data, dict):\n gatt = loaded_data[list(loaded_data.keys())[-1]]['CDF']['GATT']\n\n # --- print PI info and rules of the road\n print(' ')\n print(\n '**************************************************************************')\n print(gatt[\"LOGICAL_SOURCE_DESCRIPTION\"])\n print('')\n print('Information about ERG MGF')\n print('')\n print('PI: ', gatt['PI_NAME'])\n print(\"Affiliation: \"+gatt[\"PI_AFFILIATION\"])\n print('')\n print('RoR of ERG project common: https://ergsc.isee.nagoya-u.ac.jp/data_info/rules_of_the_road.shtml.en')\n print(\n 'RoR of MGF L2: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Mgf')\n print('Contact: erg_mgf_info at isee.nagoya-u.ac.jp')\n print(\n '**************************************************************************')\n except:\n print('printing PI info and rules of the road was failed')\n\n if initial_notplot_flag or downloadonly:\n return loaded_data\n\n if datatype == '8sec':\n\n # remove -1.0e+30\n\n clip(prefix + 'mag_'+datatype+'_dsi'+suffix, -1e+6, 1e6)\n clip(prefix + 'mag_'+datatype+'_gse'+suffix, -1e+6, 1e6)\n clip(prefix + 'mag_'+datatype+'_gsm'+suffix, -1e+6, 1e6)\n clip(prefix + 'mag_'+datatype+'_sm'+suffix, -1e+6, 1e6)\n\n clip(prefix + 
'magt_'+datatype+suffix, -1e+6, 1e6)\n\n clip(prefix + 'rmsd_'+datatype+'_dsi'+suffix, -1e+6, +1e+6)\n clip(prefix + 'rmsd_'+datatype+'_gse'+suffix, -1e+6, +1e+6)\n clip(prefix + 'rmsd_'+datatype+'_gsm'+suffix, -1e+6, +1e+6)\n clip(prefix + 'rmsd_'+datatype+'_sm'+suffix, -1e+6, +1e+6)\n\n clip(prefix + 'rmsd_'+datatype+suffix, 0., 80.)\n\n clip(prefix + 'dyn_rng_'+datatype+suffix, -120., +1e+6)\n\n clip(prefix + 'igrf_'+datatype+'_dsi'+suffix, -1e+6, +1e+6)\n clip(prefix + 'igrf_'+datatype+'_gse'+suffix, -1e+6, +1e+6)\n clip(prefix + 'igrf_'+datatype+'_gsm'+suffix, -1e+6, +1e+6)\n clip(prefix + 'igrf_'+datatype+'_sm'+suffix, -1e+6, +1e+6)\n\n # set yrange\n _, bdata = get_data(prefix + 'mag_'+datatype+'_dsi'+suffix)\n ylim(prefix + 'mag_'+datatype+'_dsi'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'mag_'+datatype+'_gse'+suffix)\n ylim(prefix + 'mag_'+datatype+'_gse'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'mag_'+datatype+'_gsm'+suffix)\n ylim(prefix + 'mag_'+datatype+'_gsm'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'mag_'+datatype+'_sm'+suffix)\n ylim(prefix + 'mag_'+datatype+'_sm'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n\n _, bdata = get_data(prefix + 'magt_'+datatype+suffix)\n ylim(prefix + 'magt_'+datatype+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n\n _, bdata = get_data(prefix + 'rmsd_'+datatype+suffix,)\n ylim(prefix + 'rmsd_'+datatype+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n\n _, bdata = get_data(prefix + 'rmsd_'+datatype+'_dsi'+suffix)\n ylim(prefix + 'rmsd_'+datatype+'_dsi'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'rmsd_'+datatype+'_gse'+suffix)\n ylim(prefix + 'rmsd_'+datatype+'_gse'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'rmsd_'+datatype+'_gsm'+suffix)\n ylim(prefix + 'rmsd_'+datatype+'_gsm'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'rmsd_'+datatype+'_sm'+suffix)\n ylim(prefix + 'rmsd_'+datatype+'_sm'+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n\n _, bdata = get_data(prefix + 'rmsd_'+datatype+suffix)\n ylim(prefix + 'rmsd_'+datatype+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n\n _, bdata = get_data(prefix + 'quality_'+datatype+suffix)\n ylim(prefix + 'quality_'+datatype+suffix,\n np.nanmin(bdata), np.nanmax(bdata))\n _, bdata = get_data(prefix + 'quality_'+datatype+'_gc'+suffix)\n ylim(prefix + 'quality_'+datatype+'_gc' +\n suffix, np.nanmin(bdata), np.nanmax(bdata))\n\n # set labels\n options(prefix + 'mag_'+datatype+'_dsi'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'mag_'+datatype+'_gse'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'mag_'+datatype+'_gsm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'mag_'+datatype+'_sm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n\n options(prefix + 'rmsd_'+datatype+'_dsi'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'rmsd_'+datatype+'_gse'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'rmsd_'+datatype+'_gsm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'rmsd_'+datatype+'_sm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n\n options(prefix + 'igrf_'+datatype+'_dsi'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'igrf_'+datatype+'_gse'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n options(prefix + 'igrf_'+datatype+'_gsm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n 
options(prefix + 'igrf_'+datatype+'_sm'+suffix,\n 'legend_names', ['Bx', 'By', 'Bz'])\n\n # set color of the labels\n options(prefix + 'mag_'+datatype+'_dsi' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'mag_'+datatype+'_gse' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'mag_'+datatype+'_gsm' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'mag_'+datatype+'_sm' +\n suffix, 'Color', ['b', 'g', 'r'])\n\n options(prefix + 'rmsd_'+datatype+'_dsi' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'rmsd_'+datatype+'_gse' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'rmsd_'+datatype+'_gsm' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'rmsd_'+datatype+'_sm' +\n suffix, 'Color', ['b', 'g', 'r'])\n\n options(prefix + 'quality_'+datatype+suffix, 'Color', ['r', 'g', 'b'])\n\n options(prefix + 'igrf_'+datatype+'_dsi' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'igrf_'+datatype+'_gse' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'igrf_'+datatype+'_gsm' +\n suffix, 'Color', ['b', 'g', 'r'])\n options(prefix + 'igrf_'+datatype+'_sm' +\n suffix, 'Color', ['b', 'g', 'r'])\n else:\n # remove -1.0e+30\n clip(prefix + 'mag_'+datatype+'_' + coord + suffix, -1e+6, 1e6)\n # set yrange\n _, bdata = get_data(prefix + 'mag_'+datatype+'_' + coord + suffix)\n ylim(prefix + 'mag_'+datatype+'_' + coord +\n suffix, np.nanmin(bdata), np.nanmax(bdata))\n # set labels\n options(prefix + 'mag_'+datatype+'_' + coord +\n suffix, 'legend_names', ['Bx', 'By', 'Bz'])\n # set color of the labels\n options(prefix + 'mag_'+datatype+'_' + coord +\n suffix, 'Color', ['b', 'g', 'r'])\n return loaded_data", "def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def metrics_group():", "def draw_all_labels(df,root_folder_path,root_folder_name,logger):\n # df=df[df.type==\"Color\"]\n len_images=df['file_id'].nunique()\n perc_list=[i*0.05 for i in range(0,20,1)]\n grouped_df=df.groupby(['file_id','class'])\n coordinate_names=['x_max','x_min','y_max','y_min']\n group_len=len(grouped_df)\n\n class_label_dict={}\n label_info_list=[]\n for ind,(name, group) in enumerate(grouped_df):\n img_name,class_name=name\n img_type=group['type'].values[0]\n bb_list=group[coordinate_names].values.astype(int)\n if class_name not in class_label_dict.keys():\n class_label_dict[class_name]=get_random_color()\n bb_color=class_label_dict[class_name]\n label_info_list.append([img_name,img_type,class_name,bb_color,bb_list])\n draw_label_on_image(root_folder_path,root_folder_name,img_name,img_type,class_name,bb_color,bb_list)\n perc=float(\"{:.2f}\".format((ind+1)/group_len))\n if perc in perc_list:\n perc_list.remove(perc)\n logger.write(\"Classes annotated: \"+str(ind+1)+\"/\"+str(group_len))\n # print(\"Label list generated.\")\n # pool = Pool(1)\n # pool.starmap(draw_label_on_one_image, zip(\n # label_info_list, itertools.repeat(root_folder_path), itertools.repeat(root_folder_name)))\n # pool.close()\n # pool.join()\n # print(\"Drawing labels is finished.\")", "def define_metrics(config):\n metrics = []\n if config.get(\"data.output.label.choice\") == 
\"segmentation\":\n metrics = [\n ext_sm.metrics.IOUScore(),\n ext_sm.metrics.FScore(beta=0.5),\n ext_sm.metrics.FScore(beta=2),\n ]\n metrics = []\n elif config.get(\"data.output.label.choice\") == \"inversion\":\n metrics = [\n rmae\n ]\n return metrics", "def monitorInterface(self, interface, prefix, freq):\n\n queuedRegex = re.compile(r'backlog\\s[^\\s]+\\s([\\d]+)p')\n droppedRegex = re.compile(r'dropped\\s([\\d]+),')\n intervalSec = freq\n cmd = \"tc -s qdisc show dev %s\" % (interface)\n fname = os.path.join(self.config.benchPath, '%s_switch_stats_%s.csv' %(prefix, interface))\n open(fname, 'w').write('timestamp,queued_packets,cumulative_dropped_packets\\n')\n info(\"**** [G2]: monitoring stats for\", interface, \"; will save results to\", fname, \"\\n\")\n while 1:\n p = Popen(cmd, shell=True, stdout=PIPE)\n output = p.stdout.read()\n matches1 = queuedRegex.findall(output)\n matches2 = droppedRegex.findall(output)\n if matches1 and matches2 and len(matches1) > 1 and len(matches2) > 1:\n t = \"%f\" %time()\n open(fname, 'a').write(t + ',' + matches1[1] + ',' + matches2[1] + '\\n')\n p.terminate()\n sleep(intervalSec)\n return", "def compute_metrics_on_directories_raw(dir_gt, dir_pred):\n\n lst_gt = sorted(glob(os.path.join(dir_gt, '*')), key=natural_order)\n lst_pred = sorted(glob(os.path.join(dir_pred, '*')), key=natural_order)\n\n res = []\n cardiac_phase = []\n file_names = []\n\n measure_names = ['Dice LV', 'Volume LV', 'Err LV(ml)',\n 'Dice RV', 'Volume RV', 'Err RV(ml)', 'Dice MYO', 'Volume MYO', 'Err MYO(ml)',\n 'Hausdorff LV', 'Hausdorff RV', 'Hausdorff Myo',\n 'ASSD LV', 'ASSD RV', 'ASSD Myo']\n\n res_mat = np.zeros((len(lst_gt), len(measure_names)))\n\n ind = 0\n for p_gt, p_pred in zip(lst_gt, lst_pred):\n if os.path.basename(p_gt) != os.path.basename(p_pred):\n raise ValueError(\"The two files don't have the same name\"\n \" {}, {}.\".format(os.path.basename(p_gt),\n os.path.basename(p_pred)))\n\n\n gt, _, header = load_nii(p_gt)\n pred, _, _ = load_nii(p_pred)\n zooms = header.get_zooms()\n res.append(metrics(gt, pred, zooms))\n cardiac_phase.append(os.path.basename(p_gt).split('.nii.gz')[0].split('_')[-1])\n\n file_names.append(os.path.basename(p_pred))\n\n res_mat[ind, :9] = metrics(gt, pred, zooms)\n\n for ii, struc in enumerate([3,1,2]):\n\n gt_binary = (gt == struc) * 1\n pred_binary = (pred == struc) * 1\n\n res_mat[ind, 9+ii] = hd(gt_binary, pred_binary, voxelspacing=zooms, connectivity=1)\n res_mat[ind, 12+ii] = assd(pred_binary, gt_binary, voxelspacing=zooms, connectivity=1)\n\n ind += 1\n\n return res_mat, cardiac_phase, measure_names, file_names", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def gauge_v6(self, name, value, tags=None, hostname=None, device_name=None):\n # Make sure we get the original arguments back and timestamp is not being received\n assert name == METRIC_NAME\n assert value == METRIC_VALUE\n assert tags == METRIC_TAGS\n assert 
hostname is None\n assert device_name is None", "def new_opfiles_info(self):\r\n conf = self.func.config_info()\r\n new_folders = self.func.get_folders(conf[\"path_to_new_opfiles\"])\r\n \r\n if len(new_folders) > 1 and len(new_folders) != 0:\r\n raise Exception(\"Only one folder must be in '0 NEW' folder!\")\r\n else:\r\n folder = new_folders[0] \r\n op, ac = self.operator_aircraft_info(folder)\r\n files = self.func.get_files(folder)\r\n files_info = self.get_files_info(files)\r\n\r\n opfiles_info = {\r\n \"Path\": folder,\r\n \"Operator\": op,\r\n \"Aircraft\": ac,\r\n \"FilesInfo\": files_info\r\n }\r\n\r\n return opfiles_info", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result", "def __place_statistics_labels(self):\n\n base_x = self.__statistics_coords[\"x\"]\n base_y = self.__statistics_coords[\"y\"]\n active_lines_label = Label(self.__main_window, textvariable=self.__active_lines_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_buses_label = Label(self.__main_window, textvariable=self.__active_buses_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n number_of_people_lable = Label(self.__main_window, textvariable=self.__number_of_people_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 18))\n session_time_lable = Label(self.__main_window, textvariable=self.__session_time_stringvar, fg=\"#1DB954\", bg = \"#000000\", font = (self.__font_name, 23))\n number_of_people_lable.place(x=base_x, y=base_y)\n active_lines_label.place(x=base_x-35, y=base_y + 35)\n number_of_buses_label.place(x=base_x+54, y=base_y + 69)\n session_time_lable.place(x=base_x-70, y=base_y + 116)", "def graph_create(host, host_path):\n 
graphs = list()\n for name in dash_profile['graphs']:\n log.info(\" Graph: %s\" % name)\n graph = list()\n # Skip undefined graphs\n if name not in graphdef.keys():\n log.error(\"%s not found in graphdef.yml\" % name)\n continue\n # Graph Type #1: Host Metrics\n # Identified by filesytem globbing\n elif 'glob_verify' in graphdef[name].keys():\n # Determine and test metric paths\n if 'glob_metrics' in graphdef[name].keys():\n glob_metrics = graphdef[name]['glob_metrics']\n metric_verify = True\n else:\n glob_metrics = graphdef[name]['glob_verify']\n metric_verify = False\n metric_glob = \"%s/%s\" % (host_path, glob_metrics)\n metric_paths = glob.glob(metric_glob)\n if len(metric_paths) <= 0:\n continue\n metric_paths.sort()\n for metric_path in metric_paths:\n graph_object = dict(graphdef[name])\n # Verify metric path\n if metric_verify:\n verify_glob = \"%s/%s\" % (metric_path,\n graphdef[name]['glob_verify'])\n del(graph_object['glob_metrics'])\n else:\n verify_glob = metric_path\n if len(glob.glob(verify_glob)) != 1:\n continue\n del(graph_object['glob_verify'])\n metric = os.path.basename(metric_path)\n log.debug(\" metric: %s\" % metric)\n graph = graph_compile(host, name, graph_object, metric)\n if len(graph) > 0:\n graphs.append(graph)\n # Graph Type #2: Carbon Match\n # Metrics reported directly by carbon server to itself\n elif ('carbon_match' in graphdef[name].keys() and\n graphdef[name]['carbon_match'] and\n host == dashconf['carbon_match']):\n graph_object = dict(graphdef[name])\n del graph_object['carbon_match']\n graph = graph_compile(dashconf['carbon_server'], name,\n graph_object, None)\n if len(graph) > 0:\n graphs.append(graph)\n return graphs", "def register(self, gauge):\r\n raise NotImplementedError", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def check_oplog(con, warning, critical, perf_data):\n warning = warning or 24 \n critical = critical or 4\n try:\n db = con.local\n 
ol=db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) \n if (db.system.namespaces.find_one({\"name\":\"local.oplog.rs\"}) != None) :\n oplog = \"oplog.rs\";\n else :\n ol=db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"})\n if (db.system.namespaces.find_one({\"name\":\"local.oplog.$main\"}) != None) :\n oplog = \"oplog.$main\";\n else :\n message = \"neither master/slave nor replica set replication detected\";\n return check_levels(None,warning,critical,message)\n\n try:\n set_read_preference(con.admin)\n data=con.local.command(pymongo.son_manipulator.SON([('collstats',oplog)]))\n except:\n data = con.admin.command(son.SON([('collstats',oplog)]))\n\n ol_size=data['size']\n ol_storage_size=data['storageSize']\n ol_used_storage=int(float(ol_size)/ol_storage_size*100+1)\n ol=con.local[oplog]\n firstc = ol.find().sort(\"$natural\",pymongo.ASCENDING).limit(1)[0]['ts']\n lastc = ol.find().sort(\"$natural\",pymongo.DESCENDING).limit(1)[0]['ts']\n time_in_oplog= (lastc.as_datetime()-firstc.as_datetime())\n message=\"Oplog saves \"+ str(time_in_oplog) + \" %d%% used\" %ol_used_storage \n try: #work starting from python2.7\n hours_in_oplog= time_in_oplog.total_seconds()/60/60\n except:\n hours_in_oplog= float(time_in_oplog.seconds + time_in_oplog.days * 24 * 3600)/60/60\n approx_level=hours_in_oplog*100/ol_used_storage\n message+=performance_data(perf_data,[(\"%.2f\" % hours_in_oplog,'oplog_time',warning,critical),(\"%.2f \" % approx_level, 'oplog_time_100_percent_used')])\n return check_levels(-approx_level,-warning,-critical,message)\n\n except Exception, e:\n return exit_with_general_critical(e)", "def get_path_and_label(self):\n classes = {'agricultural': 0,\n 'airplane': 1,\n 'baseballdiamond': 2,\n 'beach': 3,\n 'buildings': 4,\n 'chaparral': 5,\n 'denseresidential': 6,\n 'forest': 7,\n 'freeway': 8,\n 'golfcourse': 9,\n 'harbor': 10,\n 'intersection': 11,\n 'mediumresidential': 12,\n 'mobilehomepark': 13,\n 'overpass': 14,\n 'parkinglot': 15,\n 'river': 16,\n 'runway': 17,\n 'sparseresidential': 18,\n 'storagetanks': 19,\n 'tenniscourt': 20}\n image_path = []\n label = []\n for cat, enc in classes.items():\n cat_path = os.path.join(\n self.root, 'UCMerced_LandUse', self.data_mode, cat)\n cat_image = [os.path.join(cat_path, path)\n for path in os.listdir(cat_path)]\n cat_label = [enc] * len(cat_image)\n image_path += cat_image\n label += cat_label\n df = pd.DataFrame({'image': image_path, 'label': label})\n\n return df", "def report_connection(self, file_name):\n conn = self.connection_data['connection']\n notconn = 1 - self.connection_data['connection']\n x = self.connection_data.index\n\n # Calculate the duration\n duration = self.connection_data['duration'].sum()\n # Then prepare it to be printed.\n duration_str = f'{int(duration // 3600):02d}:{int(np.rint(duration % 3600 / 60)):02d}'\n\n # Calculate the percentage of time online.\n online = self.connection_data[self.connection_data['connection'] == True]['duration'].sum() \\\n / self.connection_data['duration'].sum()\n\n # color maps to plots\n div = plt.get_cmap('winter')\n\n # plot time series\n fig, ax = plt.subplots(figsize=(8, 2))\n ax.bar(x, conn, width=1, color=div(1.0))\n ax.bar(x, notconn, width=1, color=div(0.0))\n\n # hide borders\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n # hide ticks and x values\n ax.tick_params(axis='x', bottom=False)\n ax.tick_params(axis='y', left=False)\n 
ax.set_xticks([])\n ax.set_yticks([])\n\n # Set y axes limits because the max is 1. So the bars will only occupy half the figure. We can then use the\n # other half to put the annotations on.\n ax.set_ylim(0, 2)\n\n # sets legend\n ax.legend(['Online', 'Offline'], loc='upper left')\n\n # Test duration\n fig.text(x=0.5, y=0.75, s='Test Duration', ha='center', va='bottom')\n fig.text(x=0.5, y=0.75, s=duration_str, ha='center', va='top', color=div(duration / 3600), size='xx-large')\n\n # Online time\n fig.text(x=0.85, y=0.75, s='Online Time', ha='center', va='bottom')\n fig.text(x=0.85, y=0.75, s=f'{int(online * 100)} %', ha='center', va='top', color=div(online), size='xx-large')\n\n # Save figure\n plt.savefig(file_name, bbox_inches='tight')\n\n return fig", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def visualize_measurement_on_labels(labels_layer:\"napari.layers.Labels\", column:str = \"label\", viewer:\"napari.Viewer\" = None) -> \"napari.types.ImageData\":\n import pandas as pd\n import dask.array as da\n from dask import delayed\n from functools import partial\n from napari.utils import notifications\n if viewer is not None:\n notifications.show_warning(\"This function is deprecated! To adhere to future behavior and suppress this warning, use 'map_measurements_on_labels' instead (from 'Tools -> Measurement maps -> Measurements on labels (nsr)'.\")\n labels = labels_layer.data\n table = pd.DataFrame(labels_layer.properties)\n\n # special treatment for time series\n if len(labels.shape) == 4:\n # determine how the Frame column is called; in case there is any\n frame_column = None\n for potential_frame_column in ['frame', 'Frame']:\n if potential_frame_column in table.keys():\n frame_column = potential_frame_column\n break\n\n # Relabel one timepoint\n output_sample = relabel_timepoint(labels, table, column, frame_column, 0)\n\n lazy_arrays = []\n for i in range(labels.shape[0]):\n # build a delayed function call for each timepoint\n lazy_processed_image = delayed(\n partial(relabel_timepoint, labels, table, column, frame_column, i)\n )\n lazy_arrays.append(\n lazy_processed_image()\n )\n\n # build an array of delayed arrays\n dask_arrays = [\n [da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype)]\n if len(output_sample.shape) == 2\n else da.from_delayed(\n delayed_reader,\n shape=output_sample.shape,\n dtype=output_sample.dtype\n )\n for delayed_reader in lazy_arrays\n ]\n # Stack into one large dask.array\n stack = da.stack(\n dask_arrays,\n axis=0)\n return stack\n else:\n measurements = np.asarray(table[column]).tolist()\n return relabel(labels, measurements)", "def _create_pd_file_example():\n objects = metrics_pb2.Objects()\n\n # Store all the object classes as a list\n # Store all sequence names from val.txt\n classes = [\"CYCLIST\", \"PEDESTRIAN\", \"SIGN\", \"VEHICLE\"]\n val_seq_list = list()\n val_path = \"/team1/codes/3dObjDet/OpenPCDet_ravi/data/waymo/ImageSets/val_100b.txt\"\n with open(val_path, \"r\") as f:\n val_seq_list = f.readlines()\n val_seq_list = [v.split(\".\")[0] for v in val_seq_list]\n\n # this is the input to the algo\n tracking_output_base_pth = \"/team1/codes/individual/vkonduru/AB3DMOT/results/waymo_100_25/\"\n # 
print(val_seq_list)\n \n # Loop through each sequence waymo_25_5_val/VEHICLE/trk_withid/{$SEGMENT}\n for seq in val_seq_list:\n # obtain the groundtruth sequence context name, timestamp and camera\n gt_seq_file = \"/waymo-od/training/{}.tfrecord\".format(seq)\n if not os.path.exists(gt_seq_file):\n gt_seq_file = \"/waymo-od/validation/{}.tfrecord\".format(seq)\n dataset = tf.data.TFRecordDataset(str(gt_seq_file), compression_type='')\n tot = 0\n names = list()\n tss = list()\n cams = list()\n for cnt, data in enumerate(dataset):\n tot += 1\n # if cnt > 2:\n # break\n frame = dataset_pb2.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n names.append(frame.context.name)\n cams.append(frame.camera_labels[0].name)\n if tot > 1:\n assert(tss[-1] <= frame.timestamp_micros)\n tss.append(frame.timestamp_micros)\n # print(\"Total Entries: {}\".format(tot))\n # Loop through each class\n for cl in classes:\n cl_pth = tracking_output_base_pth + cl + \"/trk_withid/\"\n if not os.path.isdir(cl_pth):\n print(\"Outputs for the class {} are not present in this folder\".format(cl))\n continue\n # loop through all frames in each segment\n \n seq_pth = cl_pth + seq\n seq_dir = os.fsencode(seq_pth)\n idx = 0\n for fi in os.listdir(seq_dir):\n frame_no_txt = os.fsdecode(fi)\n fid = int(frame_no_txt.split(\".\")[0])\n #print(fid, idx)\n #assert(fid == idx)\n objs = np.loadtxt(seq_pth + \"/\" + frame_no_txt, dtype=str)\n objs = objs.reshape(-1, 17)\n print(\"Processing class: {}; sequence: {}; file {}\".format(cl, seq, frame_no_txt))\n nobj = objs.shape[0]\n # loop through all objects in given frame\n for i in range(nobj):\n #print(i, nobj, objs.shape)\n curr_obj = objs[i, :]\n o = metrics_pb2.Object()\n # The following 3 fields are used to uniquely identify a frame a prediction\n # is predicted at. Make sure you set them to values exactly the same as what\n # we provided in the raw data. Otherwise your prediction is considered as a\n # false negative.\n #o.context_name = ('context_name for the prediction. See Frame::context::name '\n # 'in dataset.proto.')\n o.context_name = names[fid]\n # The frame timestamp for the prediction. See Frame::timestamp_micros in\n # dataset.proto.\n invalid_ts = -1\n # o.frame_timestamp_micros = invalid_ts\n o.frame_timestamp_micros = tss[fid]\n # This is only needed for 2D detection or tracking tasks.\n # Set it to the camera name the prediction is for.\n # o.camera_name = dataset_pb2.CameraName.FRONT\n o.camera_name = cams[fid]\n if dataset_pb2.CameraName.FRONT != cams[idx]:\n print(\"Different camera!!\")\n # extract x, y, z, l, w, h, ry, score, object_id, type\n # OLD: x - 11, y - 12, z - 13, l - 10, w - 9, h - 8, ry - 14\n # NEW: x- 13, y- -11, z- -12+9/2, l- 9, w- 10, h- 8, ry- -14\n # Populating box and score.\n box = label_pb2.Label.Box()\n box.center_x = float(curr_obj[13])\n box.center_y = -1.0*float(curr_obj[11])\n box.center_z = -1.0*float(curr_obj[12]) + float(curr_obj[8])/2.0\n box.length = float(curr_obj[9])\n box.width = float(curr_obj[10])\n box.height = float(curr_obj[8])\n box.heading = -1.0*float(curr_obj[14])\n o.object.box.CopyFrom(box)\n # This must be within [0.0, 1.0]. 
It is better to filter those boxes with\n # small scores to speed up metrics computation.\n o.score = float(curr_obj[15])\n # For tracking, this must be set and it must be unique for each tracked\n # sequence.\n o.object.id = curr_obj[16]\n # Use correct type.\n \"\"\"\n enum Type {\n TYPE_UNKNOWN = 0;\n TYPE_VEHICLE = 1;\n TYPE_PEDESTRIAN = 2;\n TYPE_SIGN = 3;\n TYPE_CYCLIST = 4;\n }\n \"\"\"\n # o.object.type = label_pb2.Label.TYPE_PEDESTRIAN\n if cl == \"CYCLIST\":\n o.object.type = label_pb2.Label.TYPE_CYCLIST\n elif cl == \"VEHICLE\":\n o.object.type = label_pb2.Label.TYPE_VEHICLE\n elif cl == \"SIGN\":\n o.object.type = label_pb2.Label.TYPE_SIGN\n elif cl == \"PEDESTRIAN\":\n o.object.type = label_pb2.Label.TYPE_PEDESTRIAN\n else:\n o.object.type = label_pb2.Label.TYPE_UNKNOWN\n\n objects.objects.append(o)\n # increase the frame index now\n idx += 1\n #print(idx, tot)\n #assert(idx == tot)\n\n # Add more objects. Note that a reasonable detector should limit its maximum\n # number of boxes predicted per frame. A reasonable value is around 400. A\n # huge number of boxes can slow down metrics computation.\n\n # file to save the preds.bin to\n save_pth = \"/team1/codes/3dObjDet/OpenPCDet_ravi/output/tracking_bins/waymo_100_25/preds.bin\"\n # Write objects to a file.\n f = open(save_pth, 'wb')\n f.write(objects.SerializeToString())\n f.close()", "def radiator(env):\n envs = environments()\n check_env(env, envs)\n\n if env == '*':\n query_type = ''\n if get_db_version(puppetdb) < (4, 0, 0):\n query_type = 'type=default,'\n query = None\n metrics = get_or_abort(\n puppetdb.metric,\n 'puppetlabs.puppetdb.population:%sname=num-nodes' % query_type)\n num_nodes = metrics['Value']\n else:\n query = AndOperator()\n metric_query = ExtractOperator()\n\n query.add(EqualsOperator(\"catalog_environment\", env))\n query.add(EqualsOperator(\"facts_environment\", env))\n metric_query.add_field(FunctionOperator('count'))\n metric_query.add_query(query)\n\n metrics = get_or_abort(\n puppetdb._query,\n 'nodes',\n query=metric_query)\n num_nodes = metrics[0]['count']\n\n nodes = puppetdb.nodes(\n query=query,\n unreported=app.config['UNRESPONSIVE_HOURS'],\n with_status=True\n )\n\n stats = {\n 'changed_percent': 0,\n 'changed': 0,\n 'failed_percent': 0,\n 'failed': 0,\n 'noop_percent': 0,\n 'noop': 0,\n 'skipped_percent': 0,\n 'skipped': 0,\n 'unchanged_percent': 0,\n 'unchanged': 0,\n 'unreported_percent': 0,\n 'unreported': 0,\n }\n\n for node in nodes:\n if node.status == 'unreported':\n stats['unreported'] += 1\n elif node.status == 'changed':\n stats['changed'] += 1\n elif node.status == 'failed':\n stats['failed'] += 1\n elif node.status == 'noop':\n stats['noop'] += 1\n elif node.status == 'skipped':\n stats['skipped'] += 1\n else:\n stats['unchanged'] += 1\n\n try:\n stats['changed_percent'] = int(100 * (stats['changed'] /\n float(num_nodes)))\n stats['failed_percent'] = int(100 * stats['failed'] / float(num_nodes))\n stats['noop_percent'] = int(100 * stats['noop'] / float(num_nodes))\n stats['skipped_percent'] = int(100 * (stats['skipped'] /\n float(num_nodes)))\n stats['unchanged_percent'] = int(100 * (stats['unchanged'] /\n float(num_nodes)))\n stats['unreported_percent'] = int(100 * (stats['unreported'] /\n float(num_nodes)))\n except ZeroDivisionError:\n stats['changed_percent'] = 0\n stats['failed_percent'] = 0\n stats['noop_percent'] = 0\n stats['skipped_percent'] = 0\n stats['unchanged_percent'] = 0\n stats['unreported_percent'] = 0\n\n if ('Accept' in request.headers and\n 
request.headers[\"Accept\"] == 'application/json'):\n return jsonify(**stats)\n\n return render_template(\n 'radiator.html',\n stats=stats,\n total=num_nodes\n )", "def generate_gantt_chart(\n logfile,\n cores,\n minute_scale=10,\n space_between_minutes=50,\n colors=[\"#7070FF\", \"#4E4EB2\", \"#2D2D66\", \"#9B9BFF\"],\n):\n\n # add the html header\n html_string = \"\"\"<!DOCTYPE html>\n <head>\n <style>\n #content{\n width:99%;\n height:100%;\n position:absolute;\n }\n\n .node{\n background-color:#7070FF;\n border-radius: 5px;\n position:absolute;\n width:20px;\n white-space:pre-wrap;\n }\n\n .line{\n position: absolute;\n color: #C2C2C2;\n opacity: 0.5;\n margin: 0px;\n }\n\n .time{\n position: absolute;\n font-size: 16px;\n color: #666666;\n margin: 0px;\n }\n\n .bar{\n position: absolute;\n height: 1px;\n opacity: 0.7;\n }\n\n .dot{\n position: absolute;\n width: 1px;\n height: 1px;\n background-color: red;\n }\n .label {\n width:20px;\n height:20px;\n opacity: 0.7;\n display: inline-block;\n }\n </style>\n </head>\n\n <body>\n <div id=\"content\">\n <div style=\"display:inline-block;\">\n \"\"\"\n\n close_header = \"\"\"\n </div>\n <div style=\"display:inline-block;margin-left:60px;vertical-align: top;\">\n <p><span><div class=\"label\" style=\"background-color:#90BBD7;\"></div> Estimated Resource</span></p>\n <p><span><div class=\"label\" style=\"background-color:#03969D;\"></div> Actual Resource</span></p>\n <p><span><div class=\"label\" style=\"background-color:#f00;\"></div> Failed Node</span></p>\n </div>\n \"\"\"\n\n # Read in json-log to get list of node dicts\n nodes_list = log_to_dict(logfile)\n\n # Create the header of the report with useful information\n start_node = nodes_list[0]\n last_node = nodes_list[-1]\n duration = (last_node[\"finish\"] - start_node[\"start\"]).total_seconds()\n\n # Get events based dictionary of node run stats\n events = create_event_dict(start_node[\"start\"], nodes_list)\n\n # Summary strings of workflow at top\n html_string += (\n \"<p>Start: \" + start_node[\"start\"].strftime(\"%Y-%m-%d %H:%M:%S\") + \"</p>\"\n )\n html_string += (\n \"<p>Finish: \" + last_node[\"finish\"].strftime(\"%Y-%m-%d %H:%M:%S\") + \"</p>\"\n )\n html_string += \"<p>Duration: \" + f\"{duration / 60:.2f}\" + \" minutes</p>\"\n html_string += \"<p>Nodes: \" + str(len(nodes_list)) + \"</p>\"\n html_string += \"<p>Cores: \" + str(cores) + \"</p>\"\n html_string += close_header\n # Draw nipype nodes Gantt chart and runtimes\n html_string += draw_lines(\n start_node[\"start\"], duration, minute_scale, space_between_minutes\n )\n html_string += draw_nodes(\n start_node[\"start\"],\n nodes_list,\n cores,\n minute_scale,\n space_between_minutes,\n colors,\n )\n\n # Get memory timeseries\n estimated_mem_ts = calculate_resource_timeseries(events, \"estimated_memory_gb\")\n runtime_mem_ts = calculate_resource_timeseries(events, \"runtime_memory_gb\")\n # Plot gantt chart\n resource_offset = 120 + 30 * cores\n html_string += draw_resource_bar(\n start_node[\"start\"],\n last_node[\"finish\"],\n estimated_mem_ts,\n space_between_minutes,\n minute_scale,\n \"#90BBD7\",\n resource_offset * 2 + 120,\n \"Memory\",\n )\n html_string += draw_resource_bar(\n start_node[\"start\"],\n last_node[\"finish\"],\n runtime_mem_ts,\n space_between_minutes,\n minute_scale,\n \"#03969D\",\n resource_offset * 2 + 120,\n \"Memory\",\n )\n\n # Get threads timeseries\n estimated_threads_ts = calculate_resource_timeseries(events, \"estimated_threads\")\n runtime_threads_ts = 
calculate_resource_timeseries(events, \"runtime_threads\")\n # Plot gantt chart\n html_string += draw_resource_bar(\n start_node[\"start\"],\n last_node[\"finish\"],\n estimated_threads_ts,\n space_between_minutes,\n minute_scale,\n \"#90BBD7\",\n resource_offset,\n \"Threads\",\n )\n html_string += draw_resource_bar(\n start_node[\"start\"],\n last_node[\"finish\"],\n runtime_threads_ts,\n space_between_minutes,\n minute_scale,\n \"#03969D\",\n resource_offset,\n \"Threads\",\n )\n\n # finish html\n html_string += \"\"\"\n </div>\n </body>\"\"\"\n\n # save file\n with open(logfile + \".html\", \"w\") as html_file:\n html_file.write(html_string)", "def test_gauge(self):\n with patch(\"redis_metrics.templatetags.redis_metric_tags.get_r\") as mock_r:\n inst = mock_r.return_value\n inst.get_gauge.return_value = 100\n\n size = 50\n maximum = 200\n result = taglib.gauge(\"test-slug\", maximum, size)\n expected_result = {\n 'slug': \"test-slug\",\n 'current_value': 100,\n 'max_value': maximum,\n 'size': size,\n 'diff': maximum - 100\n }\n self.assertEqual(result, expected_result)\n mock_r.assert_called_once_with()\n inst.get_gauge.assert_called_once_with(\"test-slug\")", "def detectGate(graph,label_file,file_name):\n input_height = 192\n input_width = 192\n input_mean = 0\n input_std = 255\n input_layer = \"Placeholder\"\n output_layer = \"final_result\"\n \n \n \n\n \n\n \n t = read_tensor_from_image_file(\n file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n \n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name)\n output_operation = graph.get_operation_by_name(output_name)\n\n with tf.Session(graph=graph) as sess:\n results = sess.run(output_operation.outputs[0], {\n input_operation.outputs[0]: t\n })\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n #for i in top_k:\n # print(labels[i], results[i])\n \n gresults = float(\"{:.4f}\".format(results[top_k[0]]))\n \n labelandimage = \"{0} \\r\\n {1} - {2}\".format(file_name,labels[top_k[0]],gresults)\n \n label = tk.Label(canvas,text=labelandimage,bg=\"gray\")\n label.pack()\n\n print (labels[top_k[0]], results[top_k[0]])", "def heatmap_allocations(\n self, id_value, id_type, start_date=None, end_date=None, freq=None\n ):\n\n start_date, end_date, freq = self.get_time_parameters(\n start_date, end_date, freq\n )\n\n try:\n df = self.get_allocations(id_value, id_type, start_date, end_date, freq)\n\n if id_type == \"person\":\n fmt = \".0%\"\n\n if id_value == \"ALL\":\n title = (\n \"Total Person Allocation (% FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n else:\n title = (\n df.columns.name\n + \" Allocation (% FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n\n elif id_type == \"project\":\n fmt = \".1f\"\n\n if id_value == \"ALLOCATED\":\n title = (\n \"Project Allocated Capacity (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n elif id_value == \"CONFIRMED\":\n title = (\n \"Project Demand (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n elif id_value == \"PEOPLE_REQ\":\n title = (\n \"Project People Required (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n else:\n title = (\n df.columns.name\n + \" Allocation (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n\n elif id_type == \"placeholder\":\n fmt = \".1f\"\n\n if 
id_value == \"ALL\":\n title = (\n \"Total Placeholder Allocation (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n else:\n title = (\n df.columns.name\n + \" Allocation (FTE @ \"\n + str(self.wim.work_hrs_per_day)\n + \" hrs/day)\"\n )\n\n else:\n raise ValueError(\"id_type must be person, project or placeholder\")\n\n # change date format for prettier printing\n df = self.format_date_index(df, freq)\n\n fig = plt.figure(figsize=(df.shape[0], df.shape[1]))\n ax = fig.gca()\n # sort by largest values\n sns.heatmap(\n df.T.sort_values(by=[col for col in df.T.columns], ascending=False),\n linewidths=1,\n cmap=\"Reds\",\n cbar=False,\n fmt=fmt,\n annot=True,\n annot_kws={\"fontsize\": 14},\n ax=ax,\n )\n\n ax.set_ylabel(\"\")\n ax.set_title(title)\n\n return fig\n\n except ValueError:\n return None", "def gauge(name, value):\n metric = _get_metric(name) or metrics.new_gauge(name)\n metric.notify(value)", "def list_gauge_server(self, feed_id, server_id, metric_enum, **kwargs):\n metric_id = self._metric_id_guage_server(feed_id=feed_id, server_id=server_id,\n metric_enum=metric_enum)\n return self.list_gauge(metric_id=metric_id, **kwargs)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def init():\n \n # Check if metric already present in the metric_map\n if system_power_consumption not in metric_map:\n # Create metric and add it to metric_map\n metric_map[system_power_consumption] = Gauge(system_power_consumption, \"System Power Consumption\")\n \n if psu_health not in metric_map:\n metric_map[psu_health] = Gauge(psu_health, \"PSU Overall Health\")\n \n print(\"Initialized Power Exporter...\")", "def stats_compute(self, *args, **kwargs):\n totalElements = 0\n totalKeys = 0\n totalSize = 0\n l_stats = []\n d_report = {}\n str_report = \"\"\n l_range = []\n\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse),\n desc = ' Processing stats')\n else:\n l_range = sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse)\n\n for k, v in l_range:\n try:\n if not self.args['du'] and not self.args['duf']:\n str_report += \"files: %5d│ raw_size: %12d│ human_size: %8s│ dir: %s\\n\" % (\\\n len(self.d_inputTree[k]),\n self.d_inputTreeCallback[k]['diskUsage_raw'],\n self.d_inputTreeCallback[k]['diskUsage_human'],\n k)\n else:\n str_report += '%-10s%s\\n' % (\n self.d_inputTreeCallback[k]['diskUsage_human'], k)\n except:\n pass\n d_report = {\n 'files': len(self.d_inputTree[k]),\n 'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],\n 'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],\n 'path': k\n }\n l_stats.append(d_report)\n totalElements += len(v)\n totalKeys += 1\n totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']\n str_totalSize_human = self.sizeof_fmt(totalSize)\n return {\n 'status': True,\n 'report': str_report,\n 'dirs': totalKeys,\n 'files': totalElements,\n 'totalSize': totalSize,\n 'totalSize_human': str_totalSize_human,\n 'l_stats': l_stats,\n 'runTime': other.toc()\n }", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': 
self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def derive_newrelic_volume(self):\n # read and write volume\n self.update_metric(\"newrelic/volume_reads\", self.sum_of([\"status/com_select\", \"status/qcache_hits\"]))\n self.update_metric(\"newrelic/volume_writes\", self.sum_of([\"status/com_insert\", \"status/com_insert_select\",\n \"status/com_update\", \"status/com_update_multi\",\n \"status/com_delete\", \"status/com_delete_multi\",\n \"status/com_replace\", \"status/com_replace_select\"]))", "def __init__ (self, name=\"TrigEFMissingETOnlineMonitoring_alt\"):\n super(TrigEFMissingETOnlineMonitoring_alt, self).__init__(name)\n self.defineTarget(\"Online\")\n # measurement\n self.Histograms = [ hEx_log, hEy_log, hEz_log, hMET_log, hSumEt_log ]\n self.Histograms += [ hMET_lin, hMETStatus ]\n self.Histograms += [ hMETPhi ]", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def create_labels(filepath):\r\n \r\n filelist = os.listdir(filepath)\r\n columns = ['filename','label']\r\n label_df = pd.DataFrame(columns = columns)\r\n count = 0\r\n col1 = []\r\n col2 = []\r\n \r\n for file in filelist:\r\n \r\n name = file[:-4]\r\n imagename = name+'.png'\r\n absolute_path = os.path.join(filepath,file)\r\n \r\n f = open(absolute_path,\"r\")\r\n classname = f.read(3).split(\" \")\r\n print(classname)\r\n print(classname[0])\r\n 
\r\n col1.append(imagename)\r\n col2.append(classname[0])\r\n count += 1\r\n \r\n \r\n label_df = pd.DataFrame({'filename': col1, 'label': col2}) \r\n return label_df" ]
[ "0.5539488", "0.5088195", "0.507494", "0.5027533", "0.49304163", "0.4900687", "0.4889911", "0.47771588", "0.47663313", "0.47630692", "0.47085527", "0.46948", "0.46515706", "0.45758998", "0.45718196", "0.4553225", "0.45504344", "0.45424002", "0.453815", "0.45122313", "0.45104492", "0.45063064", "0.4500496", "0.44973612", "0.44908875", "0.4483738", "0.44833496", "0.44704238", "0.44648114", "0.44597092", "0.4443394", "0.4399691", "0.43826008", "0.4371322", "0.43591002", "0.43577296", "0.43567547", "0.4356285", "0.43553293", "0.43538824", "0.4351607", "0.43460357", "0.43421078", "0.43405786", "0.4339772", "0.43345064", "0.43337312", "0.43328586", "0.43311217", "0.43307096", "0.4316046", "0.43148246", "0.4302347", "0.4302035", "0.42989698", "0.42984927", "0.42961383", "0.428958", "0.42752007", "0.42686567", "0.42666376", "0.42665434", "0.42664838", "0.4261018", "0.4260995", "0.42528915", "0.42528543", "0.42438117", "0.42334804", "0.42326283", "0.42300493", "0.4229937", "0.42290708", "0.4228543", "0.42249483", "0.42240614", "0.42092785", "0.42044637", "0.4200082", "0.4197534", "0.41941682", "0.418922", "0.418164", "0.41804042", "0.41801286", "0.41792423", "0.41782552", "0.41780692", "0.417583", "0.41741967", "0.41626233", "0.41604894", "0.41426268", "0.41364965", "0.41259634", "0.41252035", "0.41240224", "0.4123787", "0.4118757", "0.4113852" ]
0.5900812
0
Builds and sends an embed message with new commits information.
async def process_push_hook(push: models.PushHook):
    repository = push.repository
    project = push.project
    commit_str = "commit" if push.total_commits_count == 1 else "commits"
    # Show link to commit compare if there's more than one commit
    if push.total_commits_count > 1:
        embed_url = f"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}"
    else:
        embed_url = f"{repository.homepage}/commit/{push.after[:7]}"

    if push.before == EMPTY_COMMIT:
        embed = discord.Embed(title=f"[{project.namespace}/{project.name}] New branch created {push.branch}",
                              url=embed_url, colour=discord.Colour.light_grey())
        embed.set_author(name=push.user_name, icon_url=push.user_avatar)
        await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
    elif push.after == EMPTY_COMMIT:
        embed = discord.Embed(title=f"[{project.namespace}/{project.name}] Branch deleted {push.branch}",
                              url=embed_url, colour=discord.Colour.light_grey())
        embed.set_author(name=push.user_name, icon_url=push.user_avatar)
        await send_message(None, embed=embed, avatar_url=push.project.avatar_url)

    # If there are no commits, do not show a message
    if not push.total_commits_count:
        return

    embed = discord.Embed(title=f"[{project.namespace}/{project.name}:{push.branch}] "
                                f"{push.total_commits_count} new {commit_str}",
                          url=embed_url, colour=discord.Colour.blurple())
    embed.set_author(name=push.user_name, icon_url=push.user_avatar)
    embed.description = ""
    for commit in push.commits:
        message = commit.message.splitlines()[0]
        embed.description += f"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\n"

    print("Sending push message")
    await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def command(self, bot, comm, groups):\n commit_message = self.plugin.get_commit_message()\n bot.reply(comm, u'{user}: {msg}', kwvars={'msg': commit_message})", "def _generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n mes_author = author\n else:\n mes_author = self._author\n if not msg:\n msg = f\"Commit via python client {__version__}\"\n ci = {\"commit_info\": {\"author\": mes_author, \"message\": msg}}\n return ci", "def commit(self, msg=None):\n self.log.debug(\"committing in git: %s\" % msg)\n completemsg = \"EasyBuild-commit from %s (time: %s, user: %s) \\n%s\" % (socket.gethostname(),\n time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n getpass.getuser(),\n msg)\n self.log.debug(\"git status: %s\" % self.client.status())\n try:\n self.client.commit('-am \"%s\"' % completemsg)\n self.log.debug(\"succesfull commit\")\n except GitCommandError, err:\n self.log.warning(\"Commit from working copy %s (msg: %s) failed, empty commit?\\n%s\" % (self.wc, msg, err))\n try:\n info = self.client.push()\n self.log.debug(\"push info: %s \" % info)\n except GitCommandError, err:\n self.log.warning(\"Push from working copy %s to remote %s (msg: %s) failed: %s\" % (self.wc,\n self.repo,\n msg,\n err))", "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. 
{:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])", "async def changelog(self, ctx: commands.Context):\n status, commits = GitHub().repos.harkonenbade.yutu.commits.get(per_page=10)\n if status == 200:\n await ctx.send(content=\"```Changelog:\\n{}```\".format(\"\\n\".join([\"- {}\".format(c['commit']['message'])\n for c in commits])))\n else:\n await ctx.send(content=\"Error: Cannot reach github\")", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def embed():", "def create_commit(self, event_data_yaml):\n os.chdir(str(self.repository_path))\n sh.git.checkout(self.branch)\n sh.git.add(self.event_dir)\n message_body = (\n '\\n\\nEvent config:\\n~~~yaml\\n{}\\n~~~\\n'.format(event_data_yaml)\n + '\\nScraped with [pyvideo_scrape]'\n + '(https://github.com/pyvideo/pyvideo_scrape)')\n if self.minimal_download:\n message = ('Minimal download: '\n + '{}\\n\\nMinimal download executed for #{}'.format(\n self.title, self.issue)\n + '\\n\\nOnly data that needs [no review](https://'\n + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'\n + '\\nThis event needs further scraping and human '\n + 'reviewing for the description and other data to show.'\n + message_body)\n sh.git.commit('-m', message)\n sh.git.push('--set-upstream', 'origin', self.branch)\n # ~ sh.git.push('--set-upstream', '--force', 
'origin', self.branch)\n sh.git.checkout('master')\n else:\n message = (\n 'Scraped {}\\n\\nFixes #{}'.format(self.branch, self.issue)\n + message_body)\n sh.git.commit('-m', message)\n sh.git.checkout('master')\n logger.debug('Conference {} commited', self.branch)", "async def github(self, ctx):\n\n embed = discord.Embed(color=ctx.me.color)\n embed.set_thumbnail(url='https://cdn2.iconfinder.com/data/icons/black-' +\n 'white-social-media/64/social_media_logo_github-512.png')\n embed.add_field(name='🔗 Github Repo',\n value=f'[Klikk her]({self.bot.misc[\"source_code\"]}) for å se den dritt skrevne kildekoden min')\n await Defaults.set_footer(ctx, embed)\n await ctx.send(embed=embed)", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def git_commit(self, msg):\n self.git_repo.git.add(all=True)\n self.git_repo.git.commit(message='[dots] {}'.format(msg))", "async def version_command(self, ctx):\n member = ctx.message.server.get_member(self.bot.user.id)\n current_commit = get_current_commit()\n commit_url = member.game.url + '/commit/' + current_commit\n msg = await self.bot.send_message(ctx.message.channel, 'I am currently running on commit `{}`\\n\\n{}'.format(current_commit, commit_url))", "def update(repository, args, **_):\n _log(repository, 'INFO', \"Going to build commit %s\" % args[2][:7])", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. 
The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "def cmd_commit(message):\n return ['git', 'commit', '-m', message]", "def commit(self, message, author, *args):\n return self.cmd('commit', '-m ' + message, '--author=', *args)", "def git_webhook():\n client = MongoClient(os.getenv('MONGODB_URI', 'mongodb://localhost:27017'))\n database = client.get_database()\n content = {\n \"event\": request.headers['X-GitHub-Event'],\n \"payload\" : request.json,\n \"date\": datetime.utcnow()\n }\n log.info(\"Content Received - \", request.headers['X-GitHub-Delivery'])\n inserted_id = database.events.insert_one(content).inserted_id\n log.info(\"Content Inserted - \", inserted_id)\n return jsonify({\n \"message\": \"Okay!\"\n })", "async def info(self, ctx):\n python = sys.version_info\n\n start = datetime.now()\n await ctx.trigger_typing()\n end = datetime.now()\n\n process = psutil.Process()\n\n embed = discord.Embed(title='Info',\n color=self.bot.color)\n embed.add_field(name='Latest Changelog',\n value='Restructured the project.',\n inline=False)\n embed.add_field(name='Creator',\n value='\\n'.join(self.bot.get_user(owner).mention for owner in self.bot.owner_ids))\n embed.add_field(name='Created on',\n value=f'{self.bot.created_on.strftime(\"%m/%d/%Y\")}\\n'\n f'(~{timeago.format(self.bot.created_on, datetime.utcnow())})')\n embed.add_field(name='Made With',\n value=f'[Python {python.major}.{python.minor}.{python.micro}](https://www.python.org/)\\n'\n f'[discord.py {discord.__version__}](https://discordpy.readthedocs.io/en/latest/)')\n embed.add_field(name='Status',\n value=f'Ping: {(end - start).total_seconds() * 1000:.2f}ms\\n'\n f'CPU: {process.cpu_percent()}%\\n'\n f'RAM: {process.memory_info().rss / 1048576:.2f}MB') # bits to bytes\n embed.add_field(name='Uptime',\n value='Online since:\\n'\n f'{self.bot.uptime.strftime(\"%m/%d/%Y %H:%M UTC\")}\\n'\n f'(~{timeago.format(self.bot.uptime, datetime.utcnow())})')\n embed.add_field(name='Statistics',\n value=f'Commands Run: {1003}\\n'\n f'Guilds: {len(list(self.bot.guilds))}\\n'\n f'Users: {len(list(self.bot.get_all_members()))} '\n f'(Unique: {len(set(self.bot.get_all_members()))})')\n embed.add_field(name='Acknowledgements',\n value='<@113104128783159296> - Answering a lot of questions I had, couldn\\'t have done it with you!\\n'\n '`[RKN]` - Testing! 
thanks guys :)',\n inline=False)\n\n await ctx.send(embed=embed)", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def fetch():\n project = get_project(require=True)\n resp = request('post', '/api/v0/projects/{id}/fetch/'.format(id=project.id))\n data = resp.json()\n commits = data.get('commits', ())\n if commits:\n for commit in commits:\n success('Fetched: {ref} ({identifier})'.format(ref=commit['ref'], identifier=commit['identifier']))\n success('{n} new commits were fetched!'.format(n=len(commits)))\n else:\n info('No new commits.')\n errors = data.get('errors', ())\n for error in errors:\n warning(error)", "async def changelog():\n await bot.say('https://discord.gg/y2PcWMM')", "def commit_msg(rev):\n return (\n subprocess.check_output([\"git\", \"show\", \"--pretty=format:%s\", \"-s\", rev])\n .decode()\n .strip()\n )", "def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed", "async def github_repo_info(self, ctx: commands.Context, *repo: str) -> None:\n repo = \"/\".join(repo)\n if repo.count(\"/\") != 1:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The repository should look like `user/reponame` or `user reponame`.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n async with ctx.typing():\n repo_data = await self.fetch_data(f\"{GITHUB_API_URL}/repos/{quote(repo)}\")\n\n # There won't be a message key if this repo exists\n if \"message\" in repo_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=\"The requested repository was not found.\",\n 
colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n embed = discord.Embed(\n title=repo_data[\"name\"],\n description=repo_data[\"description\"],\n colour=discord.Colour.blurple(),\n url=repo_data[\"html_url\"]\n )\n\n # If it's a fork, then it will have a parent key\n try:\n parent = repo_data[\"parent\"]\n embed.description += f\"\\n\\nForked from [{parent['full_name']}]({parent['html_url']})\"\n except KeyError:\n log.debug(\"Repository is not a fork.\")\n\n repo_owner = repo_data[\"owner\"]\n\n embed.set_author(\n name=repo_owner[\"login\"],\n url=repo_owner[\"html_url\"],\n icon_url=repo_owner[\"avatar_url\"]\n )\n\n repo_created_at = datetime.strptime(repo_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y\")\n last_pushed = datetime.strptime(repo_data[\"pushed_at\"], \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%d/%m/%Y at %H:%M\")\n\n embed.set_footer(\n text=(\n f\"{repo_data['forks_count']} ⑂ \"\n f\"• {repo_data['stargazers_count']} ⭐ \"\n f\"• Created At {repo_created_at} \"\n f\"• Last Commit {last_pushed}\"\n )\n )\n\n await ctx.send(embed=embed)", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "def notify(self, builder):\n\n # Build the <files> section for the template...\n commit = builder.commit\n files = E.files()\n\n commit_msg = commit.message.strip()\n commit_msg = re.sub(r'[\\x00-\\x09\\x0B-\\x1f\\x7f-\\xff]', '', commit_msg)\n\n for filename in commit.files_changed:\n safe_filename = re.sub(r'[\\x00-\\x09\\x0B-\\x1f\\x7f-\\xff]', '', filename)\n file_element = E.file(safe_filename)\n files.append(file_element)\n\n # Build the message\n cia_message = self.MESSAGE()\n cia_message.append(self._generator)\n\n source = self.SOURCE(E.project(\"KDE\"))\n source.append(E.module(self.repository.path))\n source.append(E.branch(self.repository.ref_name))\n\n cia_message.append(source)\n cia_message.append(self.TIMESTAMP(commit.date))\n\n body = self.BODY()\n\n commit_data = self.COMMIT()\n commit_data.append(E.author(commit.author_name))\n commit_data.append(E.revision(commit.description))\n commit_data.append(files)\n commit_data.append(E.log(commit_msg))\n commit_data.append(E.url(commit.url))\n\n body.append(commit_data)\n 
cia_message.append(body)\n\n # Convert to a string\n commit_xml = etree.tostring(cia_message)\n\n # Craft the email....\n message = MIMEText( commit_xml, 'xml', 'utf-8' )\n message['Subject'] = \"DeliverXML\"\n message['From'] = \"sysadmin@kde.org\"\n message['To'] = \"commits@platna.kde.org\"\n\n # Send email...\n self.smtp.sendmail(\"sysadmin@kde.org\", [\"commits@platna.kde.org\"],\n message.as_string())", "def handle_commits_published(extension=None, **kwargs):\n review_request = kwargs.get('review_request')\n\n if review_request is None:\n return\n\n commit_data = fetch_commit_data(review_request)\n\n if (not is_pushed(review_request, commit_data) or\n not is_parent(review_request, commit_data)):\n return\n\n # Check the change description and only continue if it contains a change\n # to the commit information. Currently change descriptions won't include\n # information about our extra data field, so we'll look for a change to\n # the diff which is mandatory if the commits changed. TODO: Properly use\n # the commit information once we start populating the change description\n # with it.\n #\n # A change description will not exist if this is the first publish of the\n # review request. In that case we know there must be commits since this\n # is a pushed request.\n cd = kwargs.get('changedesc')\n if (cd is not None and ('diff' not in cd.fields_changed or\n 'added' not in cd.fields_changed['diff'])):\n return\n\n # We publish both the review repository url as well as the landing\n # (\"inbound\") repository url. This gives consumers which perform hg\n # operations the option to avoid cloning the review repository, which may\n # be large.\n repo = review_request.repository\n repo_url = repo.path\n landing_repo_url = repo.extra_data.get('landing_repository_url')\n\n child_rrids = []\n commits = []\n ext_commits = json.loads(commit_data.extra_data.get(COMMITS_KEY, '[]'))\n\n for rev, rrid in ext_commits:\n child_rrids.append(int(rrid))\n commits.append({\n 'rev': rev,\n 'review_request_id': int(rrid),\n 'diffset_revision': None\n })\n\n # In order to retrieve the diff revision for each commit we need to fetch\n # their correpsonding child review request.\n review_requests = dict(\n (obj.id, obj) for obj in\n ReviewRequest.objects.filter(pk__in=child_rrids))\n\n for commit_info in commits:\n # TODO: Every call to get_latest_diffset() makes its own query to the\n # database. 
It is probably possible to retrieve the diffsets we care\n # about using a single query through Django's ORM, but it's not trivial.\n commit_info['diffset_revision'] = review_requests[\n commit_info['review_request_id']\n ].get_latest_diffset().revision\n\n msg = base.GenericMessage()\n msg.routing_parts.append('mozreview.commits.published')\n msg.data['parent_review_request_id'] = review_request.id\n msg.data['parent_diffset_revision'] = review_request.get_latest_diffset().revision\n msg.data['commits'] = commits\n msg.data['repository_url'] = repo_url\n msg.data['landing_repository_url'] = landing_repo_url\n\n # TODO: Make work with RB localsites.\n msg.data['review_board_url'] = get_server_url()\n\n publish_message(extension, msg)", "def main(args):\n\n try:\n repo = RpmGitRepository(args.gitdir)\n except GitRepositoryError, err:\n raise GbsError(str(err))\n\n packaging_dir = get_packaging_dir(args)\n changes_file_list = glob.glob(\"%s/%s/*.changes\" % (repo.path,\n packaging_dir))\n\n if args.spec or not changes_file_list:\n # Create .changes file with the same name as a spec\n specfile = os.path.basename(guess_spec(repo.path,\n packaging_dir, args.spec)[0])\n fn_changes = os.path.splitext(specfile)[0] + \".changes\"\n fn_changes = os.path.join(repo.path, packaging_dir, fn_changes)\n else:\n fn_changes = changes_file_list[0]\n if len(changes_file_list) > 1:\n log.warning(\"Found more than one changes files, %s is taken \"\n % (changes_file_list[0]))\n\n # get the commit start from the args.since\n commitid_since = get_first_commit(repo, fn_changes, args.since)\n\n commits = repo.get_commits(commitid_since, 'HEAD')\n if not commits:\n raise GbsError(\"Nothing found between %s and HEAD\" % commitid_since)\n\n if args.message:\n author = repo.get_author_info()\n lines = [\"- %s\" % line for line in args.message.split(os.linesep) \\\n if line.strip()]\n new_entries = [\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email,\n get_version(repo, commits[0]))]\n new_entries.extend(lines)\n else:\n new_entries = make_log_entries(commits, repo)\n\n content = get_all_entries(fn_changes, new_entries)\n if edit_file(fn_changes, content):\n log.info(\"Change log has been updated.\")\n else:\n log.info(\"Change log has not been updated\")", "def run(self, args=[]):\n # Assert correct configuration.\n assert self.c.cbb.config, 'An empty configuration was specified.'\n assert self.c.cbb.builddir, 'A build directory name must be specified.'\n\n # Load properties from the commit being processed. This requires both a\n # repository and revision to proceed.\n repository = self.m.properties.get('repository')\n revision = self.m.properties.get('revision')\n if repository and revision:\n # Pull more information from the commit if it came from certain known\n # repositories.\n if (self.c.use_chrome_version and\n self.check_repository('chromium', repository)):\n # If our change comes from a Chromium repository, add the\n # '--chrome_version' flag.\n self.c.cbb.chrome_version = self.m.properties['revision']\n if (self.c.read_cros_manifest and\n self.check_repository('cros_manifest', repository)):\n # This change comes from a manifest repository. 
Load configuration\n # parameters from the manifest command.\n self.load_manifest_config(repository, revision)\n\n buildroot = self.m.path['root'].join('cbuild', self.c.cbb.builddir)\n cbb_args = [\n '--buildroot', buildroot,\n ]\n if not args:\n cbb_args.append('--buildbot')\n if self.c.chromite_branch and not self.c.cbb.disable_bootstrap:\n cbb_args.extend(['--branch', self.c.chromite_branch])\n if self.c.cbb.build_number is not None:\n cbb_args.extend(['--buildnumber', self.c.cbb.build_number])\n if self.c.cbb.chrome_rev:\n cbb_args.extend(['--chrome_rev', self.c.cbb.chrome_rev])\n if self.c.cbb.debug:\n cbb_args.extend(['--debug'])\n if self.c.cbb.clobber:\n cbb_args.extend(['--clobber'])\n if self.c.cbb.chrome_version:\n cbb_args.extend(['--chrome_version', self.c.cbb.chrome_version])\n if self.c.cbb.config_repo:\n cbb_args.extend(['--config_repo', self.c.cbb.config_repo])\n if self.c.repo_cache_dir and self.c.cbb.supports_repo_cache:\n cbb_args.extend(['--repo-cache', self.c.repo_cache_dir])\n\n # Set the build ID, if specified.\n if self.c.cbb.build_id:\n cbb_args.extend(['--master-build-id', self.c.cbb.build_id])\n\n # Add custom args, if there are any.\n cbb_args.extend(args)\n\n # Run cbuildbot.\n return self.cbuildbot(str('cbuildbot [%s]' % (self.c.cbb.config,)),\n self.c.cbb.config,\n args=cbb_args,\n cwd=self.m.path['slave_build'])", "def add_commit( self\n , cl\n , mark_number\n , parent_commit_list\n , first_branch_from_branch_id\n , first_branch_from_change_number\n , dest_branch\n , branch_name):\n with Timer(OVERALL):\n with Timer(BUILD):\n self.__append(NTR('commit refs/heads/{0}\\n').format(branch_name))\n self.__append(NTR('mark : {0}\\n').format(mark_number))\n desc_info = DescInfo.from_text(cl.description)\n committer_added = False\n if desc_info:\n for key in ('author', 'committer'):\n v = desc_info[key]\n if v:\n self.__append(NTR('{key} {fullname} {email} {time} {timezone}\\n').\n format( key = key\n , fullname = v['fullname']\n , email = v['email' ]\n , time = v['time' ]\n , timezone = v['timezone']))\n committer_added = True\n desc = desc_info.clean_desc\n\n # Convoluted logic gates but avoids duplicating code. 
The point\n # is that we add the best possible committer data _before_\n # adding the description.\n if not committer_added:\n if desc_info:\n # old change description that lacked detailed author info,\n # deserves a warning, but otherwise push onward even if the\n # commit checksums will likely differ from the originals\n LOG.warn('commit description did not match committer regex: @{} => {}'.\n format(cl.change, desc_info.suffix))\n timezone = self.__get_timezone_offset(cl.time)\n self.__append(NTR('committer {fullname} {email} {time} {timezone}\\n').\n format(fullname=self.__full_name_for_user(cl.user),\n email=self.__email_for_user(cl.user),\n time=cl.time,\n timezone=timezone))\n desc = cl.description\n self.__add_data(desc)\n\n self._add_commit_parent_list(parent_commit_list)\n if first_branch_from_branch_id \\\n and first_branch_from_change_number:\n self.__branch_from( dest_branch\n , cl\n , first_branch_from_branch_id\n , first_branch_from_change_number)\n self.__add_files(cl.files)\n if desc_info and desc_info.gitlinks:\n self.__add_gitlinks(desc_info.gitlinks)", "def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed", "def push():\n files = []\n for i in sp.check_output([\"git\", \"status\"]).decode().split(\"\\n\"):\n nf = \"#\\tnew file:\"\n mf = \"#\\tmodified:\"\n\t# Should have a deleted-files option here too.\n if i[: len(nf)] == nf or i[: len(mf)] == mf:\n f = i.split(\" \")[-1]\n files.append(f)\n files = list(set(files)) # Remove duplicates\n\n print(\"Committing these files: {}\".format(files))\n\n # Run all py scripts through black for formatting.\n# for f in files:\n# if f[-3:] == \".py\":\n# sp.call([\"black\", f])\n\n [sp.call([\"git\", \"add\", \"{}\".format(i)]) for i in files]\n\n commit_message = str(input(\"Enter commit message:\\n\"))\n commit_message = \"Updated\" if commit_message == \"\" else commit_message\n print(\"Committing with commit message of: {}\\n\\n\".format(commit_message))\n sp.call([\"git\", \"commit\", \"-m\", \"{}\".format(commit_message)])\n sp.call([\"git\", \"push\"])", "def push_the_change(self, issue_id: str, commit_msg: List[str] = None) -> None:\n index = self.git.index\n index.add([\"INFO.yaml\"])\n if not commit_msg:\n commit_msg = [\"Edit INFO.yaml file.\"]\n commit_msg_with_m = list(\n chain.from_iterable(zip_longest([], commit_msg, fillvalue=\"-m\"))\n )\n self.git.git.execute(\n [\n \"git\",\n \"commit\",\n *commit_msg_with_m,\n \"-m\",\n \"That change was done by automated integration tool to maintain commiters list in INFO.yaml\",\n \"-m\",\n f\"Issue-ID: {issue_id}\",\n \"-s\",\n ]\n )\n self.git.git.execute([\"git\", \"push\", 
\"origin\", f\"HEAD:refs/for/{self._branch}\"])\n print(f\"Pushed successfully to {self._repo} respository\")", "def gitCommit(filename, repo_dir, message, date=None):\n args = ['commit', '-m', message]\n if date:\n args.append('--date=\"%s\"' % date.strftime(\"%Y-%m-%d 00:00:00\"))\n git (args)", "def commits(self):\r\n url = '{0}/commits'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def commit():\n query = {\"type\": \"commit\", \"cmd\": \"<commit></commit>\"}\n\n return _get_job_results(query)", "def _push(self):\n push_cmds = self.vcs.push_commands()\n if not push_cmds:\n return\n if utils.ask(\"OK to push commits to the server?\"):\n for push_cmd in push_cmds:\n output = execute_command(push_cmd)\n logger.info(output)", "def whatsnew(ctx):\n embed = discord.Embed(title=\"Updates:\", colour=discord.Colour(14066432))\n embed.set_footer(text=\"Donations\")\n embed.add_field(name=\"**__New Updates__**\", value=\"---------\")\n embed.add_field(name=\"**Region/Platform Selection:**\",\n value=\"For matches' subcommands 'latest' and 'last' you can type a platform and region to look for matches now (Ex: '!matches latest Jabronious pc-na'). \"\n + \"Find the list of shards using !shards\")\n embed.add_field(name=\"**__Recent Updates__**\", value=\"---------\")\n embed.add_field(name=\"**Cooldowns:**\", value=\"Matches' commands will have cooldowns now. If you exceed them they will tell how long you have to wait.\")\n embed.add_field(name=\"**Showing Updates:**\", value=\"This too is a new command that can help keep you updated on things that new to the bot!\")\n yield from bot.say(embed=embed)", "def get_commit_message(self):\n r = requests.get('{base}/index.txt'.format(base=self.api_base_url))\n return r.text", "def push_commits(self, verbose=True):\n # The subprocess will return a non-zero exit code even if it succeeded.\n # Check its output to determine whether it worked.\n push_proc = subprocess.run(\n [\"git\", \"push\"],\n cwd=self.path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n if \"updated in conservator\" not in push_proc.stdout:\n if \"Everything up-to-date\" in push_proc.stdout:\n logger.warning(push_proc.stdout)\n else:\n logger.error(\n \"Server did not accept changes to index.json:\\n%s\", push_proc.stdout\n )\n raise RuntimeError(\"Failed to push changes to index.json\")\n self.pull(verbose)", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n 
embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def git_commit(c, message):\n c.run(f\"git submodule foreach 'git commit -a -m \\\"{message}\\\" || echo '\")", "def _create_tag_message(commits: List[git.objects.commit.Commit],\n tag: semantic_version.Version) -> str:\n\n tag_message = 'Release {} \\n\\n'.format(str(tag))\n\n for message in [c.message for c in commits]:\n tag_message += ' * {}\\n'.format(message.split('\\n')[0].strip())\n return tag_message", "async def trigger_build(self, *, branch=None, message=None):", "def main():\n # Events from google calendar:\n events = GC.get_events()\n\n message_info = MC.create_message(events, True)\n message_to_post = message_info[0]\n emojis = message_info[1]\n\n # Post message to Slack\n posted_message = SL.send_message(\n sc, channels[\"bot-dev\"], message_to_post, True)\n\n # React the emojis to Slack\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)\n\n correct_message = input(\"Did the message look ok in the #polls channel? If you answer with 'y' it will be posted in the polls channel. If you answer 'gen' it will be posted in general. 
If you answer with 'n' or anything other than the commands shown before, then it will be stopped and not posted any where else.\\n\")\n\n if \"gen\" in correct_message.lower():\n posted_message = SL.send_message(\n sc, channels[\"general\"], message_to_post, True)\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)\n elif \"y\" in correct_message.lower():\n posted_message = SL.send_message(\n sc, channels[\"polls\"], message_to_post, True)\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)", "def open_editor_to_amend_commit():\n command = f\"git commit --amend\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)", "def _announce_updates(self, updates):\n serialized = json.dumps(updates)\n log.debug('Sending serialized message: ' + serialized)\n msg = amqp.Message(serialized, content_type='application/json')\n self.channel.basic_publish(msg, self.amqp['routing_key'])", "def do_jira_case_commit_message(self, arg):\n cases = [(issue.key, issue.fields.summary, self.jira_url() + \"/browse/\" + issue.key) for issue in self.get_open_issues()]\n msg = \"\"\"\n--------------------------------------------------------------------\n[{}] {}\n \n<msg>\n \n{}\n-------------------------------------------------------------------- \n \"\"\"\n for case in cases:\n print(msg.format(case[0], case[1], case[2]))", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation (alq@datadoghq.com)\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics (matt@datadoghq.com)\n |/\n f7a5a23d * missed version number in docs (matt@datadoghq.com)\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def quote_to_embed(self,result):\n thedate = datetime.date.fromtimestamp(result[3])\n thechannel = self.bot.get_channel(result[2])\n themember = thechannel.server.get_member(result[1])\n theauthor = themember.name\n if hasattr(themember, \"nick\"):\n if themember.nick is not None:\n theauthor = themember.nick\n embed = discord.Embed(title=\"Quote #{}\".format(result[4]), description=result[0])\n embed.set_author(name=theauthor, icon_url=themember.avatar_url)\n embed.set_footer(text=\"Saved on: {}\".format(thedate.strftime(\"%d %B %y\")))\n return embed", "async def embed(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "async def github(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"{ctx.author.mention} ㅤㅤ I'm open-source! 
You can look at my source code here!ㅤ https://github.com/asxlvm/DogeBoss :GitHub:\"\n\t\t)", "def hello():\n if request.method == \"GET\":\n return \"incorrect usage\"\n else:\n #s = \"\"\n #for repo in g.get_user().get_repos():\n # s+= repo.name + \" \"\n #s += g.get_user().get_repo(\"memeplatter.github.io\").name\n #for repo in g.get_organization(os.environ.get(\"GIT_ORG\")).get_repos():\n # s += repo.name + \" \"\n #s += g.get_organization(os.environ.get(\"GIT_ORG\")).get_repo(os.environ.get(\"GIT_REPO\")).name\n repo = None\n if os.environ.get(\"GIT_ORG\") == None:\n #there is no organization configured\n repo = g.get_user().get_repo(os.environ.get(\"GIT_REPO\"))\n else:\n # there is a organization configured\n repo = g.get_organization(os.environ.get(\"GIT_ORG\")).get_repo(os.environ.get(\"GIT_REPO\"))\n honeypot = request.form[\"email\"]\n \n if len(honeypot) > 0:\n return \"eh\"\n from_name = request.form['name']\n subject = request.form['subject']\n message = request.form['message']\n #return repo.name\n #repo.create_file(\"/\"+repo.name+\"/test.md\", \"commit message\", \"commit content\")\n rMessage = createPost(repo, from_name, subject, message)\n return rMessage", "async def github(self, ctx: commands.Context, *, path: str):\n user, _, repo = path.replace(' ', '/', 1).partition('/')\n if repo:\n async with self.bot.session.get(\n f\"https://api.github.com/repos/{user}/{repo}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=data['full_name'],\n description=f\"stars: {data['stargazers_count']} forks: {data['forks_count']}\\n\"\n f\"language: {data['language']} license: {data['license']['name'] if data['license'] else 'no'}\\n\"\n +(f\"homepage: {data['homepage']}\" if data['homepage'] else ''),\n url=data['html_url']\n ).set_author(\n name=data['owner']['login'],\n url=data['owner']['html_url'],\n icon_url=data['owner']['avatar_url']\n ).set_thumbnail(\n url=data['owner']['avatar_url']\n ).add_field(\n name=\"Description\",\n value=data['description']\n )\n await ctx.send(embed=embed)\n else:\n async with self.bot.session.get(\n f\"https://api.github.com/users/{user}\",\n headers={\"Authorization\": f\"token {self.config['token']}\"}\n ) as r:\n data = await r.json()\n embed = discord.Embed(\n title=f\"{data['name']} ({data['login']})\",\n description=f\"repos: {data['public_repos']} gists: {data['public_gists']}\\n\"\n f\"followers: {data['followers']} following: {data['following']}\\n\"\n f\"location: {data['location']}\",\n url=data['html_url']\n ).set_thumbnail(\n url=data['avatar_url']\n ).add_field(\n name=\"Bio\",\n value=data['bio']\n ).add_field(\n name=\"Contact\",\n value=''.join([\n (f\"email: [{data['email']}](mailto:{data['email']})\\n\" if data['email'] else ''),\n (f\"twitter: [{data['twitter_username']}](https://twitter.com/{data['twitter_username']})\\n\" if data['twitter_username'] else ''),\n (f\"company: {data['company']}\\n\" if data['company'] else ''),\n \n ]) or 'no contact avalible'\n ).set_footer(\n text=f\"id: {data['id']}\"\n )\n await ctx.send(embed=embed)", "def commit(self, commit_msg, top_repo_path):\n my_output = subprocess.check_output(\n [\"git\", \"commit\", \"-m\", commit_msg], cwd=top_repo_path\n )\n return my_output", "async def bc(self,ctx,*,msg):\n for hook in self.bot.mwebhooks:\n try:\n async def send_webhook():\n async with aiohttp.ClientSession() as session:\n webhook = Webhook.from_url(\n f\"{hook}\", adapter=AsyncWebhookAdapter(session))\n\n e = 
discord.Embed(color = self.bot.color,description=msg)\n e.set_author(name=ctx.author,icon_url = ctx.author.avatar_url)\n await webhook.send(embed=e)\n\n await send_webhook()\n except:\n continue", "def main():\n print(\"Hello World!!!\")\n print(\"Commit to DLNA\")\n print(\"Another commit\")\n print(\"Commit after PR merge\")", "def createChangeLog(outputType: str = 'json', isExposeEmail: bool = False, remSignedOff = True, isExtended: bool = False):\r\n tags = {}\r\n commits = {}\r\n references = {}\r\n out = \"\"\r\n print(\"Starting...\")\r\n if isWin:\r\n hasTags = Popen('git for-each-ref --sort=\"*authordate\" --format=\"%(refname:short)\" refs/tags', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n hasTags = Popen('git for-each-ref --sort=\"*authordate\" --format=\"%(refname:short)\" refs/tags | grep -v \"^$\"#', shell=True, stdout=PIPE).stdout.read().decode()\r\n\r\n if hasTags.strip() == '':\r\n hasTags = None\r\n if hasTags is not None:\r\n hasTags = hasTags.strip().split('\\n')\r\n print(\"Found: \" + str(len(hasTags)) + \" tag(s).\")\r\n\r\n for i in hasTags:\r\n tags[len(tags)] = i\r\n tags = collections.OrderedDict(reversed(sorted(tags.items())))\r\n\r\n if len(tags) > 0:\r\n\r\n for key, tag in tags.items():\r\n if key - 1 < 0:\r\n search = '\"' + tag + '\"'\r\n else:\r\n search = '\"' + tags[key - 1] + '\"..\"' + tag + '\"'\r\n\r\n\r\n if isWin:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" ' + search + ' | findstr /v /C:\"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" ' + search + ' | grep -v \"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n partialCommit = partialCommit.strip().split(',|;,')\r\n k = 0\r\n for i in partialCommit:\r\n i = i.strip()\r\n if not i or i.strip().strip('\\n') == '':\r\n continue\r\n\r\n if remSignedOff is True:\r\n i = i.split(\"Signed-off-by:\")\r\n i = i[0].strip().strip('\\n')\r\n\r\n i = i.split(',;|,')\r\n if len(i) <= 3:\r\n continue\r\n if not isExposeEmail:\r\n i[3] = None\r\n else:\r\n references[i[2]] = i[3]\r\n if not tag in commits:\r\n commits[tag] = {}\r\n out += \"\\n####Version \" + tag.strip('v') + \" (\" + datetime.datetime.fromtimestamp(int(i[5])).strftime('%d.%m.%Y') + \")\\n\"\r\n if isExtended >= 3:\r\n comment = \"* [\" + i[0] + \"](../../commit/\" + i[0] + \") - [[\" + i[2] + \"]]: \" + i[6] + \"\\n\"\r\n commits[tag][\"commit_h\"] = i[1]\r\n commits[tag][\"by\"] = i[2]\r\n commits[tag][\"date\"] = i[4]\r\n commits[tag][\"date_unix\"] = i[5]\r\n elif isExtended == 2:\r\n comment = \"* **\" + i[0] + \"**: \" + i[6] + \"\\n\"\r\n commits[tag][\"commit_h\"] = i[1]\r\n else:\r\n comment = \"* \" + i[6] + \"\\n\"\r\n commits[tag][int(k)] = {\r\n 'commit': i[0],\r\n 'email': i[3],\r\n 'comment': i[6]\r\n }\r\n for r in needToBold:\r\n if r in comment:\r\n comment = comment.replace(r, '**' + r + '**')\r\n out += comment\r\n k += 1\r\n else:\r\n print(\"Error on tags\")\r\n exit(2)\r\n else:\r\n print(\"No Tags found. 
Switching to commit mode...\")\r\n if isWin:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" | findstr /v /C:\"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" | grep -v \"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n\r\n partialCommit = partialCommit.strip().split(',|;,')\r\n k = 0\r\n for i in partialCommit:\r\n i = i.strip()\r\n if not i or i.strip().strip('\\n') == '':\r\n continue\r\n\r\n if remSignedOff is True:\r\n i = i.split(\"Signed-off-by:\")\r\n i = i[0].strip().strip('\\n')\r\n\r\n i = i.split(',;|,')\r\n if not isExposeEmail:\r\n i[3] = None\r\n else:\r\n references[i[2]] = i[3]\r\n\r\n print(\"COMMIT: \" + i[0])\r\n out += \"\\n####\" + i[0] + \" (\" + datetime.datetime.fromtimestamp(int(i[5])).strftime('%d.%m.%Y') + \")\\n\"\r\n\r\n commits[i[0]] = {\r\n 'commit': i[0],\r\n 'email': i[3],\r\n 'comment': i[6]\r\n }\r\n if isExtended >= 3:\r\n comment = \"* [\" + i[0] + \"](../../commit/\" + i[0] + \") - [[\" + i[2] + \"]]: \" + i[6] + \"\\n\"\r\n commits[i[0]][\"commit_h\"] = i[1]\r\n commits[i[0]][\"by\"] = i[2]\r\n commits[i[0]][\"date\"] = i[4]\r\n commits[i[0]][\"date_unix\"] = i[5]\r\n elif isExtended == 2:\r\n comment = \"* **\" + i[0] + \"**: \" + i[6] + \"\\n\"\r\n\r\n commits[i[0]][\"commit_h\"] = i[1]\r\n else:\r\n comment = \"* \" + i[6] + \"\\n\"\r\n\r\n for r in needToBold:\r\n if r in comment:\r\n comment = comment.replace(r, '**' + r + '**')\r\n out += comment\r\n k += 1\r\n js = None\r\n if outputType == 'json':\r\n js = json.dumps(commits, indent=4, separators=(',', ': '))\r\n #sort_keys=True,\r\n else:\r\n out += \"\\n\\n\"\r\n if isExtended >= 3:\r\n for k, v in references.items():\r\n out += \"[\" + k + \"]:mailto://\" + v + \"\\n\"\r\n\r\n filename = \"CHANGELOG.md\"\r\n if js is not None:\r\n filename = \"version.json\"\r\n out = js\r\n\r\n file = codecs.open(filename, \"w\", \"utf-8\")\r\n file.write(out)\r\n file.close()\r\n print('Done.')", "def submitBuildRequest(ss, reason, props=None, now=False):", "def commit(self, message, verbose=True):\n commit_cmd = [\"git\", \"commit\"]\n if not verbose:\n commit_cmd.append(\"-q\")\n commit_cmd += [\"-m\", message]\n return subprocess.call(commit_cmd, cwd=self.path)", "def main():\n verbose = False\n online = True\n\n if online:\n TOKEN = \"\"\n g = Github(base_url=\"https://github.ibm.com/api/v3\", login_or_token=TOKEN)\n repo = g.get_repo(\"Raphael-Lambert/test_note\")\n\n path = \"C:/Users/RaphaelLambert/Documents/git_issues\"\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n if verbose:\n print(onlyfiles)\n treated = []\n issues = []\n\n with open(join(path, 'log.txt'), 'r') as doc:\n for line in doc:\n treated.append(line.rstrip('\\n'))\n\n with open(join(path, 'issues.txt'), 'r') as doc:\n for line in doc:\n issues.append(int(line.rstrip('\\n')))\n\n for title in onlyfiles:\n if title != 'log.txt' and title != 'issues.txt' and title not in treated:\n with open(join(path, title), 'rb') as fhdl:\n raw_email = fhdl.read()\n\n parsed_eml = eml_parser.eml_parser.decode_email_b(raw_email, include_raw_body=True)\n if verbose:\n print('-----------------')\n print(title)\n print('-----------------')\n print(parsed_eml)\n print('-----------------')\n body = parsed_eml['body']\n if len(body) > 0:\n raw_text = body[0]['content']\n else:\n raw_text = \"unable to retrieve the 
message\"\n raw_text = link_breaker(raw_text)\n num_get = 0\n if online and title[:4] == 'Re ' and title[4:] in treated:\n cont_issue = repo.get_issue(issues[treated.index(title[4:])])\n num_get = cont_issue.number\n cont_issue.create_comment(body=raw_text)\n elif online:\n new_issue = repo.create_issue(title=\"Conversation number {}: {}\".format(len(treated), title[:10]+\"...\"),\n body=raw_text)\n if verbose:\n print(new_issue)\n num_get = new_issue.number\n treated.append(title)\n issues.append(num_get)\n\n if verbose:\n print(treated)\n\n with open(join(path, 'log.txt'), 'w') as doc:\n for title in treated:\n doc.write(title+'\\n')\n with open(join(path, 'issues.txt'), 'w') as doc:\n for title in issues:\n doc.write(str(title)+'\\n')", "async def cmd_bugger(self, args: Args, src: Src, **_):\n if self.config.get(\"trello\") is None:\n raise CommandOperationError(\n \"Sorry, the bot maintainer has not enabled Trello bug reports.\"\n )\n try:\n url = f\"https://api.trello.com/1/lists/{self.config.get('trello/list_id')}/cards\"\n params = {\n \"key\": self.config.get(\"trello/app_key\"),\n \"token\": self.config.get(\"trello/token\"),\n }\n response = requests.request(\"GET\", url, params=params)\n\n except KeyError:\n raise CommandOperationError(\n \"The Trello keys are misconfigured, check your config file\"\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not get cards for the list ID provided. Talk to your bot\"\n \" owner.\"\n )\n\n ticketnumber = str(\n max(\n (\n int(card[\"name\"])\n for card in (response.json())\n if card[\"name\"].isnumeric()\n )\n )\n + 1\n )\n\n params.update(\n {\n \"name\": ticketnumber.zfill(3),\n \"desc\": (\n \"{message}\\n\\n\\n\\n\\n\"\n \"Submitted by: {author.name} ({author.id})\\n\"\n \"Timestamp: {time}\\n\"\n \"Guild: {guild.name} ({guild.id})\\n\"\n \"Channel: {channel.name} ({channel.id})\".format(\n message=\" \".join(args),\n author=src.author,\n channel=src.channel,\n guild=src.guild,\n time=dt.utcnow(),\n )\n ),\n \"pos\": \"bottom\",\n \"idList\": self.config.get(\"trello/list_id\"),\n \"username\": self.config.get(\"trello/username\"),\n }\n )\n\n response = requests.request(\n \"POST\", \"https://api.trello.com/1/cards\", params=params\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not create bug report. 
Talk to your bot owner.\"\n )\n\n return f\"Created bug report with ID `{ticketnumber}`\"", "def commit(self, message):\n tree = self.index.write_tree(self.repo)\n\n sig = pygit2.Signature(\n \"Wagtail Localize\", \"wagtail_localize_pontoon@wagtail.io\"\n )\n\n if self.repo_is_empty:\n self.repo.create_commit(\n \"HEAD\", sig, sig, message, tree, []\n )\n else:\n self.repo.create_commit(\n \"refs/heads/master\", sig, sig, message, tree, [self.repo.head.target]\n )\n\n return self.repo.head.target.hex", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def create_prebuild_new_commit(self, cr, uid, ids, context=None):\n build_pool = self.pool.get('runbot.build')\n build_line_pool = self.pool.get('runbot.build.line')\n repo_pool = self.pool.get('runbot.repo')\n branch_pool = self.pool.get('runbot.branch')\n build_new_ids = []\n for prebuild_id in ids:\n build_ids = build_pool.search(cr, uid, [\n ('prebuild_id', 'in', [prebuild_id]),\n ('from_main_prebuild_ok', '=', True),\n ], context=context)\n if not build_ids:\n # If not build exists then create it and mark as\n # from_main_prebuild_ok=True\n build_new_id = self.create_build(cr, uid, [prebuild_id],\n default_data={\n 'from_main_prebuild_ok': True}, context=context)\n build_new_ids.append(build_new_id)\n continue\n\n build_line_ids = build_line_pool.search(cr, uid, [\n ('build_id', 'in', build_ids),\n ('prebuild_line_id.check_new_commit', '=', True),\n ], context=context)\n if build_line_ids:\n # Get all branches from build_line of this prebuild_sticky\n build_line_datas = build_line_pool.read(\n cr, uid, build_line_ids, ['branch_id'], context=context)\n branch_ids = list(\n set([r['branch_id'][0] for r in build_line_datas]))\n # Get last commit and search it as sha of build line\n for branch in branch_pool.browse(cr, uid, branch_ids,\n context=context):\n _logger.info(\"get last commit info for check new commit\")\n refs = repo_pool.get_ref_data(\n cr, uid, [branch.repo_id.id], branch.name,\n fields=['objectname'], context=context)\n if refs and refs[branch.repo_id.id]:\n ref_data = refs[branch.repo_id.id][0]\n sha = ref_data['objectname']\n build_line_with_sha_ids = build_line_pool.search(cr,\n uid, [('branch_id', '=', branch.id),\n ('build_id', 'in', build_ids),\n ('sha', '=', sha)], context=context, limit=1)\n if not build_line_with_sha_ids:\n # If not last commit then create build with last\n # commit\n replace_branch_info = {\n branch.id: {'reason_ok': True}}\n default_data = {'from_main_prebuild_ok': True}\n build_new_id = self.create_build(cr, uid, [\n prebuild_id], default_data=default_data,\n replace_branch_info=replace_branch_info,\n context=context)\n build_new_ids.append(build_new_id)\n return build_new_ids", "def contact(update: Update) -> None:\n update.message.text(\"@New GEN\")", "async def _server(self, ctx: commands.Context) -> None:\n\n guild = ctx.guild\n\n embed = CleanEmbed(\n author_image=guild.icon_url,\n author_text=guild.name,\n thumbnail_url=guild.icon_url,\n fields=[\n {'name': 'Owner', 'value': 
f'{guild.owner.name}#{guild.owner.discriminator}', 'inline': True},\n {'name': 'ID', 'value': guild.id, 'inline': True},\n {'name': 'Members', 'value': guild.member_count, 'inline': True},\n {'name': 'Channels',\n 'value': f'{(len(guild.text_channels) + len(guild.voice_channels))} (+ {len(guild.categories)} categories)',\n 'inline': True},\n {'name': 'Region', 'value': GUILD_REGIONS[guild.region], 'inline': True},\n {'name': 'Emojis', 'value': len(guild.emojis), 'inline': True},\n {'name': 'Tier', 'value': f'{guild.premium_tier} ({guild.premium_subscription_count} boosts)',\n 'inline': True},\n {'name': 'Verification', 'value': GUILD_VERIFICATION_LEVELS[guild.verification_level], 'inline': True},\n {'name': 'Created', 'value': guild.created_at.strftime(\"%d %B, %Y\"), 'inline': True},\n ])\n\n await ctx.send(embed=embed)", "def handle(bot, update):\n print(update.message.text)\n bot.send_message(chat_id=update.message.chat_id,\n text='Hey! I\\'m Meditech Bot')", "def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "async def about(self, ctx):\n embed = discord.Embed(title = f\"About {self.bot.user.name}\", color = discord.Color.blurple())\n embed.set_thumbnail(url = self.bot.user.avatar_url)\n embed.add_field(name = \"Developers\", value = \"Kowlin#4417 & A Trash Coder#0981\", inline = False)\n embed.add_field(name = \"Library\", value = \"discord.py rewrite\", inline = False)\n embed.add_field(name = \"Source Code\", value = \"[Click here](https://github.com/kowlintechnologies/DHB)\", inline = False)\n embed.add_field(name = \"Links\", value = \"[Docs](https://dhb-documentation.readthedocs.io/en/latest/index.html) | [Support](https://discord.gg/KEkwrwd) | [Invite](https://discordapp.com/api/oauth2/authorize?client_id=592811241756688405&permissions=2080762998&scope=bot)\")\n await ctx.send(embed = embed)", "def deploy():\n build()\n collect()\n commit()\n push()", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def html_message_formatter(mode, name, build, results, master_status):\n result = Results[results]\n\n limit_lines = 80\n text = list()\n text.append(u'<h4>Build status: %s</h4>' % result.upper())\n text.append(u'<table cellspacing=\"10\"><tr>')\n text.append(u\"<td>Buildslave for this Build:</td><td><b>%s</b></td></tr>\" % build.getSlavename())\n if master_status.getURLForThing(build):\n text.append(u'<tr><td>Complete logs 
for all build steps:</td><td><a href=\"%s\">%s</a></td></tr>'\n % (master_status.getURLForThing(build),\n master_status.getURLForThing(build))\n )\n text.append(u'<tr><td>Build Reason:</td><td>%s</td></tr>' % build.getReason())\n source = u\"\"\n for ss in build.getSourceStamps():\n if ss.codebase:\n source += u'%s: ' % ss.codebase\n if ss.branch:\n source += u\"[branch %s] \" % ss.branch\n if ss.revision:\n source += ss.revision\n else:\n source += u\"HEAD\"\n if ss.patch:\n source += u\" (plus patch)\"\n if ss.patch_info: # add patch comment\n source += u\" (%s)\" % ss.patch_info[1]\n text.append(u\"<tr><td>Build Source Stamp:</td><td><b>%s</b></td></tr>\" % source)\n text.append(u\"<tr><td>Blamelist:</td><td>%s</td></tr>\" % \",\".join(build.getResponsibleUsers()))\n text.append(u'</table>')\n if ss.changes:\n text.append(u'<h4>Recent Changes:</h4>')\n for c in ss.changes:\n cd = c.asDict()\n when = datetime.datetime.fromtimestamp(cd['when'] ).ctime()\n text.append(u'<table cellspacing=\"10\">')\n text.append(u'<tr><td>Repository:</td><td>%s</td></tr>' % cd['repository'] )\n text.append(u'<tr><td>Project:</td><td>%s</td></tr>' % cd['project'] )\n text.append(u'<tr><td>Time:</td><td>%s</td></tr>' % when)\n text.append(u'<tr><td>Changed by:</td><td>%s</td></tr>' % cd['who'] )\n text.append(u'<tr><td>Comments:</td><td>%s</td></tr>' % cd['comments'] )\n text.append(u'</table>')\n files = cd['files']\n if files:\n text.append(u'<table cellspacing=\"10\"><tr><th align=\"left\">Files</th></tr>')\n for file in files:\n text.append(u'<tr><td>%s:</td></tr>' % file['name'] )\n text.append(u'</table>')\n text.append(u'<br>')\n # get all the steps in build in reversed order\n rev_steps = reversed(build.getSteps())\n # find the last step that finished\n for step in rev_steps:\n if step.isFinished():\n break\n # get logs for the last finished step\n if step.isFinished():\n logs = step.getLogs()\n # No step finished, loop just exhausted itself; so as a special case we fetch all logs\n else:\n logs = build.getLogs()\n # logs within a step are in reverse order. 
Search back until we find stdio\n for log in reversed(logs):\n if log.getName() == 'stdio':\n break\n name = \"%s.%s\" % (log.getStep().getName(), log.getName())\n status, dummy = log.getStep().getResults()\n content = log.getText().splitlines() # Note: can be VERY LARGE\n url = u'%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),\n log.getStep().getName(),\n log.getName())\n\n text.append(u'<i>Detailed log of last build step:</i> <a href=\"%s\">%s</a>'\n % (url, url))\n text.append(u'<br>')\n text.append(u'<h4>Last %d lines of \"%s\"</h4>' % (limit_lines, name))\n unilist = list()\n for line in content[len(content)-limit_lines:]:\n unilist.append(cgi.escape(unicode(line,'utf-8')))\n text.append(u'<pre>')\n text.extend(unilist)\n text.append(u'</pre>')\n text.append(u'<br><br>')\n text.append(u'<b>-The Buildbot</b>')\n return {\n 'body': u\"\\n\".join(text),\n 'type': 'html'\n }", "async def process_merge_request_hook(data: models.MergeRequestHook):\n project = data.project\n merge = data.merge_request\n user = data.user\n description = \"\"\n action = \"Issue updated\"\n colour = discord.Colour.light_grey()\n if merge.action == \"open\":\n action = \"Merge request opened\"\n description = merge.description\n colour = discord.Colour.dark_green()\n elif merge.action == \"close\":\n action = \"Merge request closed\"\n colour = discord.Colour.dark_grey()\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}\",\n url=merge.url, description=description, colour=colour)\n embed.set_author(name=user.username, icon_url=user.avatar_url)\n embed.set_footer(text=f\"{merge.source_branch} → {merge.target_branch}\")\n await send_message(None, embed=embed)", "async def server(self, ctx):\n\n created = ctx.guild.created_at\n created = created.strftime('%a %b %d %Y at %I:%M %p')\n created1 = datetime.strptime(created, '%a %b %d %Y at %I:%M %p')\n created1 = relativedelta(created1, datetime.utcnow())\n\n channels = len(ctx.guild.channels)\n embed = discord.Embed(color=self.bot.embed_color)\n\n members = [x for x in ctx.guild.members if not x.bot]\n bots = [x for x in ctx.guild.members if x.bot]\n\n embed.title = f'{ctx.guild.name} 🏰'\n embed.description = f'Created on {created} \\nThat\\'s {abs(created1.years)}y(s), {abs(created1.months)}m, ' \\\n f'{abs(created1.days)}d, {abs(created1.minutes)}m and {abs(created1.seconds)}s ago!'\n\n embed.add_field(name='Owner 🤵', value=ctx.guild.owner.mention, inline=True)\n embed.set_thumbnail(url=ctx.guild.icon_url)\n embed.add_field(name='Server 🆔', value=ctx.guild.id, inline=True)\n embed.add_field(name='Members :family_mwgb:', value=(\n f\"**Users:** {len(members)} \\n\"\n f\"**Bots:** {len(bots)}\"\n ), inline=True)\n\n embed.add_field(name='Channels 📺', value=str(channels), inline=True)\n embed.add_field(name='Roles 📜', value=str(len(ctx.guild.roles)), inline=True)\n await ctx.send(embed=embed)", "def _get_changelog_contents(ctx: Context, version: str):\n return ctx.run(\n \"towncrier\",\n \"build\",\n \"--draft\",\n f\"--version={version}\",\n capture=True,\n ).stdout.decode()", "def commit(self, commit_message):\n self.git_repo.index.commit(commit_message)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make 
hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "async def embed_editor(self, guild):\n if self.embed_pooling:\n return\n self.embed_pooling = True\n await asyncio.sleep(3.0)\n current_embed = self.games_info[guild.id][0].embeds[0].to_dict()\n current_embed['fields'][0]['value'] = '\\n'.join(f'{p}' for p in self.games_info[guild.id][2]) or \"None\"\n self.embed_pooling = False\n await self.games_info[guild.id][0].edit(embed=discord.Embed.from_dict(current_embed))", "async def botinfo(ctx):\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name='Bot Info', value = \"I'm made with the library Discord.py Async.\"\n \" I'm developed by Shutdown.py#2406. \"\n \"If you need any help with me, Join my [devs' server](https://discord.gg/X4CJdEM).\"\n \"Send feedback using the feedback command\")\n embed.add_field(name='Total Commands', value=(len(bot.commands)))\n embed.add_field(name = 'Invite Me!', value = '[Invite](https://discordbots.org/bot/399115688792424448)')\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)", "def commit(env='development', message='', push='n', test='y'):\n\n project_settings = get_settings()\n projects = build_projects_vars()\n project = projects[env]\n\n if env != 'production':\n print \"========================================================\"\n print \"COMMIT IN %s...\" % env.upper()\n # TODO testing before committing\n #run_tests(env)\n for app in project_settings.EXTRA_APPS:\n if app[env]['dir'][:len(project['dir'])] == project['dir']:\n print \"\\nThe application %s is inside the project directory, no need to commit separately.\" % app['name']\n else:\n with settings(hide('warnings'), warn_only=True):\n print \"\\nCommitting changes for application %s in %s.\" % (app['name'], app[env]['dir'])\n local(\"cd %s && git add . && git commit -m '%s'\" % (app[env]['dir'], message))\n if push == 'y':\n local(\"cd %s && git push\" % app[env]['dir'])\n\n with settings(hide('warnings'), warn_only=True):\n print \"\\nCommitting changes in the directory project %s.\" % project['dir']\n local(\"cd %s && git add . 
&& git commit -m '%s'\" % (project['dir'], message))\n if push == 'y':\n local(\"cd %s && git push\" % project['dir'])\n print \"========================================================\"", "def help(update: Update, context: CallbackContext):\n context.bot.send_message(\n chat_id=update.message.chat_id,\n text=PROMPTS[\"help\"],\n reply_markup=telegram.InlineKeyboardMarkup(\n [\n [\n telegram.InlineKeyboardButton(\n \"Contribute on GitHub!\", url=\"https://github.com/iugov/s4lbot\"\n )\n ]\n ]\n ),\n parse_mode=telegram.ParseMode.MARKDOWN,\n )", "def generate_commit_documents(self, build_data, manifest_info):\n\n projects = build_data['manifest']\n invalid_project_shas = defaultdict(list)\n\n for project in projects:\n commits = dict()\n\n commit_info, invalid_shas = self.find_commits(\n project, projects[project], manifest_info\n )\n remote_info = manifest_info.get_project_remote_info(project)[1]\n\n if invalid_shas:\n # We hit a bad SHA, so pop the project and SHA onto\n # a dictionary and rebuild the build_data without\n # that specific project SHA\n invalid_project_shas[project].extend(invalid_shas)\n shas = build_data['manifest'][project]\n build_data['manifest'][project] = [\n sha for sha in shas if sha not in invalid_shas\n ]\n\n # Also send out an email to notify of an invalid SHA\n # (or SHAs) having been found\n manifest_path = build_data['manifest_path']\n manifest_sha = build_data['manifest_sha']\n\n message = {\n 'subject': f'Invalid SHA(s) found in project {project}',\n 'body': f'Found the following invalid SHA(s) in project '\n f'{project}:\\n {\", \".join(invalid_shas)}\\n\\n'\n f'from remote {remote_info}, called from '\n f'manifest {manifest_path} at SHA {manifest_sha}'\n }\n send_email(\n self.smtp_server, self.receivers.split(','), message\n )\n continue\n\n for commit in commit_info:\n commit_name = f'{project}-{commit.id.decode()}'\n logging.debug(f'Generating commit document for '\n f'commit {commit_name}')\n\n # See if commit document already is in the database\n # and extract for updating if so, otherwise create\n # a new dictionary for population\n try:\n commit_data = self.db.get_document(commit_name)\n except cbdatabase_db.NotFoundError:\n commit_data = dict(type='commit', key_=commit_name)\n\n commit_data['project'] = project\n commit_data['sha'] = commit.id.decode()\n commit_data['in_build'] = list() # Populated later\n commit_data['author'] = commit.author.decode(errors='replace')\n commit_data['committer'] = \\\n commit.committer.decode(errors='replace')\n commit_data['summary'] = \\\n commit.message.decode(errors='replace')\n commit_data['timestamp'] = commit.commit_time\n commit_data['parents'] = [\n f'{project}-{commit_id.decode()}'\n for commit_id in commit.parents\n ]\n commit_data['remote'] = remote_info\n commits[commit_name] = commit_data\n\n if commits:\n self.db.upsert_documents(commits)\n\n if invalid_project_shas:\n # We had bad project SHAs, so we need to clean up build_data\n # a bit - in particular, if we have a project in the 'manifest'\n # key with a now empty SHA list, we need to remove it entirely\n # from the key - then add the list of invalid SHAs and write\n # the build document back out with the updated information\n logging.debug(f'Invalid SHAs found: '\n f'{\", \".join(invalid_project_shas)}')\n build_name = build_data['key_']\n build_data['manifest'] = {\n project: sha for project, sha\n in build_data['manifest'].items() if sha\n }\n build_data['invalid_shas'] = invalid_project_shas\n self.db.upsert_documents({build_name: 
build_data})", "def remote_commit(self, data):\n scene_name = mc.file(q=True, sn=True)\n new_scene_name = mc.file(\n \"{0}_farm.ma\".format(os.path.splitext(scene_name)[0]),\n exportAll=True,\n type=\"mayaAscii\"\n )\n\n cmd = (\n \"from nwave.effects.tools.nwFenixCommitter.Controller \"\n \"import Controller\\nController.commit_asset\"\n \"(\\\"{0}\\\", \\\"{1}\\\", \\\"{2}\\\", {3}, {4})\"\n )\n cmd = cmd.format(\n data.asset.name,\n self._model.user.code,\n data.commit_text,\n data.commit_to_fx_cache,\n data.commit_to_alembic_anim\n )\n\n batch = Batch(\n PipelineHelper.getContext(),\n Settings.MUSTER_FOLDER,\n \"commit_{0}\".format(data.asset.name.replace(':', '_')),\n maya_scene_file=new_scene_name\n )\n batch.add_job(cmd)\n batch.launch()\n\n DisplayMayaDialog.displayMayaDialog(\n 'Commit on Farm',\n (\n \"The job to commit the asset {0} has created on the Farm.\"\n ).format(data.asset.name),\n severity=DisplayMayaDialog.SeverityTypes.INFORMATION\n )", "def on_commit_comment(self, payload):\n pass", "def pubsub_push():\n if flask.request.args.get('token') != _PUBSUB_VERIFICATION_TOKEN:\n return 'Unauthorized', httplib.UNAUTHORIZED\n request_body = json.loads(flask.request.data.decode('utf-8'))\n try:\n run_results_dict = _extract_run_result(request_body)\n except ValueError:\n logging.error('Request body is not JSON-encodable: %s', request_body)\n return 'Invalid request body', httplib.BAD_REQUEST\n run_results = _get_run_result_list(run_results_dict)\n\n total_items_processed = sum(\n result.get_total_count() for result in run_results)\n\n current_datetime = datetime.datetime.now(pytz.timezone(_JAPAN_TIMEZONE))\n\n jinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n template_values = {\n 'currentMonth':\n current_datetime.strftime('%B'),\n 'currentYear':\n current_datetime.strftime('%Y'),\n 'fullTimestamp':\n '%s (%s)' %\n (current_datetime.strftime('%B %d, %Y %H:%M:%S'), _JAPAN_TIMEZONE),\n 'projectId':\n _PROJECT_ID,\n 'runResults':\n run_results,\n 'totalItemsProcessed':\n total_items_processed\n }\n\n template = jinja_environment.get_template('completion_mail.html')\n html_body = template.render(template_values)\n message = mail.EmailMessage(\n sender='no-reply@{0}.appspotmail.com'.format(_PROJECT_ID),\n subject='Shopping Feed Processing Completed',\n to=_EMAIL_TO,\n html=html_body)\n message.send()\n return 'OK!', httplib.OK", "def generate_build_document(self, commit_info, manifest_info):\n\n manifest_path, commit = commit_info\n build_name = manifest_info.name\n logging.info(f'Generating build document for manifest {build_name}...')\n\n # See if build document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n try:\n build_data = self.db.get_document(build_name)\n except cbdatabase_db.NotFoundError:\n build_data = dict(type='build', key_=build_name)\n\n projects = dict()\n\n for project_name in manifest_info.get_projects():\n project_shas = manifest_info.get_project_shas(\n project_name\n )\n projects[project_name] = [\n f'{project_name}-{sha}' for sha in project_shas\n ]\n build_data['manifest'] = projects\n build_data['invalid_shas'] = dict() # Populated (potentially) later\n\n release_keys = ('product', 'release', 'version', 'build_num')\n release_data = manifest_info.get_release_info()\n product, release, version, build_num = release_data\n build_data.update(dict(zip(release_keys, 
release_data)))\n\n index_key = f'{product}-{version}'\n build_data['prev_build_num'] = (\n self.prod_ver_index.get(index_key, None)\n )\n\n build_data['commits'] = list() # Populated (potentially) later\n build_data['manifest_sha'] = commit.id.decode()\n build_data['manifest_path'] = manifest_path.decode()\n build_data['timestamp'] = commit.commit_time\n build_data['download_url'] = (\n f'http://latestbuilds.service.couchbase.com/builds/latestbuilds/'\n f'{product}/{release}/{build_num}'\n )\n\n # Used for related (external) data; preserve any existing data\n build_data['metadata'] = build_data.get('metadata', dict())\n\n logging.debug(f\"Final build document: {build_data}\")\n self.db.upsert_documents({build_name: build_data})\n\n self.first_prod_ver_build = (\n True if build_data['prev_build_num'] is None else False\n )\n self.prod_ver_index[index_key] = build_num\n self.db.update_product_version_index(self.prod_ver_index)\n\n return build_data", "def sendpr(m='This is PR', b='lf-dev', h=None):\n command = 'hub pull-request -m \"%s\" -b %s' % (m,b)\n\n current_branch_cmd = shlex.split('git rev-parse --abbrev-ref HEAD')\n process = subprocess.Popen(current_branch_cmd, stdout=subprocess.PIPE)\n current_branch, err = process.communicate()\n print('current_branch', current_branch)\n if not h:\n cmd = shlex.split(command)\n else:\n command = command + '-h %s' % (h)\n cmd = shlex.split(command)\n current_branch = h\n\n cmd = shlex.split(command)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output, err = process.communicate()\n message = m + \" PR from %s @ %s reviewers @%s @%s \\n URL: %s \\n %s >>> %s\" % (DEV_NAME,\n datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M\"),\n REVIEWER[0], REVIEWER[1], output , b,\n current_branch)\n data = {\n \"color\":\"green\",\n \"message\":message,\n \"notify\":True,\n \"message_format\":\"text\"\n }\n req = urllib2.Request(HIPCHAT_WEB_HOOK)\n req.add_header(\"Content-Type\", \"application/json\")\n urllib2.urlopen(req, json.dumps(data))", "async def gecg(self, ctx):\n e = discord.Embed(title=\"Here is a feet gecg for you {}.\".format(ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('gecg'))\n await ctx.send(embed=e)" ]
[ "0.5981543", "0.5900061", "0.58865404", "0.5829465", "0.5829347", "0.58225244", "0.5794675", "0.5761677", "0.5699984", "0.56958634", "0.5648896", "0.5571014", "0.5564511", "0.5560195", "0.5509813", "0.550364", "0.54569304", "0.54245734", "0.5418751", "0.5418156", "0.53992987", "0.53991514", "0.5396564", "0.5389076", "0.5362692", "0.53408855", "0.53322023", "0.5318748", "0.5304797", "0.53013146", "0.5300616", "0.52894944", "0.52856654", "0.52489704", "0.5248247", "0.5244783", "0.52401155", "0.5230998", "0.5227903", "0.5226449", "0.5214013", "0.52085906", "0.5198381", "0.515934", "0.5146535", "0.51456165", "0.51276666", "0.5122609", "0.5119835", "0.5115252", "0.51136136", "0.5099145", "0.50928324", "0.5069773", "0.5062607", "0.50618845", "0.5060742", "0.5049265", "0.5046984", "0.5039743", "0.5021609", "0.5010011", "0.50008744", "0.49837315", "0.49834484", "0.49807778", "0.49736333", "0.496802", "0.49625516", "0.49620122", "0.49587193", "0.4954491", "0.494022", "0.49190602", "0.49161786", "0.49142504", "0.49133953", "0.49015582", "0.4898799", "0.48844293", "0.48791522", "0.48622432", "0.4862215", "0.48525375", "0.48504868", "0.48462293", "0.48422477", "0.48379752", "0.48280144", "0.48169357", "0.4812692", "0.48123264", "0.48121822", "0.4809714", "0.4807225", "0.48039398", "0.4796457", "0.47943127", "0.4788645", "0.47814184" ]
0.6393245
0
Builds and sends an embed message with issues information.
async def process_issue_hook(issue_data): project = issue_data.project issue = issue_data.issue user = issue_data.user description = "" action = "Issue updated" colour = discord.Colour.light_grey() if issue.action == "open": action = "Issue opened" description = issue.description colour = discord.Colour.green() elif issue.action == "close": action = "Issue closed" colour = discord.Colour.dark_grey() embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: #{issue.iid} {issue.title}" , url=issue.url, description=description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def issue(msg: telebot.types.Message) -> None:\n data = msg.text.split()\n if len(data) == 1:\n bot.send_message(\n msg.from_user.id,\n 'Use this command to tell the developer about an issue. '\n 'Example usage: `/issue I got 4 in a row but game did not end.`',\n parse_mode='Markdown'\n )\n else:\n m = ' '.join(data[1:])\n for dev_id in config.DEV_ID:\n bot.send_message(\n dev_id,\n '<b>Issue</b> from <a href=\"tg://user?id={id}\">{first_name}</a>.\\n'.format(\n first_name=msg.from_user.first_name,\n id=msg.from_user.id\n ) +\n 'ID: {id}\\n'.format(id=msg.from_user.id) +\n '<i>{message}</i>\\n'.format(message=m),\n parse_mode='HTML'\n )\n bot.reply_to(\n msg,\n 'Developer was notified. Thank you for your time.'\n )", "def send_publication_issues_message():\r\n keys = [\"description\", \"city_name\", \"concat_address\", \"geofeat_id\"]\r\n issues = sorted(\r\n arcetl.attributes.as_iters(\r\n dataset.ADDRESS_ISSUES.path(),\r\n field_names=keys,\r\n dataset_where_sql=\"update_publication = 0\",\r\n )\r\n )\r\n table_header = \"<tr>{}</tr>\".format(\r\n \"\".join(\"<th>{}</th>\".format(key) for key in keys)\r\n )\r\n row_template = \"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\"\r\n if issues:\r\n LOG.warning(\"Found validation publication issues: sending email.\")\r\n table_rows = \"\".join(row_template.format(*issue) for issue in issues)\r\n KWARGS_ISSUES_MESSAGE[\"body\"] = KWARGS_ISSUES_MESSAGE[\"body\"].format(\r\n table_header, table_rows\r\n )\r\n send_email(**KWARGS_ISSUES_MESSAGE)\r\n else:\r\n LOG.info(\"No validation publication issues found. Not sending email.\")", "async def issues(self, ctx):\n await ctx.message.delete()\n await ctx.send(\"Issue tracker: https://github.com/TheSuperGamer20578/Sudan-bot/issues\")", "def main():\n verbose = False\n online = True\n\n if online:\n TOKEN = \"\"\n g = Github(base_url=\"https://github.ibm.com/api/v3\", login_or_token=TOKEN)\n repo = g.get_repo(\"Raphael-Lambert/test_note\")\n\n path = \"C:/Users/RaphaelLambert/Documents/git_issues\"\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n if verbose:\n print(onlyfiles)\n treated = []\n issues = []\n\n with open(join(path, 'log.txt'), 'r') as doc:\n for line in doc:\n treated.append(line.rstrip('\\n'))\n\n with open(join(path, 'issues.txt'), 'r') as doc:\n for line in doc:\n issues.append(int(line.rstrip('\\n')))\n\n for title in onlyfiles:\n if title != 'log.txt' and title != 'issues.txt' and title not in treated:\n with open(join(path, title), 'rb') as fhdl:\n raw_email = fhdl.read()\n\n parsed_eml = eml_parser.eml_parser.decode_email_b(raw_email, include_raw_body=True)\n if verbose:\n print('-----------------')\n print(title)\n print('-----------------')\n print(parsed_eml)\n print('-----------------')\n body = parsed_eml['body']\n if len(body) > 0:\n raw_text = body[0]['content']\n else:\n raw_text = \"unable to retrieve the message\"\n raw_text = link_breaker(raw_text)\n num_get = 0\n if online and title[:4] == 'Re ' and title[4:] in treated:\n cont_issue = repo.get_issue(issues[treated.index(title[4:])])\n num_get = cont_issue.number\n cont_issue.create_comment(body=raw_text)\n elif online:\n new_issue = repo.create_issue(title=\"Conversation number {}: {}\".format(len(treated), title[:10]+\"...\"),\n body=raw_text)\n if verbose:\n print(new_issue)\n num_get = new_issue.number\n treated.append(title)\n issues.append(num_get)\n\n if verbose:\n print(treated)\n\n with open(join(path, 'log.txt'), 'w') as doc:\n for title in treated:\n doc.write(title+'\\n')\n 
with open(join(path, 'issues.txt'), 'w') as doc:\n for title in issues:\n doc.write(str(title)+'\\n')", "def show(request):\n if _use_new_ui(request):\n return _serve_new_ui(request)\n\n patchsets = request.issue.get_patchset_info(request.user, None)\n last_patchset = first_patch = None\n if patchsets:\n last_patchset = patchsets[-1]\n if last_patchset.patches:\n first_patch = last_patchset.patches[0]\n messages = []\n generated_messages = []\n has_draft_message = False\n # Keep track of the last non-generated message.\n message_index = -1\n last_user_message_index = -1\n for msg in request.issue.messages:\n if msg.auto_generated:\n generated_messages.append(msg)\n if not msg.draft:\n messages.append(msg)\n message_index += 1\n if not msg.auto_generated:\n last_user_message_index = message_index\n elif msg.draft and request.user and msg.sender == request.user.email():\n has_draft_message = True\n num_patchsets = len(patchsets)\n\n issue = request.issue\n issue.description = cgi.escape(issue.description)\n issue.description = urlize(issue.description)\n re_string = r\"(?<=BUG=)\"\n re_string += \"(\\s*(?:[a-z0-9-]+:)?\\d+\\s*(?:,\\s*(?:[a-z0-9-]+:)?\\d+\\s*)*)\"\n expression = re.compile(re_string, re.IGNORECASE)\n issue.description = re.sub(expression, _replace_bug, issue.description)\n src_url = _map_base_url(issue.base)\n\n display_generated_msgs = False\n if request.user:\n account = models.Account.current_user_account\n display_generated_msgs = account.display_generated_msgs\n\n landed_days_ago = issue.get_time_since_landed()\n landed_days_ago = landed_days_ago.days if landed_days_ago else 'unknown'\n\n return respond(request, 'issue.html', {\n 'first_patch': first_patch,\n 'has_draft_message': has_draft_message,\n 'is_editor': request.issue.edit_allowed,\n 'issue': request.issue,\n 'last_patchset': last_patchset,\n 'messages': messages,\n 'generated_messages': generated_messages,\n 'last_user_message_index': last_user_message_index,\n 'num_patchsets': num_patchsets,\n 'patchsets': patchsets,\n 'src_url': src_url,\n 'display_generated_msgs': display_generated_msgs,\n 'offer_cq': request.issue.is_cq_available,\n 'landed_days_ago': landed_days_ago,\n })", "async def issue(ctx, number: Option(int, \"Issue number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Issue\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "def handle_issue(self, evt):\n author = self.format_nickname(evt.author)\n if evt.new:\n short_url = \"https://dolp.in/i%d\" % evt.issue\n url = Tags.UnderlineBlue(short_url)\n msg = 'Issue %d created: \"%s\" by %s - %s'\n msg = msg % (evt.issue, evt.title, author, url)\n else:\n short_url = \"https://dolp.in/i%d/%d\" % (evt.issue, evt.update)\n url = Tags.UnderlineBlue(short_url)\n msg = 'Update %d to issue %d (\"%s\") by %s - %s'\n msg = msg % (evt.update, evt.issue, evt.title, author, url)\n self.bot.say(msg)", "def issues_insert(self, mar, request):\n if not mar.perms.CanUsePerm(\n permissions.CREATE_ISSUE, mar.auth.effective_ids, mar.project, []):\n raise permissions.PermissionException(\n 'The requester %s is not allowed to create issues for project %s.' 
%\n (mar.auth.email, mar.project_name))\n\n with work_env.WorkEnv(mar, self._services) as we:\n owner_id = None\n if request.owner and request.owner.name:\n try:\n owner_id = self._services.user.LookupUserID(\n mar.cnxn, request.owner.name)\n except exceptions.NoSuchUserException:\n raise endpoints.BadRequestException(\n 'The specified owner %s does not exist.' % request.owner.name)\n\n cc_ids = []\n request.cc = [cc for cc in request.cc if cc]\n if request.cc:\n cc_ids = list(self._services.user.LookupUserIDs(\n mar.cnxn, [ap.name for ap in request.cc],\n autocreate=True).values())\n comp_ids = api_pb2_v1_helpers.convert_component_ids(\n mar.config, request.components)\n fields_add, _, _, fields_labels, _ = (\n api_pb2_v1_helpers.convert_field_values(\n request.fieldValues, mar, self._services))\n field_helpers.ValidateCustomFields(\n mar, self._services, fields_add, mar.config, mar.errors)\n if mar.errors.AnyErrors():\n raise endpoints.BadRequestException(\n 'Invalid field values: %s' % mar.errors.custom_fields)\n\n logging.info('request.author is %r', request.author)\n reporter_id, timestamp = self.parse_imported_reporter(mar, request)\n new_issue, _ = we.CreateIssue(\n mar.project_id, request.summary, request.status, owner_id,\n cc_ids, request.labels + fields_labels, fields_add,\n comp_ids, request.description,\n blocked_on=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blockedOn, mar, self._services),\n blocking=api_pb2_v1_helpers.convert_issueref_pbs(\n request.blocking, mar, self._services),\n reporter_id=reporter_id, timestamp=timestamp,\n send_email=request.sendEmail)\n we.StarIssue(new_issue, True)\n\n return api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssuesGetInsertResponse, new_issue, mar, self._services)", "def error_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.red())\n embed.title = \"\"\n embed.description = message\n return embed", "def issue(ctx, accountable, issue_key):\n accountable.issue_key = issue_key\n if not ctx.invoked_subcommand:\n issue = accountable.issue_meta()\n headers = issue.keys()\n rows = [headers, [v for k, v in issue.items()]]\n print_table(SingleTable(rows))", "def send_issue_slack(issue_pk: str):\n issue = Issue.objects.select_related(\"client\").get(pk=issue_pk)\n text = get_text(issue)\n logging.info(\"Notifying Slack of Issue<%s>\", issue_pk)\n send_slack_message(settings.SLACK_MESSAGE.CLIENT_INTAKE, text)\n # Mark request as sent\n Issue.objects.filter(pk=issue.pk).update(is_alert_sent=True)", "def show_issue(self, msg, issue_id):\n self._asset_bind(msg)\n yield \"https://github.com/{}/issues/{}\".format(task_repository_name(), issue_id)", "async def remind_about_pull_requests(self, issues: list) -> None:\n author = deepcopy(self.blocks['author'])\n author['elements'][1]['text'] = self.version\n starting_blocks = [\n self.blocks['header'],\n author,\n self.blocks['divider'],\n ]\n message = {'blocks': deepcopy(starting_blocks)}\n tasks = []\n for issue in issues:\n pull_requests = self._create_pull_requests_descriptions(issue['pull_requests'])\n if pull_requests:\n title = deepcopy(self.blocks['title'])\n title['text']['text'] = f':bender: *[{issue[\"key\"]}] {issue[\"title\"]}*'\n message['blocks'].extend([title] + pull_requests + [self.blocks['divider']])\n\n if len(message['blocks']) > 45:\n tasks.append(asyncio.create_task(self.send_message(message)))\n message = {'blocks': deepcopy(starting_blocks)}\n\n if not tasks:\n tasks = [asyncio.create_task(self.send_message(message))]\n\n 
set_key_in_redis('slack-known-user-ids', self.known_user_ids)\n await asyncio.gather(*tasks)", "def report(issues, show_urls=False):\r\n # titles may have unicode in them, so we must encode everything below\r\n if show_urls:\r\n for i in issues:\r\n role = 'ghpull' if 'merged' in i else 'ghissue'\r\n print('* :%s:`%d`: %s' % (role, i['number'],\r\n i['title'].encode('utf-8')))\r\n else:\r\n for i in issues:\r\n print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))", "def get_issue_embedding(self, repo_owner, repo_name, issue_num):\n\n issue_text = get_issue_text(owner=repo_owner,\n repo=repo_name,\n num=issue_num,\n idx=None)\n data = {'title': issue_text['title'],\n 'body': issue_text['body']}\n\n # sending post request and saving response as response object\n r = requests.post(url=self.embedding_api_endpoint,\n headers={'Token': pwd_context.hash(self.embedding_api_key)},\n json=data)\n if r.status_code != 200:\n logging.warning(f'Status code is {r.status_code} not 200: '\n 'can not retrieve the embedding')\n return None\n\n embeddings = np.frombuffer(r.content, dtype='<f4')[:1600]\n return embeddings", "def cgiIssue(formFields):\n \n # open the roundup tracker configuration file\n trackerConfig = ConfigParser.ConfigParser()\n trackerConfig.read(os.path.join(TRACKER_HOME, 'config.ini'))\n \n # open the roundup database\n r_instance = roundup.instance.open(TRACKER_HOME)\n r_db = r_instance.open(TRACKER_USER)\n\n # get handles to things like priority, etc\n title = (formFields.has_key('title') and formFields['title']) or \\\n DEFAULT_TITLE\n \n priority = findNode(r_db.getclass('priority'),\n {'name':(formFields.has_key('priority') and formFields['priority']) or DEFAULT_PRIORITY})['id']\n\n application = findNode(r_db.getclass('application'),\n {'identifier': formFields['app_id'],\n 'version' : formFields['app_version']})\n\n # see if we found the app record; if so, we just want the id\n if application:\n application = application['id']\n\n platform = findNode(r_db.getclass('platform'),\n {'identifier': formFields['platform']})\n if platform is None:\n # create the new platform, assuming \n\tp_id = formFields['platform']\n platform = r_db.getclass('platform').\\\n create(identifier=p_id, supported=True)\n else:\n # just get the ID\n platform = platform['id']\n\n if WATCH_USER is not None:\n nosy = [findNode(r_db.getclass('user'),\n {'username': WATCH_USER})['id']]\n else:\n nosy = []\n\n # get a handle to a default keyword we want to assign\n if DEFAULT_KEYWORD is not None:\n topics = [findNode(r_db.getclass('keyword'),\n\t {'name':DEFAULT_KEYWORD})['id']]\n else:\n topics=[]\n\n # add any notes to the issue as a message\n messages = []\n m_class = r_db.getclass('msg')\n\n if formFields.has_key('message'):\n msgs = formFields['message']\n \n # there may be one or more messages to create\n try:\n msgs.append(None)\n del msgs[-1]\n except:\n msgs = [msgs]\n\n for m in msgs:\n messages.append(m_class.create(content=m))\n \n \n issue_id = createIssue(r_db, title, priority, application,\n platform, nosy, messages, topics)\n\n return '%sissue%s' % (trackerConfig.get('tracker', 'web'),\n issue_id)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n 
e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "def reply_to_issue(msg: telebot.types.Message) -> None:\n index = msg.reply_to_message.text.index('ID') + 4\n receiver_id = int(msg.reply_to_message.text[index:index + 9])\n\n bot.send_message(\n receiver_id,\n 'Answer to your issue from {first_name}:\\n'.format(first_name=msg.from_user.first_name) +\n '<i>{message}</i>'.format(message=msg.text),\n parse_mode='HTML'\n )", "def _error_embed_helper(title: str, description: str) -> discord.Embed:\n return discord.Embed(title=title, description=description, colour=discord.Colour.red())", "async def cmd_bugger(self, args: Args, src: Src, **_):\n if self.config.get(\"trello\") is None:\n raise CommandOperationError(\n \"Sorry, the bot maintainer has not enabled Trello bug reports.\"\n )\n try:\n url = f\"https://api.trello.com/1/lists/{self.config.get('trello/list_id')}/cards\"\n params = {\n \"key\": self.config.get(\"trello/app_key\"),\n \"token\": self.config.get(\"trello/token\"),\n }\n response = requests.request(\"GET\", url, params=params)\n\n except KeyError:\n raise CommandOperationError(\n \"The Trello keys are misconfigured, check your config file\"\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not get cards for the list ID provided. Talk to your bot\"\n \" owner.\"\n )\n\n ticketnumber = str(\n max(\n (\n int(card[\"name\"])\n for card in (response.json())\n if card[\"name\"].isnumeric()\n )\n )\n + 1\n )\n\n params.update(\n {\n \"name\": ticketnumber.zfill(3),\n \"desc\": (\n \"{message}\\n\\n\\n\\n\\n\"\n \"Submitted by: {author.name} ({author.id})\\n\"\n \"Timestamp: {time}\\n\"\n \"Guild: {guild.name} ({guild.id})\\n\"\n \"Channel: {channel.name} ({channel.id})\".format(\n message=\" \".join(args),\n author=src.author,\n channel=src.channel,\n guild=src.guild,\n time=dt.utcnow(),\n )\n ),\n \"pos\": \"bottom\",\n \"idList\": self.config.get(\"trello/list_id\"),\n \"username\": self.config.get(\"trello/username\"),\n }\n )\n\n response = requests.request(\n \"POST\", \"https://api.trello.com/1/cards\", params=params\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not create bug report. 
Talk to your bot owner.\"\n )\n\n return f\"Created bug report with ID `{ticketnumber}`\"", "def issueView(context, issue):\n\n args = {\n 'item': None,\n 'group': None\n }\n \n if issue.item != None:\n item = issue.item.item\n args['item'] = item\n args['status'] = item.status.all()\n if issue.group != None:\n args['group'] = issue.group.group\n\n return render_to_string('issueView.html', args, context)", "def markdown_report(issues, commits):\n print()\n print('Handled issues:')\n print()\n\n for issue in issues:\n markdown_item(\n '#{0} {1}'.format(\n issue.number,\n issue.title,\n ),\n issue.html_url,\n )\n\n print()\n print('Commits:')\n print()\n\n for commit in commits:\n markdown_item(\n '{0} - {1}'.format(\n commit.sha[:7],\n commit.commit.message.split('\\n')[0]\n ),\n commit.html_url,\n )", "def test_issue_create_issue(self):\n pass", "def embed():", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def raise_jira_ticket(obj,org_id):\n try:\n app_id = obj.get('app_id','') \n vul_name = obj.get('vul_name','')\n cwe = int(obj.get('cwe',0))\n project_key = obj.get('project_key','')\n issuetype = obj.get('issuetype','Bug')\n assignee = obj.get('assignee')\n app_obj = Application.objects.get(pk=app_id)\n if app_id and vul_name:\n vuls = Vulnerability.objects.filter(is_false_positive=False,is_remediated=False,scan__application=app_obj,cwe=cwe,name=vul_name)\n jira_obj = JiraIssueTypes.objects.get(org__id=org_id)\n jira = get_jira_con(jira_obj) \n if jira and vuls.exists(): \n complete_desc = ''\n references = '' \n if app_obj:\n complete_desc += 'Application:\\n{0}\\n\\n'.format(app_obj.name)\n complete_desc += 'Application URL:\\n{0}\\n\\n'.format(app_obj.url)\n if cwe:\n complete_desc += 'CWE :\\n{0}\\n\\n'.format(cwe)\n org_obj = app_obj.org\n if org_obj.orl_config_exists():\n vul_info = get_open_vul_info_from_api(cwe,org_obj)\n complete_desc += 'Description:\\n{0}\\n\\n'.format(vul_info.get('description','')) \n if references:\n complete_desc += 'References:\\n{0}'.format(references) \n data_dict = {\n 'project':{'key':project_key },\n 'issuetype':{'name': issuetype},\n 'priority':{'name': 'Highest'},\n 'summary':vul_name,\n 'description':complete_desc, \n } \n new_issue = jira.create_issue(**data_dict) \n evids = VulnerabilityEvidence.objects.filter(vul__in=vuls) \n attachment = io.StringIO()\n attachment.write('Evidences') \n for evid in evids:\n data = '\\n\\t- {0}\\n\\t\\t- {1}'.format(evid.url,evid.name)\n attachment.write(data) \n jira.add_attachment(issue=new_issue, attachment=attachment, filename='evidences.txt') \n vuls.update(jira_id=str(new_issue),jira_issue_status=str(new_issue.fields.status))\n info_debug_log(event='Raise Jira ticket',status='success')\n if assignee:\n jira.assign_issue(new_issue,assignee)\n info_debug_log(event='Assign Jira ticket to an assignee',status='success')\n except BaseException as e:\n print(\"Error raising JIRA tickets\")\n # general_error_messages.delay(path='raise_jira_ticket function',msg=log_exception(e))\n critical_debug_log(event=e,status='failure')", "def get_issue(self, context):", "def submit_feedback(self, title, description, state):\n\n body = f\"\"\"\n**User Issue**\nEmail: {self.user.email}\nUser Agent: {get_user_agent(self.request)}\n\n{description}\n\n<details>\n\n<summary>Redux 
state</summary>\n\n<p>\n\n```json\n{json.dumps(state, indent=2)}\n```\n\n</p>\n</details>\n \"\"\"\n\n r = requests.post(\n 'https://api.github.com/repos/alexmojaki/futurecoder/issues',\n json={'title': title,\n 'body': body,\n 'labels': ['user', 'bug']},\n headers=dict(\n Authorization='token ' + settings.GITHUB_TOKEN,\n ),\n )\n\n assert r.status_code == 201", "def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "async def embed(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "async def bug(self, ctx):\n await ctx.message.delete()\n await ctx.send(\"File a bug report: https://github.com/TheSuperGamer20578/Sudan-bot/issues/new?labels=Bug&template=bug_report.md\")", "def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed", "def mailissue(request):\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n issue = request.issue\n msg = _make_message(request, issue, '', '', True)\n issue.put()\n msg.put()\n\n return HttpTextResponse('OK')", "async def pr(ctx, number: Option(int, \"Pull request number\")):\n url = f\"{repo}/issues/{number}\"\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"View Pull Request\", url=url))\n await ctx.respond(f\"Here's a link\", view=view)", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = 
jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "def issueCreate(request):\n args = { 'statusForm' : forms.itemStatusForm(), }\n return render_to_string('issueCreate.html', args,\n context_instance=RequestContext(request))", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "def test_issue_get_issue(self):\n pass", "def __init__(self,\n project_id='issue-label-bot-dev',\n topic_name='event_queue',\n subscription_name='subscription_for_event_queue',\n embedding_api_endpoint='https://embeddings.gh-issue-labeler.com/text'):\n # TODO(chunhsiang): change the embedding microservice to be an internal DNS of k8s service.\n # see: https://v1-12.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services\n self.project_id = project_id\n self.topic_name = topic_name\n self.subscription_name = subscription_name\n self.embedding_api_endpoint = embedding_api_endpoint\n self.embedding_api_key = os.environ['GH_ISSUE_API_KEY']\n self.app_url = os.environ['APP_URL']\n\n # init GitHub app\n github_init()\n # init pubsub subscription\n self.create_subscription_if_not_exists()", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def create_issue(self, data, **kwargs):\n raise NotImplementedError", "def test_issue(self):\n issue = Checkmarx.Issue('a_group', 'the_name', 'http://url', 3, 'New')\n\n self.assertEqual('a group', issue.group)\n self.assertEqual('the name', issue.title)\n self.assertEqual('http://url', issue.display_url)\n self.assertEqual(3, issue.count)\n self.assertEqual('New', issue.status)", "def generate_html_mesg(info, open_quests, owner, tags):\n\n msg = '<html>' \\\n '<body style=\"font-family: Verdana; font-size: 1em; color: #000\">'\n msg += (\n \"<div style='padding: 10px; border-radius: 5px; background: #232f3e; \"\n \"color: #fff; font-weight: bold; font-size: 1.25em;'>\"\n \"Hermes Notifications\"\n \"</div>\"\n \"<div style='padding: 10px;'><p>This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.</p>\"\n \"<p>Due dates, if any, are noted with each quest.</p>\"\n \"\".format(owner)\n )\n msg += (\n \"<p>To throw an event manually, you can run the following command \"\n \"on a shell server:</p>\"\n \"<pre style='font-size: 1.2em'>$ hermes event create [event] --host \"\n \"[hostname]</pre>\"\n \"<p>Or you can 
visit the quests linked below.</p></div>\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>QUEST {}</span><br/>\"\n \"<strong>CREATOR:</strong> {}<br />\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"<strong>DUE:</strong> {}<br/>\".format(quest.target_time)\n msg += \"<strong>DESC:</strong><p> \\\"{}\\\"</p>\".format(quest.description)\n msg += \"<strong>LINK:</strong> <code>{}/v1/quests/{}</code><br/>\".format(\n settings.frontend, quest_id\n )\n else:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>Labors not \"\n \"associated with a quest:</span><br />\"\n )\n\n msg += \"<p>Machines with labors:</p>\"\n\n msg += \"<pre style='margin-left: 10px; font-size: 1.2em'>\"\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \"{} ({})\\n\".format(hostname, tags_str)\n\n msg += \"</pre></div>\"\n\n msg += \"</body>\"\n\n return msg", "def html_message_formatter(mode, name, build, results, master_status):\n result = Results[results]\n\n limit_lines = 80\n text = list()\n text.append(u'<h4>Build status: %s</h4>' % result.upper())\n text.append(u'<table cellspacing=\"10\"><tr>')\n text.append(u\"<td>Buildslave for this Build:</td><td><b>%s</b></td></tr>\" % build.getSlavename())\n if master_status.getURLForThing(build):\n text.append(u'<tr><td>Complete logs for all build steps:</td><td><a href=\"%s\">%s</a></td></tr>'\n % (master_status.getURLForThing(build),\n master_status.getURLForThing(build))\n )\n text.append(u'<tr><td>Build Reason:</td><td>%s</td></tr>' % build.getReason())\n source = u\"\"\n for ss in build.getSourceStamps():\n if ss.codebase:\n source += u'%s: ' % ss.codebase\n if ss.branch:\n source += u\"[branch %s] \" % ss.branch\n if ss.revision:\n source += ss.revision\n else:\n source += u\"HEAD\"\n if ss.patch:\n source += u\" (plus patch)\"\n if ss.patch_info: # add patch comment\n source += u\" (%s)\" % ss.patch_info[1]\n text.append(u\"<tr><td>Build Source Stamp:</td><td><b>%s</b></td></tr>\" % source)\n text.append(u\"<tr><td>Blamelist:</td><td>%s</td></tr>\" % \",\".join(build.getResponsibleUsers()))\n text.append(u'</table>')\n if ss.changes:\n text.append(u'<h4>Recent Changes:</h4>')\n for c in ss.changes:\n cd = c.asDict()\n when = datetime.datetime.fromtimestamp(cd['when'] ).ctime()\n text.append(u'<table cellspacing=\"10\">')\n text.append(u'<tr><td>Repository:</td><td>%s</td></tr>' % cd['repository'] )\n text.append(u'<tr><td>Project:</td><td>%s</td></tr>' % cd['project'] )\n text.append(u'<tr><td>Time:</td><td>%s</td></tr>' % when)\n text.append(u'<tr><td>Changed by:</td><td>%s</td></tr>' % cd['who'] )\n text.append(u'<tr><td>Comments:</td><td>%s</td></tr>' % cd['comments'] )\n text.append(u'</table>')\n files = cd['files']\n if files:\n text.append(u'<table cellspacing=\"10\"><tr><th align=\"left\">Files</th></tr>')\n for file in files:\n text.append(u'<tr><td>%s:</td></tr>' % file['name'] )\n text.append(u'</table>')\n text.append(u'<br>')\n # get all the steps in build in reversed order\n rev_steps = reversed(build.getSteps())\n # find the last step that 
finished\n for step in rev_steps:\n if step.isFinished():\n break\n # get logs for the last finished step\n if step.isFinished():\n logs = step.getLogs()\n # No step finished, loop just exhausted itself; so as a special case we fetch all logs\n else:\n logs = build.getLogs()\n # logs within a step are in reverse order. Search back until we find stdio\n for log in reversed(logs):\n if log.getName() == 'stdio':\n break\n name = \"%s.%s\" % (log.getStep().getName(), log.getName())\n status, dummy = log.getStep().getResults()\n content = log.getText().splitlines() # Note: can be VERY LARGE\n url = u'%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),\n log.getStep().getName(),\n log.getName())\n\n text.append(u'<i>Detailed log of last build step:</i> <a href=\"%s\">%s</a>'\n % (url, url))\n text.append(u'<br>')\n text.append(u'<h4>Last %d lines of \"%s\"</h4>' % (limit_lines, name))\n unilist = list()\n for line in content[len(content)-limit_lines:]:\n unilist.append(cgi.escape(unicode(line,'utf-8')))\n text.append(u'<pre>')\n text.extend(unilist)\n text.append(u'</pre>')\n text.append(u'<br><br>')\n text.append(u'<b>-The Buildbot</b>')\n return {\n 'body': u\"\\n\".join(text),\n 'type': 'html'\n }", "def create_issue(self, group, form_data, **kwargs):\n headers = { \"X-Redmine-API-Key\": self.get_option('key', group.project),\n 'content-type': 'application/json' }\n verifySSL = self.get_option('verify_ssl', group.project)\n url = urlparse.urljoin(self.get_option('host', group.project), \"issues.json\")\n payload = {\n 'project_id': self.get_option('project_id', group.project),\n 'tracker_id': self.get_option('tracker_id', group.project),\n 'status_id': '0',\n 'subject': form_data['title'].encode('utf-8'),\n 'description': form_data['description'].encode('utf-8'),\n }\n #print >> sys.stderr, \"url:\", url\n #print >> sys.stderr, \"payload:\\n\", pformat(payload)\n #print >> sys.stderr, pformat(group)\n #print >> sys.stderr, pformat(dir(group))\n\n try:\n r = requests.post(url, data=json.dumps({'issue': payload}), headers=headers, verify=verifySSL)\n except requests.exceptions.HTTPError as e:\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n try:\n data = json.loads(r.text)\n except json.JSONDecodeError as e:\n #print >> sys.stderr, \"ERROR: %s\" % e\n #print >> sys.stderr, \"RESP:\", r.text\n raise forms.ValidationError('Unable to reach Redmine host: %s' % repr(e))\n\n if not 'issue' in data or not 'id' in data['issue']:\n raise forms.ValidationError('Unable to create redmine ticket')\n\n return data['issue']['id']", "def generate_body(issue):\n markdown = \"### {}\\n\".format(issue.pop('title'))\n for k, v in issue.iteritems():\n markdown += \"- {}: {}\\n\".format(k, v)\n return markdown", "def issues(self):\n if self.pull_request.body is not None:\n regex = r\"(?<=closes: #|elated: #)\\d{5}\"\n issue_strs = re.findall(regex, self.pull_request.body)\n self.issue_nums = [eval(s) for s in issue_strs]", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", 
url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed", "def error_embed(message: str, title: Optional[str] = None) -> Embed:\n title = title or random.choice(ERROR_REPLIES)\n embed = Embed(colour=Colours.soft_red, title=title)\n embed.description = message\n return embed", "def answer_issue(self, issue: int, option: int) -> etree.Element:\n issueEffect = self.shards_xml(c=\"issue\", issue=str(issue), option=str(option))[\n \"issue\"\n ]\n return issueEffect", "def post_to_github(results: List[dict]):\n\n tests_info_body = ''\n has_failed = False\n for result in results:\n if result['status'] == 'passed':\n tests_info_body += f':white_check_mark: `{result[\"command\"]}`\\n'\n else:\n has_failed = True\n tests_info_body += f':x: `{result[\"command\"]}`\\n```{result[\"output\"]}```\\n<br>'\n\n pr_body = 'Whoopsie. Looks like there are some issues with this PR. :space_invader:' if \\\n has_failed else 'This PR is good to go ! :tada:'\n\n pr_body += f'\\n\\n<details><summary><strong>Tests</strong></summary><p>\\n\\n{tests_info_body}\\n</p></details>'\n\n try:\n source_repo = '/'.join(os.getenv('CODEBUILD_SOURCE_REPO_URL')[:-4].split('/')[-2:])\n source_commit_hash = os.getenv('CODEBUILD_RESOLVED_SOURCE_VERSION')\n source_pr = int(os.getenv('CODEBUILD_WEBHOOK_PR', '0'))\n\n if source_pr > 0:\n g = Github(os.getenv('GITHUB_API_TOKEN', ''))\n repo = g.get_repo(source_repo)\n pr: PullRequest = repo.get_pull(source_pr)\n\n print(\n f'Creating review comment: '\n f'pr -> {pr.title} // '\n f'commit -> {source_commit_hash} // '\n f'has_failed -> {has_failed}'\n )\n\n pr.create_review(\n repo.get_commit(sha=source_commit_hash),\n pr_body,\n 'REQUEST_CHANGES' if has_failed else 'APPROVE'\n )\n finally:\n if has_failed:\n print('Test(s) failed.')\n exit(1)", "def _make_message(request, issue, message, comments=None, send_mail=False,\n draft=None, in_reply_to=None):\n attach_patch = request.POST.get(\"attach_patch\") == \"yes\"\n template, context = _get_mail_template(request, issue, full_diff=attach_patch)\n # Decide who should receive mail\n my_email = db.Email(request.user.email())\n to = ([db.Email(issue.owner.email())] +\n issue.reviewers +\n [db.Email(email) for email in issue.collaborator_emails()])\n cc = issue.cc[:]\n if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:\n cc.append(db.Email(django_settings.RIETVELD_INCOMING_MAIL_ADDRESS))\n reply_to = to + cc\n if my_email in to and len(to) > 1: # send_mail() wants a non-empty to list\n to.remove(my_email)\n if my_email in cc:\n cc.remove(my_email)\n issue_id = issue.key.id()\n subject = issue.mail_subject()\n patch = None\n if attach_patch:\n subject = 'PATCH: ' + subject\n if 'patch' in context:\n patch = context['patch']\n del context['patch']\n if issue.num_messages:\n subject = 'Re: ' + subject\n if comments:\n details = _get_draft_details(request, comments)\n else:\n details = ''\n message = message.replace('\\r\\n', '\\n')\n text = ((message.strip() + '\\n\\n' + details.strip())).strip()\n if draft is None:\n msg = models.Message(issue_key=issue.key,\n subject=subject,\n sender=my_email,\n recipients=reply_to,\n text=text,\n parent=issue.key,\n issue_was_closed=issue.closed)\n else:\n msg = draft\n msg.subject = subject\n msg.recipients = reply_to\n msg.text = text\n msg.draft = False\n msg.date = datetime.datetime.now()\n msg.issue_was_closed = issue.closed\n issue.calculate_updates_for(msg)\n\n if in_reply_to:\n 
try:\n replied_msg_id = int(in_reply_to)\n replied_msg = models.Message.get_by_id(replied_msg_id, parent=issue.key)\n msg.in_reply_to_key = replied_msg.key\n replied_issue_id = replied_msg.issue_key.id()\n if replied_issue_id != issue_id:\n logging.warn('In-reply-to Message is for a different issue: '\n '%s instead of %s', replied_issue_id, issue_id)\n msg.in_reply_to_key = None\n except (db.KindError, db.BadKeyError, ValueError):\n logging.warn('Invalid in-reply-to Message or key given: %s', in_reply_to)\n\n if send_mail:\n # Limit the list of files in the email to approximately 200\n if 'files' in context and len(context['files']) > 210:\n num_trimmed = len(context['files']) - 200\n del context['files'][200:]\n context['files'].append('[[ %d additional files ]]' % num_trimmed)\n url = request.build_absolute_uri(reverse(show, args=[issue.key.id()]))\n reviewer_nicknames = ', '.join(library.get_nickname(rev_temp, True,\n request)\n for rev_temp in issue.reviewers)\n cc_nicknames = ', '.join(library.get_nickname(cc_temp, True, request)\n for cc_temp in cc)\n my_nickname = library.get_nickname(request.user, True, request)\n reply_to = ', '.join(reply_to)\n description = (issue.description or '').replace('\\r\\n', '\\n')\n home = request.build_absolute_uri(reverse(index))\n modified_added_count, modified_removed_count = _get_modified_counts(issue)\n context.update({'reviewer_nicknames': reviewer_nicknames,\n 'cc_nicknames': cc_nicknames,\n 'my_nickname': my_nickname, 'url': url,\n 'message': message, 'details': details,\n 'description': description, 'home': home,\n 'added_lines' : modified_added_count,\n 'removed_lines': modified_removed_count,\n })\n for key, value in context.iteritems():\n if isinstance(value, str):\n try:\n encoding.force_unicode(value)\n except UnicodeDecodeError:\n logging.error('Key %s is not valid unicode. value: %r' % (key, value))\n # The content failed to be decoded as utf-8. Enforce it as ASCII.\n context[key] = value.decode('ascii', 'replace')\n body = django.template.loader.render_to_string(\n template, context, context_instance=RequestContext(request))\n logging.warn('Mail: to=%s; cc=%s', ', '.join(to), ', '.join(cc))\n send_args = {'sender': my_email,\n 'to': [_encode_safely(address) for address in to],\n 'subject': _encode_safely(subject),\n 'body': _encode_safely(body),\n 'reply_to': _encode_safely(reply_to)}\n if cc:\n send_args['cc'] = [_encode_safely(address) for address in cc]\n if patch:\n send_args['attachments'] = [('issue_%s_patch.diff' % issue.key.id(),\n patch)]\n\n attempts = 0\n while True:\n try:\n mail.send_mail(**send_args)\n break\n except mail.InvalidSenderError:\n if django_settings.RIETVELD_INCOMING_MAIL_ADDRESS:\n previous_sender = send_args['sender']\n if previous_sender not in send_args['to']:\n send_args['to'].append(previous_sender)\n send_args['sender'] = django_settings.RIETVELD_INCOMING_MAIL_ADDRESS\n else:\n raise\n except apiproxy_errors.DeadlineExceededError:\n # apiproxy_errors.DeadlineExceededError is raised when the\n # deadline of an API call is reached (e.g. for mail it's\n # something about 5 seconds). 
It's not the same as the lethal\n # runtime.DeadlineExeededError.\n attempts += 1\n if attempts >= 3:\n raise\n if attempts:\n logging.warning(\"Retried sending email %s times\", attempts)\n\n return msg", "def detect_issues(ctx, sha, message):\n\n if sha and message:\n raise click.UsageError('Use either --sha or --message, not both.')\n\n if not sha and not message:\n raise click.UsageError('Must specify either --sha or --message.')\n\n try:\n log.echo('Detecting issue...', break_line=False)\n issues = ctx.obj.github.detect_issues(sha=sha, message=message)\n log.checkmark()\n for issue in issues:\n log.echo('Issue detected: {}'.format(issue.url))\n except BaseException as _:\n log.xmark()\n raise", "def deliver_issue(self, absolute_path: str):\n settings = self.config[\"DEFAULT\"]\n subject = \"News Assistant New Issue\"\n\n host = settings[\"host\"]\n port = settings[\"port\"]\n email = settings[\"email\"]\n password = settings[\"password\"]\n kindle_email = settings[\"kindle_email\"]\n\n message = MIMEMultipart()\n message[\"From\"] = email\n message[\"To\"] = kindle_email\n message[\"Subject\"] = subject\n\n # Add body to email\n body = \"This email was generated automatically and meant to be \\\nreceived by a kindle email address.\"\n message.attach(MIMEText(body, \"plain\"))\n\n # Convert epub to mobi\n mobi_attachment = self.create_book_attachment(absolute_path)\n\n # Add attachment and convert to string\n message.attach(mobi_attachment)\n text = message.as_string()\n\n context = ssl.create_default_context()\n if int(port) == 465:\n # Use SSL\n with smtplib.SMTP_SSL(host, port, context=context) as server:\n server.login(email, password)\n server.sendmail(email, kindle_email, text)\n else:\n # Otherwise use TLS\n with smtplib.SMTP(host, port) as server:\n server.starttls(context=context)\n server.login(email, password)\n server.sendmail(email, kindle_email, text)", "def build_notification(issue, statuses_by_importance):\n notification = { 'title':\"\", 'body':\"\", 'urgency':\"\" }\n\n notification['title'] = issue.project['name']\n notification['body'] = '#' + str(issue.id) + ': ' + issue.subject + '\\n' + '<i><b>' + \\\n issue.priority['name'] + '</b> - ' + issue.status['name'] + '</i>'\n\n if issue.status['id'] in statuses_by_importance['critical_statuses']:\n notification['urgency']='critical'\n elif issue.status['id'] in statuses_by_importance['unimportant_statuses']:\n notification['urgency']='low'\n else:\n notification['urgency']='normal'\n\n return notification", "def issues_list(self, mar, request):\n if request.additionalProject:\n for project_name in request.additionalProject:\n project = self._services.project.GetProjectByName(\n mar.cnxn, project_name)\n if project and not permissions.UserCanViewProject(\n mar.auth.user_pb, mar.auth.effective_ids, project):\n raise permissions.PermissionException(\n 'The user %s has no permission for project %s' %\n (mar.auth.email, project_name))\n url_params = [(name, mar.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n # TODO(jrobbins): This should go through work_env.\n pipeline = frontendsearchpipeline.FrontendSearchPipeline(\n mar.cnxn, self._services, mar.auth, [mar.me_user_id], mar.query,\n mar.query_project_names, mar.num, mar.start, url_params, mar.can,\n mar.group_by_spec, mar.sort_spec, mar.warnings, mar.errors,\n mar.use_cached_searches, mar.profiler, display_mode=mar.mode,\n project=mar.project)\n if not mar.errors.AnyErrors():\n pipeline.SearchForIIDs()\n pipeline.MergeAndSortIssues()\n 
pipeline.Paginate()\n else:\n raise endpoints.BadRequestException(mar.errors.query)\n\n issue_list = [\n api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssueWrapper, r, mar, self._services)\n for r in pipeline.visible_results]\n return api_pb2_v1.IssuesListResponse(\n kind='monorail#issueList',\n totalResults=pipeline.total_count,\n items=issue_list)", "def issue_details(board, issue_number):\n issue = BoardIssue(board, issue_number)\n return issue.details()", "def _create_issue(*, image: str, repo: str, run: str, stacktrace: str) -> Issue:\n title = f\"Automatic error report from {repo}\"\n body = _report_body(image=image, repo=repo, run=run, stacktrace=stacktrace)\n return TAGBOT_ISSUES_REPO.create_issue(title, body)", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def test_issue_list_issues(self):\n pass", "def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)", "def _get_error_embed(self, title: str, body: str) -> Embed:\n return Embed(\n title=title,\n colour=Colours.soft_red,\n description=body\n )", "def do_jira_case_commit_message(self, arg):\n cases = [(issue.key, issue.fields.summary, self.jira_url() + \"/browse/\" + issue.key) for issue in self.get_open_issues()]\n msg = 
\"\"\"\n--------------------------------------------------------------------\n[{}] {}\n \n<msg>\n \n{}\n-------------------------------------------------------------------- \n \"\"\"\n for case in cases:\n print(msg.format(case[0], case[1], case[2]))", "def api_issue(request):\n messages = request.GET.get('messages', 'false').lower() == 'true'\n values = _issue_as_dict(request.issue, messages, request)\n return values", "def api_issue(request):\n messages = request.GET.get('messages', 'false').lower() == 'true'\n values = _issue_as_dict(request.issue, messages, request)\n return values", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "def discord_webhook(title, url, thumbnail, sizes):\n fields = []\n for size in sizes:\n fields.append({\"name\": size, \"value\": \"Available\", \"inline\": True})\n\n data = {\n \"username\": CONFIG['USERNAME'],\n \"avatar_url\": CONFIG['AVATAR_URL'],\n \"embeds\": [{\n \"title\": title,\n \"url\": CONFIG['URL'].replace('.json', '/') + url, \n \"thumbnail\": {\"url\": thumbnail},\n \"fields\": fields,\n \"color\": int(CONFIG['COLOUR']),\n \"footer\": {\"text\": \"Made by Yasser\"},\n \"timestamp\": str(datetime.utcnow()),\n }]\n }\n\n result = rq.post(CONFIG['WEBHOOK'], data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except 
rq.exceptions.HTTPError as err:\n logging.error(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def build_issue_tracker_url(issue_id):\n issue_tracker_tmpl = settings.ISSUE_TRACKER_BUG_URL_TMPL\n url_tmpl = issue_tracker_tmpl if issue_tracker_tmpl else 'http://issue/%s'\n return url_tmpl % issue_id", "def create_issues(repo, title, body, verbose=None):\n label = get_label(repo, title)\n if not label:\n err = \"A label embedded in parentheses is currently required. For \" \\\n \"example 'Title of Error (title_tag).' You provided: {0}\"\n raise NotImplementedError(err.format(title))\n # get stdout written to file\n with open(body) as fi:\n issues = fi.readlines()\n fi.close()\n # Handle empty body\n if not issues:\n raise RuntimeWarning(\"The body text is empty and no issue will be \"\n \"created for file: {}.\".format(body))\n # Handle multiline error messages.\n if 'Traceback' in ''.join(issues):\n if verbose:\n print \"Issue is a Traceback...\"\n string = \"\".join(issues)\n sha = hashlib.sha1(string).hexdigest()[0:6]\n error = dict(experiment_site_id=\"Traceback:{}\".format(sha),\n error=\"Traceback\",\n message=string)\n issues = [json.dumps(error, sort_keys=True)]\n for issue in issues:\n # Check for new format\n try:\n issue_dict = json.loads(issue)\n issue_dict.update({'title': get_valid_title(title)})\n error_msg = issue_dict.get('error')\n experiment_site_id = issue_dict.get('experiment_site_id')\n subject = \"{}, {}\".format(experiment_site_id, error_msg)\n body = generate_body(issue_dict)\n except:\n if verbose:\n print(\"Falling back to old issue formatting.\")\n # Old error handling approach.\n # Create a unique id.\n sha1 = hashlib.sha1(issue).hexdigest()[0:6]\n subject_base = title[0:title.index(' (')]\n subject = subject_base + \": {0}\".format(sha1)\n body = issue\n if is_open_issue(repo, subject, verbose=verbose):\n pass\n else:\n try:\n github_issue = repo.create_issue(subject, body=body, labels=label)\n except Exception as e:\n print 'Failed to create_issue with title:{0}, body:{1} and label:{2}, \\\n exception: {3}'.format(subject, body, label, str(e))\n if verbose:\n print \"Created issue... 
See: {0}\".format(github_issue.url)\n return None", "def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed", "def test_zendesk_comment_and_resolve_ticket_command_closes_the_issue(\n post_message,\n create_ticket,\n close_ticket,\n get_ticket,\n add_comment,\n resolve_command,\n log,\n db\n):\n slack_client = MagicMock()\n zendesk_client = MagicMock()\n workspace_uri = 'https://s.l.a.c.k'\n zendesk_uri = 'https://z.e.n.d.e.s.k'\n user_id = '100000000004'\n group_id = '200000000005'\n\n slack_client.users_info.return_value = FakeUserResponse()\n get_ticket.return_value = None\n ticket = FakeTicket(ticket_id='77')\n create_ticket.return_value = ticket\n assert ZenSlackChat.objects.count() == 0\n\n def handle_message(payload):\n is_handled = handler(\n payload,\n our_channel='C0192NP3TFG',\n workspace_uri=workspace_uri,\n zendesk_uri=zendesk_uri,\n slack_client=slack_client,\n zendesk_client=zendesk_client,\n user_id=user_id,\n group_id=group_id,\n )\n assert is_handled is True\n\n # Create an issue\n #\n handle_message({\n 'channel': 'C0192NP3TFG',\n 'event_ts': '1602064330.001600',\n 'text': 'My 🖨 is on 🔥',\n 'ts': '1602064330.001600',\n 'user': 'UGF7MRWMS',\n })\n\n # There should now be one instance here:\n assert ZenSlackChat.objects.count() == 1\n assert len(ZenSlackChat.open_issues()) == 1\n\n # Verify what the stored issue should look like:\n issue = ZenSlackChat.get('C0192NP3TFG', '1602064330.001600')\n assert issue.active is True\n assert issue.opened is not None\n assert issue.closed is None\n assert issue.channel_id == 'C0192NP3TFG'\n assert issue.chat_id == '1602064330.001600'\n assert issue.ticket_id == '77'\n\n # Check a new comment is sent over to zendesk:\n #\n create_ticket.reset_mock()\n post_message.reset_mock()\n\n # Return the fake ticket instance this time\n get_ticket.return_value = ticket\n\n handle_message({\n 'channel': 'C0192NP3TFG',\n 'event_ts': '1602064330.001600',\n 'text': 'No wait, it was just a blinking red light',\n # This is a reply message so thread_ts refers to the parent chat id:\n 'thread_ts': issue.chat_id,\n # and the ts refers to the reply message id:\n 'ts': '1602065965.003200',\n 'user': 'UGF7MRWMS',\n })\n assert ZenSlackChat.objects.count() == 1\n assert len(ZenSlackChat.open_issues()) == 1\n\n # None of test should have changed yet:\n issue = ZenSlackChat.get('C0192NP3TFG', '1602064330.001600')\n assert issue.active is True\n assert issue.opened is not None\n assert issue.closed is None\n assert issue.channel_id == 'C0192NP3TFG'\n assert issue.chat_id == '1602064330.001600'\n assert issue.ticket_id == '77'\n\n # No ticket should be created here\n create_ticket.assert_not_called()\n\n # Check the comment was \"sent\" to Zendesk correctly:\n add_comment.assert_called_with(\n zendesk_client,\n ticket,\n \"Bob Sprocket (Slack): No wait, it was just a blinking red light\"\n )\n\n # No slack message should have been sent:\n post_message.assert_not_called()\n\n # Resolve the issue:\n #\n create_ticket.reset_mock()\n post_message.reset_mock()\n add_comment.reset_mock()\n\n handle_message({\n 'channel': 'C0192NP3TFG',\n 'event_ts': '1602064330.001600',\n 'text': resolve_command,\n # This is a reply message so thread_ts refers to the parent chat id\n 'thread_ts': '1602064330.001600',\n 'ts': '1602065965.003200',\n 'user': 'UGF7MRWMS',\n })\n\n # There should now be one instance here:\n assert ZenSlackChat.objects.count() == 1\n assert 
len(ZenSlackChat.open_issues()) == 0\n\n # Verify what the stored issue should look like:\n issue = ZenSlackChat.get('C0192NP3TFG', '1602064330.001600')\n assert issue.active is False\n assert issue.opened is not None\n assert issue.closed is not None\n assert issue.channel_id == 'C0192NP3TFG'\n assert issue.chat_id == '1602064330.001600'\n assert issue.ticket_id == '77'\n\n slack_client.users_info.assert_called_with(user='UGF7MRWMS')\n create_ticket.assert_not_called()\n add_comment.assert_not_called()\n\n # Check the message that should go to slack closing the issue:\n url = f'https://z.e.n.d.e.s.k/{ticket.id}'\n post_message.assert_called_with(\n slack_client,\n '1602064330.001600',\n 'C0192NP3TFG',\n f'🤖 Understood. Ticket {url} has been closed.'\n )", "def test_api_can_request_issues(self):\n payload = request_github_issues('razat249', 'github-view')\n self.assertEqual(payload['error'], False)\n self.assertLess(payload['status_code'], 400)", "def _generate_issue(self, run, entry, callablesCount):\n\n trace_frames = []\n\n for p in entry[\"preconditions\"]:\n tf = self._generate_issue_traces(TraceKind.PRECONDITION, run, entry, p)\n trace_frames.append(tf)\n\n for p in entry[\"postconditions\"]:\n tf = self._generate_issue_traces(TraceKind.POSTCONDITION, run, entry, p)\n trace_frames.append(tf)\n\n features = set()\n for f in entry[\"features\"]:\n features.update(self._generate_issue_feature_contents(entry, f))\n\n callable = entry[\"callable\"]\n handle = self._get_issue_handle(entry)\n initial_sources = {\n self._get_shared_text(SharedTextKind.SOURCE, kind)\n for (_name, kind, _depth) in entry[\"initial_sources\"]\n }\n final_sinks = {\n self._get_shared_text(SharedTextKind.SINK, kind)\n for (_name, kind, _depth) in entry[\"final_sinks\"]\n }\n\n source_details = {\n self._get_shared_text(SharedTextKind.SOURCE_DETAIL, name)\n for (name, _kind, _depth) in entry[\"initial_sources\"]\n if name\n }\n sink_details = {\n self._get_shared_text(SharedTextKind.SINK_DETAIL, name)\n for (name, _kind, _depth) in entry[\"final_sinks\"]\n if name\n }\n\n issue = Issue.Record(\n id=IssueDBID(),\n code=entry[\"code\"],\n handle=handle,\n status=IssueStatus.UNCATEGORIZED,\n first_seen=run.date,\n run_id=run.id,\n )\n\n self.graph.add_issue(issue)\n\n fix_info = None\n fix_info_id = None\n if entry.get(\"fix_info\") is not None:\n fix_info = IssueInstanceFixInfo.Record(\n id=DBID(), fix_info=json.dumps(entry[\"fix_info\"])\n )\n fix_info_id = fix_info.id\n\n message = self._get_shared_text(SharedTextKind.MESSAGE, entry[\"message\"])\n filename_record = self._get_shared_text(\n SharedTextKind.FILENAME, entry[\"filename\"]\n )\n callable_record = self._get_shared_text(SharedTextKind.CALLABLE, callable)\n\n instance = IssueInstance.Record(\n id=DBID(),\n issue_id=issue.id,\n location=self.get_location(entry),\n filename_id=filename_record.id,\n callable_id=callable_record.id,\n run_id=run.id,\n fix_info_id=fix_info_id,\n message_id=message.id,\n rank=0,\n min_trace_length_to_sources=self._get_minimum_trace_length(\n entry[\"postconditions\"]\n ),\n min_trace_length_to_sinks=self._get_minimum_trace_length(\n entry[\"preconditions\"]\n ),\n callable_count=callablesCount[callable],\n )\n\n for sink in final_sinks:\n self.graph.add_issue_instance_shared_text_assoc(instance, sink)\n for detail in sink_details:\n self.graph.add_issue_instance_shared_text_assoc(instance, detail)\n for source in initial_sources:\n self.graph.add_issue_instance_shared_text_assoc(instance, source)\n for detail in source_details:\n 
self.graph.add_issue_instance_shared_text_assoc(instance, detail)\n\n if fix_info is not None:\n self.graph.add_issue_instance_fix_info(instance, fix_info)\n\n for trace_frame in trace_frames:\n self.graph.add_issue_instance_trace_frame_assoc(instance, trace_frame)\n\n for feature in features:\n feature = self._get_shared_text(SharedTextKind.FEATURE, feature)\n self.graph.add_issue_instance_shared_text_assoc(instance, feature)\n\n self.graph.add_issue_instance(instance)", "def _get_error_embed(self, title: str, body: str) -> Embed:\n return Embed(\n title=title,\n colour=0xFF0000,\n description=body,\n )", "def on_issue(self, payload):\n pass", "def _issue_as_dict(issue, messages, request=None):\n landed_days_ago = issue.get_time_since_landed()\n landed_days_ago = landed_days_ago.days if landed_days_ago else 'unknown'\n values = {\n 'offer_cq': issue.is_cq_available,\n 'owner': library.get_nickname(issue.owner, True, request),\n 'owner_email': issue.owner.email(),\n 'is_editor': issue.edit_allowed,\n 'modified': str(issue.modified),\n 'created': str(issue.created),\n 'closed': issue.closed,\n 'cc': issue.cc,\n 'reviewers': issue.reviewers,\n 'required_reviewers': issue.required_reviewers,\n 'all_required_reviewers_approved': issue.all_required_reviewers_approved,\n 'patchsets': [key.id() for key in issue.patchsets.iter(keys_only=True)],\n 'description': issue.description,\n 'subject': issue.subject,\n 'project': issue.project,\n 'issue': issue.key.id(),\n 'base_url': issue.base,\n 'target_ref': issue.target_ref,\n 'private': issue.private,\n 'commit': issue.commit,\n 'cq_dry_run': issue.cq_dry_run,\n 'cq_dry_run_last_triggered_by': issue.cq_dry_run_last_triggered_by,\n 'landed_days_ago': landed_days_ago,\n }\n if messages:\n values['messages'] = sorted(\n ({\n 'sender': m.sender,\n 'recipients': m.recipients,\n 'date': str(m.date),\n 'text': m.text,\n 'approval': m.approval,\n 'disapproval': m.disapproval,\n 'auto_generated': m.auto_generated,\n 'issue_was_closed': m.issue_was_closed,\n 'patchset': m.patchset_key.id() if m.patchset_key else None,\n }\n for m in models.Message.query(ancestor=issue.key)),\n key=lambda x: x['date'])\n return values", "def report_bug(self) -> str:\n\n width = os.get_terminal_size().columns\n title = \"YIKES! There's a bug!\".center(width, \"-\")\n title += (\n \"If you are seeing this, then there is something wrong with \"\n \"Miroslava. Please report this issue here: 'https://github.com/\"\n \"kaamiki/miroslava/issues/new' so that we can fix it at the \"\n \"earliest. 
It would be a great help if you provide the steps, \"\n \"traceback information or even a code sample for reproducing this \"\n \"bug while submitting an issue.\"\n )\n return textwrap.fill(title, width)", "async def process_push_hook(push: models.PushHook):\n repository = push.repository\n project = push.project\n commit_str = \"commit\" if push.total_commits_count == 1 else \"commits\"\n # Show link to commit compare if there's more than one commit\n if push.total_commits_count > 1:\n embed_url = f\"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}\"\n else:\n embed_url = f\"{repository.homepage}/commit/{push.after[:7]}\"\n\n if push.before == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] New branch created {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n elif push.after == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] Branch deleted {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n\n # If there are no commits, do not show a message\n if not push.total_commits_count:\n return\n\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}:{push.branch}] \"\n f\"{push.total_commits_count} new {commit_str}\",\n url=embed_url, colour=discord.Colour.blurple())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n embed.description = \"\"\n for commit in push.commits:\n message = commit.message.splitlines()[0]\n embed.description += f\"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\\n\"\n print(\"Sending push message\")\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)", "def whatsnew(self, msg, args):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} label:welcome is:open type:issue\".format(\n task_repository_name())\n issue_list = client.search_issue(cmd, 10)\n result = limit_result(\n [\"{}: {}\".format(i.number, i.title)\n for i in issue_list])\n yield \"\\n\".join(result)", "def issue_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-a\", \"--assignees\", default=[], nargs=\"*\", help=\"users to assign to this issue\"\n )\n parser.add_argument(\"-b\", \"--body\", default=None, help=\"text body of the issue\")\n parser.add_argument(\n \"-c\",\n \"--column\",\n default=DEFAULT_COLUMN_NAME,\n help=\"name of column to place card in\",\n )\n parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=DEFAULT_COLUMN_NAME,\n help=\"Edit issue title and body in vim\",\n )\n parser.add_argument(\n \"-l\", \"--labels\", default=None, nargs=\"*\", help=\"labels to add to the new issue\"\n )\n parser.add_argument(\n \"-m\",\n \"--milestone\",\n default=None,\n help=\"milestone id to place this issue in. \"\n \"This should be an integer. \"\n \"Find milestone ids with the `milestones` command.\",\n )\n parser.add_argument(\n \"-p\", \"--project\", default=SCRUM_BOARD_NAME, help=\"project to create issue in\"\n )\n parser.add_argument(\"title\", default=None, nargs=\"?\", help=\"issue title\")\n\n args = parser.parse_args()\n\n # only required arg for creating an issue. 
can be overridden in interactive mode\n title = args.title\n\n # this can be overridden in interactive mode\n body = args.body\n\n if args.interactive:\n with tempfile.NamedTemporaryFile(\"w\") as fh:\n path = fh.name\n\n editor = os.environ.get(\"EDITOR\", os.environ.get(\"VISUAL\", \"vi\"))\n\n proc = getattr(sh, editor)\n\n proc(path, _fg=True)\n\n with open(path, \"r\") as rfh:\n\n # grab top line as title\n title = rfh.readline().replace(\"\\n\", \"\")\n\n # grab remaining lines as body\n body = \"\".join(rfh.readlines())\n\n session = GithubSession()\n\n additional_args = {\n \"assignees\": args.assignees,\n \"body\": body,\n \"labels\": args.labels,\n \"milestone\": args.milestone,\n }\n\n issue = session.create_issue(title, **additional_args)\n\n column_name = args.column\n project_name = args.project\n\n project = session.get_project(project_name)\n column = session.get_column(project, column_name)\n\n # finally, create the card\n session.create_card(column, issue)\n\n print(json.dumps(issue, indent=2))", "async def info(self, ctx):\n python = sys.version_info\n\n start = datetime.now()\n await ctx.trigger_typing()\n end = datetime.now()\n\n process = psutil.Process()\n\n embed = discord.Embed(title='Info',\n color=self.bot.color)\n embed.add_field(name='Latest Changelog',\n value='Restructured the project.',\n inline=False)\n embed.add_field(name='Creator',\n value='\\n'.join(self.bot.get_user(owner).mention for owner in self.bot.owner_ids))\n embed.add_field(name='Created on',\n value=f'{self.bot.created_on.strftime(\"%m/%d/%Y\")}\\n'\n f'(~{timeago.format(self.bot.created_on, datetime.utcnow())})')\n embed.add_field(name='Made With',\n value=f'[Python {python.major}.{python.minor}.{python.micro}](https://www.python.org/)\\n'\n f'[discord.py {discord.__version__}](https://discordpy.readthedocs.io/en/latest/)')\n embed.add_field(name='Status',\n value=f'Ping: {(end - start).total_seconds() * 1000:.2f}ms\\n'\n f'CPU: {process.cpu_percent()}%\\n'\n f'RAM: {process.memory_info().rss / 1048576:.2f}MB') # bits to bytes\n embed.add_field(name='Uptime',\n value='Online since:\\n'\n f'{self.bot.uptime.strftime(\"%m/%d/%Y %H:%M UTC\")}\\n'\n f'(~{timeago.format(self.bot.uptime, datetime.utcnow())})')\n embed.add_field(name='Statistics',\n value=f'Commands Run: {1003}\\n'\n f'Guilds: {len(list(self.bot.guilds))}\\n'\n f'Users: {len(list(self.bot.get_all_members()))} '\n f'(Unique: {len(set(self.bot.get_all_members()))})')\n embed.add_field(name='Acknowledgements',\n value='<@113104128783159296> - Answering a lot of questions I had, couldn\\'t have done it with you!\\n'\n '`[RKN]` - Testing! thanks guys :)',\n inline=False)\n\n await ctx.send(embed=embed)", "async def make_rules_embed(guild: discord.Guild, team: str, rules: str) -> discord.Embed:\n warning = _(\n \"***Violating [Discord Terms of Service](https://discordapp.com/terms) \"\n \"or [Community Guidelines](https://discordapp.com/guidelines) will \"\n \"result in an immediate ban. 
You may also be reported to Discord.***\"\n )\n em = discord.Embed(colour=int(TEAMS[team][\"home\"].replace(\"#\", \"\"), 16))\n em.description = rules\n em.title = _(\"__RULES__\")\n em.add_field(name=_(\"__**WARNING**__\"), value=warning)\n em.set_thumbnail(url=str(guild.icon_url))\n em.set_author(name=guild.name, icon_url=str(guild.icon_url))\n return em", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def build_actions_embed(actions: List[LoggingActions]) -> discord.Embed:\n\n embed = discord.Embed(title='Logging Actions', color=0x00bbff)\n for index, action in enumerate(LoggingActions.all_action_names()):\n embed.add_field(name=f'{index}: {action}', value='✅ Enabled' if action in actions else '❌ Disabled',\n inline=False)\n embed.set_footer(text='Please report any issues to my owner!')\n\n return embed", "def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = 
\"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)", "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))", "async def helps(ctx):\n embed = discord.Embed(title='**Help....**', description=\"The prefix for the bot is 'qq'.\\\nYah cuz you know _less qq, more pew pew_ ...\", colour=discord.Color.purple())\n embed.set_footer(text='For full list of commands with complete functions do _cmds')\n embed.add_field(name='Core', value='ping, help, cmds, botinfo')\n embed.add_field(name='Economy', value='cry, vaultoftears, tear shop', inline=False)\n embed.add_field(name='Entertainment', value='roast, flirt, compliment, geek, nerdystuff, quote, fortune,\\\n8ball, coffee, wannagrabacoffee, book, dadjoke', inline=False)\n embed.add_field(name='Utility', value='purge, ban, kick, unban', inline=False)\n embed.add_field(name='Games', value='diceroll, guessing_game', inline=False)\n await ctx.send(embed=embed)", "async def submit(client, event,\n submission_reference_url: ('str', 'Please give a link to your submission'),\n ):\n if (event.guild is not None):\n return Embed('Error', 'Please use this channel in a private channel.')\n \n if not event.user.has_roole(ROLE__SUPPORT__VERIFIED):\n return Embed('Permission denied', f'You must have {ROLE__SUPPORT__VERIFIED.mention} role to invoke this '\n f'command.')\n \n if datetime.utcnow() >= QUALIFIER_DEADLINE:\n return Embed('Oh No!', 'Qualifier over', color = COLOR__EVENT)\n \n user = event.user\n await client.message_create(CHANNEL__SUPPORT__EVENT, f'{user:f}, [{user.id}] submitted:\\n'\n f'`{submission_reference_url}`')\n \n return Embed('Success', 'Noice', color = COLOR__EVENT)", "def issues(db):\n db.session.query(Issue).delete()\n\n issues = [\n {\n 'label': 'login',\n 'email': 
'admin@localhost.com',\n 'question': '42.',\n 'status': 'unread'\n },\n {\n 'label': 'login',\n 'email': 'admin@localhost.com',\n 'question': 'Hello.',\n 'status': 'unread'\n }\n ]\n\n for issue in issues:\n db.session.add(Issue(**issue))\n\n db.session.commit()\n\n return db", "def Issue(self, **kwargs):\n if 'raise_on_failure' not in kwargs:\n kwargs['raise_on_failure'] = False\n return vm_util.IssueCommand(self._GetCommand(), **kwargs)", "def label(self, name):\r\n return labels.IssueLabel(self, name)", "def create_confluence_itissues_page(subject, status, summary, public, sites, comments,\n firstalert, resolution, impact, cause, handler, html_part,\n sender):\n\n server = None\n token = None\n itissues_parentpage = \"Ongoing IT Issues\"\n itissues_template_title = \"IT ISSUES ANNOUNCEMENT TEMPLATE\"\n space = 'proj'\n\n server = confluence.connect()\n token = confluence.login(server, 'admin', 'fanjie427')\n\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n today = datetime.now().strftime('%Y-%m-%d')\n\n #title = \"[%s] %s\" % (today, subject)\n title = subject\n if confluence.page_exists(space=space, name=title,\n server=server, token=token):\n print(\"Confluence page '%s' already exists!\" % (title))\n return False\n\n itissues_template_page = confluence.get_page(space=space,\n name=itissues_template_title,\n server=server,\n token=token)\n\n if not itissues_template_page:\n print(\"Unable to get IT ISSUES ANNOUNCEMENT TEMPLATE from confluence: [%s]\" % (itissues_template_title))\n return False\n\n content = itissues_template_page['content']\n\n content = content.replace(\"##now##\", now)\n content = content.replace(\"##subject##\", subject)\n content = content.replace(\"##status##\", status)\n content = content.replace(\"##summary##\", summary)\n content = content.replace(\"##public##\", public)\n content = content.replace(\"##sites##\", sites)\n content = content.replace(\"##comments##\", comments)\n content = content.replace(\"##firstalert##\", firstalert)\n content = content.replace(\"##resolution##\", resolution)\n content = content.replace(\"##impact##\", impact)\n content = content.replace(\"##cause##\", cause)\n content = content.replace(\"##handler##\", handler)\n user_link = confluence.build_user_link(sender, server, token)\n content = content.replace(\"##sender##\", user_link)\n content = content.replace(\"##html_part##\", \"<h2>\" + subject + \"</h2>\" + html_part)\n\n new_page = confluence.add_page(space=space,\n name=title,\n wiki=False,\n content=content,\n parent=itissues_parentpage,\n server=server,\n token=token)\n\n confluence.add_label(\n label=\"itissue,ongoing\",\n space=space,\n name=title,\n server=server,\n token=token)\n\n print(\"<br/>IT ISSUE Page is <a href='%s'>%s</a>\" % (new_page['url'], title))\n return new_page['id']", "def add_labels_to_issue(self, installation_id, repo_owner, repo_name,\n issue_num, predictions):\n # take an action if the prediction is confident enough\n if predictions['labels']:\n label_names, label_probabilities = self.filter_specified_labels(repo_owner,\n repo_name,\n predictions)\n else:\n label_names = []\n\n # get the isssue handle\n issue = get_issue_handle(installation_id, repo_owner, repo_name, issue_num)\n\n if label_names:\n # create message\n message = \"\"\"Issue-Label Bot is automatically applying the labels `{labels}` to this issue, with the confidence of {confidence}.\n Please mark this comment with :thumbsup: or :thumbsdown: to give our bot feedback!\n Links: [app 
homepage](https://github.com/marketplace/issue-label-bot), [dashboard]({app_url}data/{repo_owner}/{repo_name}) and [code](https://github.com/hamelsmu/MLapp) for this bot.\n \"\"\".format(labels=\"`, `\".join(label_names),\n confidence=\", \".join([\"{:.2f}\".format(p) for p in label_probabilities]),\n app_url=self.app_url,\n repo_owner=repo_owner,\n repo_name=repo_name)\n # label the issue using the GitHub api\n issue.add_labels(*label_names)\n logging.info(f'Add `{\"`, `\".join(label_names)}` to the issue # {issue_num}')\n else:\n message = \"\"\"Issue Label Bot is not confident enough to auto-label this issue.\n See [dashboard]({app_url}data/{repo_owner}/{repo_name}) for more details.\n \"\"\".format(app_url=self.app_url,\n repo_owner=repo_owner,\n repo_name=repo_name)\n logging.warning(f'Not confident enough to label this issue: # {issue_num}')\n\n # make a comment using the GitHub api\n comment = issue.create_comment(message)", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "async def prob(self, ctx, problem_name):\n if not await problem_exists(ctx, problem_name):\n return\n text = 'Problem details:```\\n'\n text += problems[problem_name].details\n text += '\\n```'\n await ctx.send(text)", "def create_jira_ticket(self):\n table_name = self.config.iamUserKeysRotation.ddb_table_name\n\n main_account = Account(region=self.config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(table_name)\n jira = JiraReporting(self.config)\n slack = SlackNotification(self.config)\n\n for account_id, account_name in self.config.iamUserKeysRotation.accounts.items():\n logging.debug(f\"Checking '{account_name} / {account_id}'\")\n issues = IssueOperations.get_account_not_closed_issues(ddb_table, account_id, IAMKeyRotationIssue)\n for issue in issues:\n key_id = issue.issue_id\n username = issue.issue_details.username\n # issue has been already reported\n if issue.timestamps.reported is not None:\n if issue.status in [IssueStatus.Resolved, IssueStatus.Whitelisted]:\n logging.debug(f\"Closing stale access key {issue.status.value} '{key_id} / {username}' issue\")\n\n comment = (f\"Closing {issue.status.value} stale access key '{key_id} / {username}' issue \"\n f\"in '{account_name} / {account_id}' account\")\n if issue.status == IssueStatus.Whitelisted:\n # Adding label with \"whitelisted\" to jira ticket.\n jira.add_label(\n ticket_id=issue.jira_details.ticket,\n label=IssueStatus.Whitelisted.value\n )\n jira.close_issue(\n ticket_id=issue.jira_details.ticket,\n comment=comment\n )\n slack.report_issue(\n msg=f\"{comment}\"\n f\"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}\",\n account_id=account_id,\n )\n IssueOperations.set_status_closed(ddb_table, issue)\n else:\n logging.debug(f\"No changes for '{key_id}/{username}'\")\n # issue 
has not been reported yet\n else:\n logging.debug(f\"Reporting stale access key '{key_id} / {username}' issue\")\n\n issue_summary = (f\"IAM access key '{key_id}' for '{username}' has not been rotated \"\n f\"for {self.config.iamUserKeysRotation.rotation_criteria_days.days} days \"\n f\"in '{account_name} / {account_id}' account\")\n\n create_date = dateutil.parser.parse(issue.issue_details.create_date).replace(tzinfo=None).isoformat(' ', 'minutes')\n issue_description = (\n f\"IAM access key has not been rotated for {self.config.iamUserKeysRotation.rotation_criteria_days.days} days.\\n\\n\"\n f\"*Risk*: Low\\n\\n\"\n f\"*Account Name*: {account_name}\\n\"\n f\"*Account ID*: {account_id}\\n\"\n f\"*User Name*: {username}\\n\"\n f\"*Key ID*: {key_id}\\n\"\n f\"*Key created*: {create_date}\\n\"\n f\"\\n\")\n\n # auto_remediation_date = (self.config.now + self.config.iamUserKeysRotation.issue_retention_date).date()\n # issue_description += f\"\\n{{color:red}}*Auto-Remediation Date*: {auto_remediation_date}{{color}}\\n\\n\"\n\n issue_description += f\"*Recommendation*: Rotate specified stale access key. \"\n\n if self.config.whitelisting_procedure_url:\n issue_description += (f\"For any other exceptions, please follow the [whitelisting procedure|{self.config.whitelisting_procedure_url}] \"\n f\"and provide a strong business reasoning. \")\n\n try:\n response = jira.add_issue(\n issue_summary=issue_summary, issue_description=issue_description,\n priority=\"Major\", labels=[\"iam-key-rotation\"],\n account_id=account_id,\n )\n except Exception:\n logging.exception(\"Failed to create jira ticket\")\n continue\n\n if response is not None:\n issue.jira_details.ticket = response.ticket_id\n issue.jira_details.ticket_assignee_id = response.ticket_assignee_id\n\n slack.report_issue(\n msg=f\"Discovered {issue_summary}\"\n f\"{' (' + jira.ticket_url(issue.jira_details.ticket) + ')' if issue.jira_details.ticket else ''}\",\n account_id=account_id,\n )\n\n IssueOperations.set_status_reported(ddb_table, issue)", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues" ]
[ "0.6676813", "0.63654536", "0.6173436", "0.59853166", "0.5810283", "0.57947206", "0.57469696", "0.57408506", "0.56667405", "0.56306005", "0.5611144", "0.5595954", "0.5525713", "0.55207026", "0.55166113", "0.5467382", "0.54175764", "0.5404614", "0.53625447", "0.5345686", "0.53427225", "0.53263915", "0.531203", "0.5310896", "0.5303767", "0.52725947", "0.5271747", "0.5269129", "0.5251331", "0.52201915", "0.5189292", "0.5181078", "0.51712316", "0.5127793", "0.5122006", "0.51093894", "0.5105569", "0.50973165", "0.5085197", "0.50824726", "0.50824726", "0.5073225", "0.50652146", "0.5047219", "0.50465626", "0.5040835", "0.5029571", "0.5017996", "0.501534", "0.4999531", "0.4999242", "0.49969178", "0.4993742", "0.4993338", "0.49918732", "0.4984577", "0.49838", "0.49803177", "0.49769187", "0.4965604", "0.49655333", "0.49600855", "0.4955165", "0.49549842", "0.4952576", "0.4952576", "0.49343252", "0.4921247", "0.49201712", "0.49123713", "0.49123535", "0.4908116", "0.49069902", "0.49036908", "0.49032265", "0.49000242", "0.48983076", "0.4882701", "0.48712125", "0.4868872", "0.4868125", "0.48619577", "0.48544243", "0.48531362", "0.4850872", "0.48456848", "0.4841146", "0.4839287", "0.4835232", "0.48155665", "0.4811575", "0.48057494", "0.48045868", "0.4798107", "0.4793744", "0.47903112", "0.47862402", "0.4777866", "0.47696158", "0.47644877" ]
0.5929863
4
Builds and sends an embed message with notes information.
async def process_note_hook(data: models.NoteHook): note = data.note user = data.user project = data.project colour = discord.Colour.greyple() embed = discord.Embed(url=note.url, description=note.description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) if data.issue: issue = data.issue embed.title = f"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}" if data.commit: commit = data.commit embed.title = f"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`" if data.merge_request: merge = data.merge_request embed.title = f"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}" await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed():", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "async def _view_note(self, ctx: Context, number: int):\n\n author = ctx.author\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n try:\n note = notes[number-1]\n except IndexError:\n return await ctx.send(\n _(\"Note number {} not found.\").format(number)\n )\n\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Note #{}\").format(author_str, number),\n timestamp=ctx.message.created_at\n )\n await ctx.send(embed=page)\n else:\n page = _(\n \"**{author} TvM Note #{number}**\"\n \"\\n\\n{note}\"\n ).format(\n author=author_str,\n number=number,\n note=note_info\n )\n await ctx.send(page)", "async def _notes(self, ctx: Context):\n pass", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. 
The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "async def _view_all_notes(self, ctx: Context):\n\n author = ctx.author\n\n note_infos = []\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n total = len(notes)\n for page_num, note in enumerate(notes, start=1):\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Notes\").format(author_str),\n timestamp=ctx.message.created_at\n )\n\n page.set_footer(\n text=_(\"Page {page_num}/{leng}\").format(\n page_num=page_num, leng=total\n )\n )\n else:\n page = _(\n \"**{author} TvM Notes**\"\n \"\\n\\n{note}\"\n \"\\n{footer}\"\n ).format(\n author=author_str,\n note=note_info,\n footer=_(\"*Page {page_num}/{leng}*\").format(\n page_num=page_num, leng=total\n )\n )\n\n note_infos.append(page)\n\n await menu(ctx, note_infos, DEFAULT_CONTROLS)", "def _get_body(self):\n\n bodyWrap = (\n u\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n u\"<!DOCTYPE en-note SYSTEM \\\"http://xml.evernote.com/pub/enml2.dtd\\\">\"\n u\"<en-note>{body}</en-note>\"\n )\n att_enml = \"\\n\".join(self.embed_resources)\n\n return bodyWrap.format(body=att_enml)", "def note(self):\n content = sys.argv[2]\n self.record('NOTE %s' % content)\n print('Note added')", "def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed", "def note(self, irc, msg, args, user, id):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.errorInvalid('note id')\n if user.id != note.frm and user.id != note.to:\n s = 'You may only retrieve notes you\\'ve sent or received.'\n irc.error(s)\n return\n newnote = self._formatNote(note, user.id)\n irc.reply(newnote, private=(not note.public))\n self.db.setRead(id)", "def createNote(self, authenticationToken, note):\r\n pass", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = 
jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def generate_message(self) -> List[mido.Message]:\n # check for a None note (which is a \"pause\")\n if self.__note:\n 
note_value = self.__note + self.__info.octave\n note_velocity = self.__info.volume\n else:\n note_value = 0\n note_velocity = 0\n return [\n mido.Message(\n \"note_on\",\n note=note_value,\n velocity=note_velocity,\n time=NOTE_DURATION,\n ),\n mido.Message(\n \"note_off\",\n note=note_value,\n velocity=note_velocity,\n time=0,\n ),\n ]", "def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed", "def note():", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "async def dadjoke(self, ctx):\n author = ctx.message.author\n joke = await self.get_joke()\n data = Embed.create(self, ctx, title='Demaratus Dad Jokes :joy:',\n description=joke)\n image = (f\"https://media.discordapp.net/attachments/745608075670585344/770068453502877716/DADJOKES.png?width=1442&height=481\")\n data.set_author\n data.set_image(url=image)\n await ctx.send(embed=data)", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "def push_note(self, device_iden, title, body):\n self.session.post(\n 
PUSH_URL,\n json={\n \"device_iden\": device_iden,\n \"type\": \"note\",\n \"title\": title,\n \"body\": body\n }).raise_for_status()", "def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "def add_note():\n pass", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "def create_a_note(self, data):\n return self.client._post(\"/notes\", json=data)", "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def build_note(text, level=1, limit=180, strip=True, keyword=\"NOTE\"):\n note = []\n key = int(level)\n tag = keyword\n data = text\n if strip:\n data = data.strip()\n while data != \"\":\n index = limit\n if len(data) < limit:\n index = len(data)\n else:\n while data[index - 1] == \" \" and index > 0:\n index = index - 1\n chunk = data[:index]\n data = data[index:]\n entry = \"{0} {1} {2}\".format(key, tag, chunk)\n note.append(entry)\n tag = \"CONC\"\n key = int(level) + 1\n return note", "def create_note(self, owner, title, text, note_type, important):\r\n note = self.create(owner=owner, title=title, text=text, note_type=note_type, important=important)\r\n return note", "async def helps(ctx):\n embed = discord.Embed(title='**Help....**', description=\"The prefix for the bot is 'qq'.\\\nYah cuz you know _less qq, more pew pew_ ...\", colour=discord.Color.purple())\n embed.set_footer(text='For full list of commands with complete functions do _cmds')\n embed.add_field(name='Core', value='ping, help, cmds, botinfo')\n embed.add_field(name='Economy', value='cry, vaultoftears, tear shop', inline=False)\n embed.add_field(name='Entertainment', 
value='roast, flirt, compliment, geek, nerdystuff, quote, fortune,\\\n8ball, coffee, wannagrabacoffee, book, dadjoke', inline=False)\n embed.add_field(name='Utility', value='purge, ban, kick, unban', inline=False)\n embed.add_field(name='Games', value='diceroll, guessing_game', inline=False)\n await ctx.send(embed=embed)", "def quote_to_embed(self,result):\n thedate = datetime.date.fromtimestamp(result[3])\n thechannel = self.bot.get_channel(result[2])\n themember = thechannel.server.get_member(result[1])\n theauthor = themember.name\n if hasattr(themember, \"nick\"):\n if themember.nick is not None:\n theauthor = themember.nick\n embed = discord.Embed(title=\"Quote #{}\".format(result[4]), description=result[0])\n embed.set_author(name=theauthor, icon_url=themember.avatar_url)\n embed.set_footer(text=\"Saved on: {}\".format(thedate.strftime(\"%d %B %y\")))\n return embed", "async def _add_note(\n self,\n ctx: Context,\n note: Union[discord.Message, str],\n *,\n reason: str = None\n ):\n\n if isinstance(note, discord.Message):\n content = note.clean_content\n author = str(note.author)\n channel = note.channel.mention\n jump_url = note.jump_url\n else:\n content = note\n author = None\n channel = None\n jump_url = None\n\n async with self.config.member(ctx.author).notes() as notes:\n notes.append({\n \"note\": content,\n \"reason\": reason or \"No reason\",\n \"author\": author,\n \"channel\": channel,\n \"jump_url\": jump_url\n })\n\n await ctx.message.add_reaction(CHECK_MARK)", "def createNote(self, authenticationToken, note):\r\n self.send_createNote(authenticationToken, note)\r\n return self.recv_createNote()", "async def embed(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "def notes(self, notes):\n if (self.local_vars_configuration.client_side_validation and\n notes is not None and len(notes) > 255):\n raise ValueError(\"Invalid value for `notes`, length must be less than or equal to `255`\") # noqa: E501\n\n self._notes = notes", "def getnotes():", "def emailNote(self, authenticationToken, parameters):\r\n pass", "def send_joke(update, context):\n\n joke = submission_fetcher.joke_fetcher.get_post()\n msg = joke.title + '\\n\\n' + joke.selftext\n lad_bot.send_message(chat_id=update.effective_chat.id, text=msg)\n print(f\"Joke sent for {update.message.from_user.first_name} \n {update.message.from_user.last_name} (username: {update.message.from_user.username}).\")", "def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed", "def note(self, key=None):\n if key is None:\n raise 
SimplenoteError('Unable to get note: Key not given')\n url = self.base_url + 'data/' + key\n note = self._process_query(url)\n return note", "def notification(title, body, email):\n ACCESS_TOKEN = \"o.5ls4UBW48oQ6bm5VI6ABbiySEjIS9enC\"\n data_send = {\"type\": \"note\", \"title\": title, \"body\": body, \"email\":email}\n resp = requests.post('https://api.pushbullet.com/v2/pushes', data=json.dumps(data_send),\n headers={'Authorization': 'Bearer ' + ACCESS_TOKEN,\n 'Content-Type': 'application/json'})", "def display_note(self, note):\n\t\tself.canvas.itemconfig(self.note, text = note)", "async def info(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n embed = discord.Embed()\n embed.colour = discord.Colour.blurple()\n embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)\n\n embed.title = f'Your current level : {level}'\n\n embed.add_field(name='Question', value=f'{self.enigmas[level][\"question\"]}')\n\n embed.set_footer(text='I love Ducks')\n\n await ctx.send(embed=embed)", "async def about(self, ctx):\n embed = discord.Embed(title = f\"About {self.bot.user.name}\", color = discord.Color.blurple())\n embed.set_thumbnail(url = self.bot.user.avatar_url)\n embed.add_field(name = \"Developers\", value = \"Kowlin#4417 & A Trash Coder#0981\", inline = False)\n embed.add_field(name = \"Library\", value = \"discord.py rewrite\", inline = False)\n embed.add_field(name = \"Source Code\", value = \"[Click here](https://github.com/kowlintechnologies/DHB)\", inline = False)\n embed.add_field(name = \"Links\", value = \"[Docs](https://dhb-documentation.readthedocs.io/en/latest/index.html) | [Support](https://discord.gg/KEkwrwd) | [Invite](https://discordapp.com/api/oauth2/authorize?client_id=592811241756688405&permissions=2080762998&scope=bot)\")\n await ctx.send(embed = embed)", "def new_note(self, context, payload):\n\n note = PipedriveCRMNote( \n note_id= payload[\"current\"][\"id\"],\n content= payload[\"current\"][\"content\"],\n deal_id= payload[\"current\"][\"deal_id\"],\n pin_note_on_specified_deal= payload[\"current\"][\"pinned_to_deal_flag\"],\n organization_id= payload[\"current\"][\"org_id\"],\n pin_note_on_specified_organization= payload[\"current\"][\"pinned_to_organization_flag\"],\n person_id= payload[\"current\"][\"person_id\"],\n pin_note_on_specified_person= payload[\"current\"][\"pinned_to_person_flag\"],\n lead_id= payload[\"current\"][\"lead_id\"],\n pin_note_on_specified_lead= payload[\"current\"][\"pinned_to_lead_flag\"]\n )\n return note.__dict__", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "async def _now(self, ctx: commands.Context):\n\n await ctx.send(embed=ctx.voice_state.current.create_embed())", "async def _now(self, ctx: commands.Context):\n\n await ctx.send(embed=ctx.voice_state.current.create_embed())", "def emailNote(self, authenticationToken, parameters):\r\n self.send_emailNote(authenticationToken, parameters)\r\n self.recv_emailNote()", "def displayMelody(self):\r\n print(self.notes)", "async def get_opening_note(self):\n server_prefix = await 
ex.get_server_prefix(self.context)\n return f\"Use ``{server_prefix}help [command]`` for more info on a command.\\nYou can also use \" \\\n f\"``{server_prefix}help [category]`` (CASE-SENSITIVE) for more info on a category.\\nTo reset a \" \\\n f\"server prefix, you may type ``{ex.keys.bot_prefix}setprefix``.\\n\\n \" \\\n f\"**Support Server:** {ex.keys.bot_support_server_link}\\n\\n\" \\\n f\"**[Link to Commands](https://irenebot.com/commands)**\"", "async def get_opening_note(self):\n server_prefix = await ex.get_server_prefix_by_context(self.context)\n return f\"Use ``{server_prefix}help [command]`` for more info on a command.\\nYou can also use \" \\\n f\"``{server_prefix}help [category]`` (CASE-SENSITIVE) for more info on a category.\\nTo reset a \" \\\n f\"server prefix, you may type ``{bot_prefix}setprefix``.\\n\\n \" \\\n f\"**Support Server:** {bot_support_server_link}\\n\\n\" \\\n f\"**[Link to Commands](https://irenebot.com/commands)**\"", "def help(self, update, context):\n help_message = textwrap.dedent(\"\"\"\n 1. /subscribe - To subscribe to sixes scored in IPL to avail 60% off swiggy coupon (SWIGGY6)\n 2. /snooze - To snooze the notifications for sixes scored for the day.\n 3. /removeSnooze - To resume the notifications for the day.\n 4. /unsubscribe - To unsubscribe to the sixes scored notifications.\n 5. /swiggyOffer - To know more about the ongoing swiggy offer.\n \"\"\")\n self.bot.send_message(chat_id=update.message.chat_id, text=help_message, parse_mode='markdown')", "def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed:\n title = [\n x.get(\"value\")\n for x in data.get(\"names\")\n if x.get(\"language\") == LANGUAGE_MAP.get(page[\"cultureCode\"])\n ]\n em = discord.Embed(\n title=title[0] if title else data.get(\"defaultName\"),\n colour=colour,\n )\n em.set_thumbnail(url=data.get(\"thumbUrl\") or \"\")\n if data.get(\"id\"):\n em.url = f\"https://vocadb.net/S/{data['id']}\"\n em.description = page[\"value\"][:4090] if page.get(\"value\") else \"No lyrics found.\"\n if page.get(\"url\"):\n em.add_field(\n name=\"Source\",\n value=f\"[{page.get('source') or 'Source'}]({page['url']})\",\n )\n return em", "def bibtex(self):\n return \"@comment{%(id)s: %(message)s}\" % \\\n {'id': self.id, 'message': self.message}", "def pp_add_note(note_url, msg, method=\"None - Internal\", mtype=\"Other\", flag=0):\n req_uri = \"https://secure1.inmotionhosting.com%s\" % (note_url)\n\n # build Requests session\n pp = requests.Session()\n pp.auth = (udata.pp2['user'], udata.pp2['pass'])\n pp.cookies.update(get_cookies('secure1.inmotionhosting.com'))\n pp.headers.update({'referer': req_uri})\n\n # build form request\n fdata = {\n 'comment': msg,\n 'method': method,\n 'type': mtype,\n 'flag': flag,\n 'send_to_cc': 0,\n 'submit': \"Add Note\"\n }\n # then post update\n bpost = pp.post(req_uri, data=fdata)\n\n # validate login\n check_pp_login(bpost)\n\n if bpost.status_code == 200 or bpost.status_code == 302:\n print(\"Note posted OK\")\n else:\n print(\"!! 
Failed to post note to account\")\n\n return bpost", "def help_message(bot, update):\n with open('./timetable_bot/static/help_message') as file:\n text = file.read()\n bot.send_message(update.message.chat_id,\n text=text, parse_mode='Markdown')", "async def cmds(ctx):\n text = \"Here's a list of commands along with functions....\"\n embed = discord.Embed(title='**COMMANDS**', description=text, color=discord.Color.green())\n embed.add_field(name='helpme',\n value='displays the command prefix and a basic list of commands...')\n embed.add_field(name='ping',\n value='The ping command for pinging...', inline=False)\n embed.add_field(name='cmds',\n value='Dislays this message', inline=False)\n embed.add_field(name='botinfo',\n value='Displays info on the bot...')\n embed.add_field(name='say',\n value=\"Makes the bot say stuff. Usage- '_say <sentence/word>'\")\n embed.add_field(name='roast',\n value=\"This is the roast command.Go get 'em. Usage- '_roast <@member>'\")\n embed.add_field(name='flirt',\n value=\"*wink *wink Wanna hit on someone?. Usage-'_flirt <@member>'\")\n embed.add_field(name='compliment',\n value=\"Wanna commend and compliment someone?. Usage- '_compliment <@member>'\")\n embed.add_field(name='geek',\n value='Prints geeky statements...Aliases= \"pimp,techie\"')\n embed.add_field(name='nerdystuff',\n value='Prints stuff for that one nerd in the chat....')\n embed.add_field(name='quote',\n value='Get ready for some of the best quotes ever....')\n embed.add_field(name='fortune',\n value='Wanna know the future? Aliases=\"future\"')\n embed.add_field(name='8ball',\n value='Wanna ask questions from the crystal ball?. Aliases=\"seer\". Usage-\"_8ball <Question>\"')\n embed.add_field(name='coffee',\n value='Just try a nice cup of coffee....')\n embed.add_field(name='wannagrabacoffe',\n value=\"Wanna ask your e-crush out? Here you go.... Usage-'_wannagrabacoffee <@member>'\")\n embed.add_field(name='book',\n value='Wanna read a book. Here are some recommendations....')\n embed.add_field(name='dadjoke',\n value='Wanna hear some cringey bad jokes?')\n embed.add_field(name='diceroll',\n value='Rolls a dice. If you get a number higher than the bot then you win...')\n embed.add_field(name='guessing_game',\n value='Bot thinks of a number smaller than 15 and you have to guess that number.\\\nIf you guess it correct, you win')\n embed.set_footer(text='I hope that helped......')\n await ctx.send(embed=embed)", "async def botinfo(self, ctx: commands.Context) -> None:\n\n embed = CleanEmbed(\n author_text=\"About Freddy\",\n description=f\"Freddy is a powerful multi-purpose bot, developed and designed with ease of use in mind. 
\"\n f\"Created {(datetime.utcnow() - self.bot.user.created_at).days} days ago, he has been providing \"\n f\"value to many guilds for a long time.\",\n thumbnail_url=self.bot.user.avatar_url,\n fields=[\n {\"name\": \"Commands\", \"value\": f\"{len(self.bot.commands)} public commands\", \"inline\": True},\n {\"name\": \"Maintainment\", \"value\": \"Developed and designed by Harry\", \"inline\": True},\n {\"name\": \"Invite Freddy\", \"value\": \"Invite here\", \"inline\": True},\n {\"name\": \"Timeline\", \"value\":\n \"~~-~~**1**~~------~~**2**~~-------~~**3**~~------------------~~**4**~~-----------~~**5**~~-~~ \\n\\n\"\n \"**1** - \" + self.bot.user.created_at.strftime(\"%B %Y\") + \" - Freddy was created \\n\"\n \"**2** - November 2019 - Development was paused \\n\"\n \"**3** - January 2020 - Development resumed and Freddy grew rapidly \\n\"\n \"**4** - December 2020 - Freddy's development stopped \\n\"\n \"**5** - May 2021 - In process of re-designing and bot verification\"\n }\n ]\n )\n\n await ctx.send(embed=embed)", "def _construct_message(self):\n self.message[\"text\"] = \"\"\n if self.from_:\n self.message[\"text\"] += \"From: \" + self.from_ + \"\\n\"\n if self.subject:\n self.message[\"text\"] += \"Subject: \" + self.subject + \"\\n\"\n\n self.message[\"text\"] += self.body\n self._add_attachments()", "def record():\n src = request.args.get(\"src\")\n\n if not src:\n abort(403, description=\"`src` app ID is required required.\")\n\n if not _valid_app_ids([src]):\n abort(403, description=\"Unknown `src` or `dest` application ID(s) provided.\")\n\n try:\n status_code, message = _work_order_flex_notes.main(src)\n\n except Exception as e:\n # todo: debug only. this is not safe!\n abort(503, description=e)\n\n if status_code == 200:\n return message\n\n else:\n abort(503, description=message)", "async def info(self, ctx):\n python = sys.version_info\n\n start = datetime.now()\n await ctx.trigger_typing()\n end = datetime.now()\n\n process = psutil.Process()\n\n embed = discord.Embed(title='Info',\n color=self.bot.color)\n embed.add_field(name='Latest Changelog',\n value='Restructured the project.',\n inline=False)\n embed.add_field(name='Creator',\n value='\\n'.join(self.bot.get_user(owner).mention for owner in self.bot.owner_ids))\n embed.add_field(name='Created on',\n value=f'{self.bot.created_on.strftime(\"%m/%d/%Y\")}\\n'\n f'(~{timeago.format(self.bot.created_on, datetime.utcnow())})')\n embed.add_field(name='Made With',\n value=f'[Python {python.major}.{python.minor}.{python.micro}](https://www.python.org/)\\n'\n f'[discord.py {discord.__version__}](https://discordpy.readthedocs.io/en/latest/)')\n embed.add_field(name='Status',\n value=f'Ping: {(end - start).total_seconds() * 1000:.2f}ms\\n'\n f'CPU: {process.cpu_percent()}%\\n'\n f'RAM: {process.memory_info().rss / 1048576:.2f}MB') # bits to bytes\n embed.add_field(name='Uptime',\n value='Online since:\\n'\n f'{self.bot.uptime.strftime(\"%m/%d/%Y %H:%M UTC\")}\\n'\n f'(~{timeago.format(self.bot.uptime, datetime.utcnow())})')\n embed.add_field(name='Statistics',\n value=f'Commands Run: {1003}\\n'\n f'Guilds: {len(list(self.bot.guilds))}\\n'\n f'Users: {len(list(self.bot.get_all_members()))} '\n f'(Unique: {len(set(self.bot.get_all_members()))})')\n embed.add_field(name='Acknowledgements',\n value='<@113104128783159296> - Answering a lot of questions I had, couldn\\'t have done it with you!\\n'\n '`[RKN]` - Testing! 
thanks guys :)',\n inline=False)\n\n await ctx.send(embed=embed)", "def add(self, add_on): \n \n self.body = self.body + add_on + \" \"", "def _make_notification(error=False, routable=0, repo_configs=None):\n if error:\n return {\"something\" : \"broken\"}\n\n # get a base notification from the test fixtures\n note = fixtures.APIFactory.incoming()\n\n # now overwrite everything with randomised content\n note[\"event\"] = _select_from([\"accepted\", \"published\"])\n note[\"provider\"][\"agent\"] = _random_string(4, 10)\n note[\"provider\"][\"ref\"] = _random_string(5, 6)\n\n note[\"links\"] = []\n for i in range(randint(0, 5)):\n link = {}\n link[\"type\"] = _select_from([\"fulltext\", \"splash\"])\n link[\"format\"] = _select_from([\"application/pdf\", \"text/html\"])\n link[\"url\"] = _random_url()\n note[\"links\"].append(link)\n\n es = _random_datetime(datetime.fromtimestamp(0))\n ee = _random_datetime(es)\n note[\"embargo\"][\"start\"] = es.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n note[\"embargo\"][\"end\"] = ee.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n note[\"embargo\"][\"duration\"] = int((ee - es).total_seconds() / (60 * 60 * 24 * 30))\n\n note[\"metadata\"][\"title\"] = _random_string(50, 200)\n note[\"metadata\"][\"version\"] = _select_from([\"AO\", \"SMUR\", \"AM\", \"P\", \"VoR\", \"CVoR\", \"EVoR\", \"NA\"])\n note[\"metadata\"][\"publisher\"] = _random_string(10, 25)\n note[\"metadata\"][\"source\"][\"name\"] = _random_string(30, 100)\n note[\"metadata\"][\"source\"][\"identifier\"] = [{\"type\" : \"issn\", \"id\" : _random_issn()}]\n\n note[\"metadata\"][\"identifier\"][0][\"id\"] = _random_doi()\n\n note[\"metadata\"][\"author\"] = []\n for i in range(randint(1, 3)):\n author = {}\n author[\"name\"] = _random_string(10, 20)\n author[\"identifier\"] = [{\"type\" : \"email\", \"id\" : _random_email()}]\n author[\"affiliation\"] = _random_string(10, 40)\n note[\"metadata\"][\"author\"].append(author)\n\n note[\"metadata\"][\"language\"] = _select_from(isolang.ISO_639_2)[0]\n\n ds = _random_datetime(datetime.fromtimestamp(0))\n da = _random_datetime(ds)\n pd = _random_datetime(da)\n note[\"metadata\"][\"publication_date\"] = pd.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n note[\"metadata\"][\"date_accepted\"] = da.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n note[\"metadata\"][\"date_submitted\"] = ds.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n note[\"metadata\"][\"license_ref\"] = {}\n note[\"metadata\"][\"license_ref\"][\"title\"] = _select_from([\"CC0\", \"CC BY\", \"CC BY-SA\", \"CC BY-SA-ND\"])\n note[\"metadata\"][\"license_ref\"][\"type\"] = note[\"metadata\"][\"license_ref\"][\"title\"]\n note[\"metadata\"][\"license_ref\"][\"url\"] = \"http://creativecommons.org/\" + note[\"metadata\"][\"license_ref\"][\"title\"].lower().replace(\" \", \"-\")\n note[\"metadata\"][\"license_ref\"][\"version\"] = _select_from([\"1.0\", \"2.0\", \"3.0\", \"4.0\"])\n\n note[\"metadata\"][\"project\"] = []\n for i in range(randint(1, 2)):\n project = {}\n project[\"name\"] = _random_string(3, 6)\n project[\"identifier\"] = [{\"type\" : \"ringold\", \"id\" : _random_string(10, 16)}]\n project[\"grant_number\"] = _random_string(5, 7)\n note[\"metadata\"][\"project\"].append(project)\n\n note[\"metadata\"][\"subject\"] = []\n for i in range(randint(0, 10)):\n note[\"metadata\"][\"subject\"].append(_random_string(10, 15))\n\n # now determine if we are going to add routing metadata to this notification\n route = _select_from([True, False], [routable, 1 - routable])\n\n # we're not going to route it, the random content alone is 
sufficient\n if not route:\n return note\n\n route_to = _select_n(repo_configs, randint(1, len(repo_configs)))\n\n uber = {}\n for cfg in route_to:\n # the config may not be fully populated, so only allow us to choose from a field which has data in it\n routable_fields = []\n for f, l in cfg.iteritems():\n if l is not None and len(l) > 0:\n routable_fields.append(f)\n\n # field = _select_from([\"domains\", \"name_variants\", \"author_ids\", \"postcodes\", \"grants\", \"strings\"])\n field = _select_from(routable_fields)\n idx = randint(0, len(cfg[field]) - 1)\n if field not in uber:\n uber[field] = []\n uber[field].append(cfg[field][idx])\n\n # now layer the uber match record over the randomised notification\n for k, v in uber.iteritems():\n if k == \"domains\":\n # add an author with that domain in their email\n for domain in v:\n author = {}\n author[\"name\"] = _random_string(10, 20)\n author[\"identifier\"] = [{\"type\" : \"email\", \"id\" : _random_string(10, 12) + \"@\" + domain}]\n note[\"metadata\"][\"author\"].append(author)\n elif k == \"name_variants\":\n # add an author with that name variant in their affiliation\n for nv in v:\n author = {}\n author[\"name\"] = _random_string(10, 20)\n author[\"affiliation\"] = nv\n note[\"metadata\"][\"author\"].append(author)\n elif k == \"author_ids\":\n # add an author with these properties\n for aid in v:\n author = {}\n if aid.get(\"type\") == \"name\":\n author[\"name\"] = aid.get(\"id\")\n else:\n author[\"name\"] = _random_string(10, 20)\n author[\"identifier\"] = [{\"type\" : aid.get(\"type\"), \"id\" : aid.get(\"id\")}]\n note[\"metadata\"][\"author\"].append(author)\n elif k == \"postcodes\":\n # add an author with that postcode in their affiliation\n for postcode in v:\n author = {}\n author[\"name\"] = _random_string(10, 20)\n author[\"affiliation\"] = postcode\n note[\"metadata\"][\"author\"].append(author)\n elif k == \"grants\":\n # add a project with that grant number\n for grant in v:\n project = {}\n project[\"name\"] = _random_string(3, 6)\n project[\"grant_number\"] = grant\n note[\"metadata\"][\"project\"].append(project)\n elif k == \"strings\":\n # add an author with that string in their affiliation\n for s in v:\n author = {}\n author[\"name\"] = _random_string(10, 20)\n author[\"affiliation\"] = s\n note[\"metadata\"][\"author\"].append(author)\n\n return note", "def create_note(job_applied_id, user_id, note_title, note_text, note_category, note_date_created):\n\n note = Note(job_applied_id =job_applied_id, user_id = user_id , note_title = note_title , note_text = note_text,note_category = note_category, note_date_created = note_date_created)\n db.session.add(note)\n db.session.commit()\n\n return note", "async def message(description = None, **kwargs):\n if not kwargs.get(\"color\"):\n kwargs[\"color\"] = discord.Color(0x82b1ff)\n\n return discord.Embed(type = \"rich\",\n description = description,\n **kwargs)", "def test(self):\n self.note(\"Test Note\", \"\"\" This is a note.\nsecond line\"\"\", \"date\")", "def notes(self, notes: str):\n self._notes = notes", "async def botinfo(ctx):\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name='Bot Info', value = \"I'm made with the library Discord.py Async.\"\n \" I'm developed by Shutdown.py#2406. 
\"\n \"If you need any help with me, Join my [devs' server](https://discord.gg/X4CJdEM).\"\n \"Send feedback using the feedback command\")\n embed.add_field(name='Total Commands', value=(len(bot.commands)))\n embed.add_field(name = 'Invite Me!', value = '[Invite](https://discordbots.org/bot/399115688792424448)')\n embed.set_footer(text= \"{} | Requested by: {} at\".format(version, ctx.message.author))\n await bot.say(embed = embed)", "async def botinfo(self, context: Context) -> None:\n embed = discord.Embed(\n description=\"Used [Krypton's](https://krypton.ninja) template\",\n color=0x9C84EF,\n )\n embed.set_author(name=\"Bot Information\")\n embed.add_field(name=\"Owner:\", value=\"Krypton#7331\", inline=True)\n embed.add_field(\n name=\"Python Version:\", value=f\"{platform.python_version()}\", inline=True\n )\n embed.add_field(\n name=\"Prefix:\",\n value=f\"/ (Slash Commands) or {self.bot.config['prefix']} for normal commands\",\n inline=False,\n )\n embed.set_footer(text=f\"Requested by {context.author}\")\n await context.send(embed=embed)", "def play_protobuf(self, notes):\n with open_output(self.midi_port) as outport:\n\n outport.send(Message('program_change', program=12))\n \n try:\n last_end_time = 0\n for n in notes:\n # Calculate sleep time for rests\n sleep_time = n.start_time - last_end_time\n if sleep_time > 0:\n print(\"Rest {0} sec\".format(sleep_time))\n time.sleep(sleep_time)\n\n # Send note messages\n note_duration = n.end_time - n.start_time\n outport.send(Message('note_on', note=n.pitch, velocity=n.velocity))\n print(\"Note {0}, {1} sec\".format(n.pitch, note_duration))\n time.sleep(note_duration)\n outport.send(Message('note_off', note=n.pitch, velocity=n.velocity))\n last_end_time = n.end_time\n\n except KeyboardInterrupt:\n print('Stopping MIDI output')\n outport.reset()\n outport.panic()", "async def help(ctx):\n\tembed = discord.Embed(description='Below Are All The Commands For SecretBot')\n\tembed.set_author(name='SecretBot Help Center')\n\tembed.add_field(name='***`!ping`***', value='Returns the bot latency', inline=False)\n\tembed.add_field(name='***`!clear`***', value='Deletes the given amount of messages(default 10) \\n***i.e. !clear 20***', inline=False)\n\tembed.add_field(name='***`!live`***', value='Shows an image with data from a live League of Legends game \\n***i.e. !live NA Test Summoner Name***', inline=False)\n\tembed.add_field(name='***`!league`***', value='Shows an image with data of a summoner \\nFor multiple summoners, split by commas \\n***i.e. !league NA Test_Summoner_Name***', inline=False)\n\tembed.add_field(name='***`!ac`***', value='Shows data on bugs, fossils, or fish\\n***i.e. !ac fish horse mackerel***', inline=False)\n\tembed.set_footer(text='SecretBot isn’t endorsed by Riot Games and doesn’t reflect the views or opinions of Riot Games or anyone officially involved in producing or managing League of Legends. League of Legends and Riot Games are trademarks or registered trademarks of Riot Games, Inc. 
League of Legends © Riot Games, Inc.')\n\tawait ctx.send(embed=embed)", "def note_repr(key):\n return {\n 'url': request.host_url.rstrip('/') + url_for('notes_detail', key=key),\n 'text': notes[key]\n }", "async def docs(self, ctx):\n embed = discord.Embed(title = \"Documentation\", description = \"[Click here to visit our documentation!](https://dhb-documentation.readthedocs.io/en/latest/index.html)\", color = discord.Color.blurple())\n await ctx.send(embed = embed)", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "async def generate_news_paper(message: types.Message):\n chat_id = message.chat.id\n title = message.text[5:]\n commands.get_wall_newspaper(chat_id, title)\n await bot.send_photo(chat_id, open('./output/greeting_card.png', 'rb'), 'Have fun :3')", "def cmd_notification_id(client, args):\n notification = client.get_notification(args.notification_id)\n notification = notification.__dict__\n if 'comment' in notification['content']:\n notification['content'] = format_comment_tree(notification['content'])\n generate_output({'notification': notification})", "def generate_comment_body():\n body = {\n \"external_id\": 1,\n \"external_slug\": factories.random_str(),\n \"description\": \"External comment\",\n \"context\": None,\n }\n\n return body", "async def info(self, ctx):\r\n openfile = open(\"info.txt\", \"r\")\r\n embed = discord.Embed(title='Aristobot', description='This is a bot made by Aristoza that uses the TrueSkill '\r\n 'python package (http://trueskill.org/) which is based on '\r\n 'the '\r\n 'TrueSkill rating system developed by Microsoft.',\r\n color=33023)\r\n embed.add_field(name='How it works', value=openfile.read(), inline=False)\r\n await ctx.send(embed=embed)", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def OnBlipSubmitted(properties, context):\n root_wavelet = context.GetRootWavelet()\n root_wavelet.CreateBlip().GetDocument().SetText(\"New Blip is submitted\")\n #renren.send(\"This is a message from google wave. 
Powered by Wenbin Wu.\")", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def notes(self, notes):\n\n self._notes = notes", "def createNote(title, author, body) -> dict:\n new_note = Note(title=title, author=author, body=body, created_at=now())\n new_note.save()\n return {\n 'uuid': new_note.uuid, 'title': new_note.title,\n 'author': new_note.author, 'body': new_note.body, 'created_at': localtime(new_note.created_at)\n }", "def notes_detail(key):\n if request.method == \"PUT\":\n note = str(request.data.get(\"text\", \"\"))\n notes[key] = note\n return note_repr(key)\n\n elif request.method == \"DELETE\":\n notes.pop(key, None)\n return \"\", status.HTTP_204_NO_CONTENT\n\n # request.method == 'GET'\n if key not in notes:\n raise exceptions.NotFound()\n return note_repr(key)", "def execute(cls, slack_wrapper, args, channel_id, user_id, user_is_admin):\n try:\n with open(\"intro_msg\") as f:\n message = f.read()\n\n slack_wrapper.post_message(channel_id, message)\n except:\n message = \"Sorry, I forgot what I wanted to say (or the admins forgot to give me an intro message :wink:)\"\n\n slack_wrapper.post_message(channel_id, message)", "def render_note(note: str) -> str:\n note = emojize(note)\n note = markdown(note, extensions=['nl2br'])\n return note", "def notes_setup(self):\n pass", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "async def bubblewrap(self, ctx):\n data = Embed.create(\n self, ctx, title=\"Bubblewrap!\",\n description=(\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n )\n )\n await ctx.send(embed=data)", "def _info_embed(self, colour, data: Dict[str, Any]) -> 
discord.Embed:\n minutes = data.get(\"lengthSeconds\", 0) // 60\n seconds = data.get(\"lengthSeconds\", 0) % 60\n pub_date = self._parse_date(data.get(\"publishDate\"))\n all_artists = \", \".join(\n f\"[{x.get('name')}](https://vocadb.net/Ar/{x.get('id')}) ({x.get('categories')})\"\n for x in data.get(\"artists\")\n )\n embed = discord.Embed(colour=colour)\n embed.title = f\"{data.get('defaultName')} - {data.get('artistString')}\"\n embed.url = f\"https://vocadb.net/S/{data.get('id')}\"\n embed.set_thumbnail(url=data.get(\"thumbUrl\", \"\"))\n embed.add_field(name=\"Duration\", value=f\"{minutes} minutes, {seconds} seconds\")\n favorites, score = (data.get(\"favoritedTimes\", 0), data.get(\"ratingScore\", 0))\n embed.add_field(name=\"Published On\", value=pub_date)\n embed.add_field(name=\"Statistics\", value=f\"{favorites} favourite(s), {score} total score\")\n embed.add_field(name=\"Artist(s)\", value=all_artists)\n embed.set_footer(text=\"Powered by VocaDB\")\n return embed", "async def about(self, ctx: Context):\n embed = discord.Embed(\n colour=ctx.me.colour,\n description=f'I am {self.bot.user}, a bot made by {self.bot.owner}. My prefix is `{self.bot.prefix}`.'\n ).set_author(name=f'About {self.bot.user.name}:', icon_url=self.bot.user.avatar_url)\n\n await ctx.send(embed=embed)", "async def info(ctx):\n embed = discord.Embed(title=\"Zane Bot\", description=\"All hail the hypnotoad!\", color=0x0091C5)\n\n # give info about you here\n embed.add_field(name=\"Author\", value=\"Zanexius\")\n\n # Shows the number of servers the bot is member of.\n embed.add_field(name=\"Server count\", value=f\"{len(bot.guilds)}\")\n\n # give users a link to invite thsi bot to their server\n embed.add_field(name=\"Invite\", value=\"[Invite link](<insert your OAuth invitation link here>)\")\n\n await ctx.send(embed=embed)", "def notes_detail(key):\n if request.method == 'PUT':\n note = str(request.data.get('text', ''))\n notes[key] = note\n return note_repr(key)\n\n elif request.method == 'DELETE':\n notes.pop(key, None)\n return '', status.HTTP_204_NO_CONTENT\n\n # request.method == 'GET'\n if key not in notes:\n raise exceptions.NotFound()\n return note_repr(key)", "def buildMelodyNotes(m, bars):\n\n # Get the notes part of the scale specified in Melody object\n s = generate_scale(m.getKeySignature(), m.getScale(), m.getOctave())\n\n if bars < MIN_BARS or bars > MAX_BARS:\n bars = random.randint(MIN_BARS, 4)\n\n # Get a random number of notes value between the number of bars (so\n # that there is roughly one note per bar) and SMALLEST_NOTE //\n # OTHER_NOTE (so that each note would be roughly OTHER_NOTE long)\n numNotes = random.randint(4, max(1, bars * (SMALLEST_NOTE //\n DOTTED_EIGHTH_NOTE)))\n seq = []\n # randomize notes in scale by choosing {numNotes} random notes\n for i in range(numNotes):\n seq.append(random.choice(s))\n\n # randomize length of each notes using getRandomStructure function\n lengths, barSpace = getEqualStructure(bars, numNotes)\n\n # add the random notes to generate the melody\n for i in range(numNotes):\n if seq[i] == -1:\n m.addNote(Note(0, lengths[i], vel=0))\n else:\n m.addNote(Note(seq[i], lengths[i]))\n\n if barSpace > 0:\n # Add rest just in case last note does not extend to end of bar\n m.addNote(Note(0, barSpace, vel=0))", "def build_midi_notes(num_octaves=9, naming='standard', middle_c_name='C4', middle_c_note_num=60):\n octaves = [x for x in range(num_octaves)]\n if naming == 'standard':\n note_names = ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']\n 
else:\n print('Note naming parameter: {0} is unknown'.format(naming))\n return False\n\n note_name_list = [''.join((x[1], str(x[0]))) for x in itertools.product(octaves, note_names)]\n midi_note = {}\n\n # Iteratively name all notes\n # Middle C and above\n note_num = middle_c_note_num\n for n in range(note_name_list.index(middle_c_name), len(note_name_list)):\n midi_note[note_name_list[n]] = note_num\n note_num += 1\n\n # Below middle C\n note_num = middle_c_note_num - 1\n for n in range(note_name_list.index(middle_c_name)-1, -1, -1):\n midi_note[note_name_list[n]] = note_num\n note_num -= 1\n\n # Add enharmonic conversions\n enharmonic_mapping = {'Db': 'C#',\n 'Eb': 'D#',\n 'Gb': 'F#',\n 'Ab': 'G#',\n 'Bb': 'A#'}\n\n for n in [x for x in midi_note if x[0:2] in enharmonic_mapping]:\n midi_note[enharmonic_mapping[n[0:2]] + n[-1]] = midi_note[n]\n\n return midi_note", "def add(self, text, account=None, images=()):\n # type: (Text, Text, Tuple[Union[cgtwq.model.ImageInfo, Text,], ...]) -> ...\n\n account = account or get_account_id(self.select.token)\n\n # TODO: Refactor arguments at next major version.\n message = Message.load(text)\n message.images += images\n\n text_key = \"dom_text\"\n id_key = \"#link_id\"\n from_account_id_key = \"from_account_id\"\n if compat.api_level() == compat.API_LEVEL_5_2:\n text_key = \"text\"\n id_key = \"#task_id\"\n from_account_id_key = \"#from_account_id\"\n\n select = self.select\n select.call(\n \"c_note\",\n \"create\",\n field_data_array={\n \"module\": select.module.name,\n \"module_type\": select.module.module_type,\n id_key: \",\".join(select),\n text_key: message.api_payload(),\n from_account_id_key: account,\n },\n )" ]
[ "0.653712", "0.6195667", "0.5904277", "0.59017", "0.58894485", "0.58441114", "0.5828449", "0.5747832", "0.5732143", "0.57031065", "0.5664874", "0.56358755", "0.55843145", "0.5578333", "0.55621606", "0.5557521", "0.55531627", "0.5505942", "0.54942673", "0.54742163", "0.5460713", "0.5444641", "0.54375815", "0.540969", "0.5406481", "0.5381556", "0.5378305", "0.53701764", "0.53509796", "0.5317061", "0.53126806", "0.53075093", "0.53066933", "0.5297632", "0.5297016", "0.5270644", "0.52674806", "0.52579695", "0.5251387", "0.5248325", "0.5243869", "0.52381355", "0.5227518", "0.5215115", "0.52141625", "0.52103543", "0.5200503", "0.5200503", "0.5197293", "0.51929784", "0.51712674", "0.5165818", "0.5165583", "0.5161256", "0.5143406", "0.51419854", "0.51404524", "0.5139668", "0.51380736", "0.5131594", "0.5127731", "0.5121693", "0.512139", "0.5111933", "0.51064146", "0.5091525", "0.5090463", "0.50827414", "0.50798786", "0.507254", "0.5053087", "0.50496954", "0.5048695", "0.5046365", "0.5045649", "0.50383854", "0.5038186", "0.5032645", "0.5032146", "0.5026167", "0.5026021", "0.5017005", "0.5017005", "0.5017005", "0.5017005", "0.5017005", "0.5010423", "0.5010187", "0.50101316", "0.5009251", "0.5007126", "0.49975044", "0.4996465", "0.4993972", "0.4988508", "0.4988393", "0.49865544", "0.49778312", "0.49609485", "0.4950467" ]
0.63638
1
Builds and sends an embed message with merge request information.
async def process_merge_request_hook(data: models.MergeRequestHook): project = data.project merge = data.merge_request user = data.user description = "" action = "Issue updated" colour = discord.Colour.light_grey() if merge.action == "open": action = "Merge request opened" description = merge.description colour = discord.Colour.dark_green() elif merge.action == "close": action = "Merge request closed" colour = discord.Colour.dark_grey() embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}", url=merge.url, description=description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) embed.set_footer(text=f"{merge.source_branch} → {merge.target_branch}") await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed", "def embed():", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "async def embed(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso 
else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "def slackbuild_webhook(req: Request):\n global config\n global slack\n global cloudbuild\n\n # slack submits a POST\n if req.method != \"POST\":\n return abort(405)\n\n # not a true request from slack\n verified, err = slack.verify_webhook(req)\n if not verified:\n print(err)\n return abort(403)\n\n body = Slack.parse_request(req)\n argv = Slack.parse_command(body)\n msg = \"\"\n\n output, success = Command.run(argv, cloudbuild, config)\n\n if output is None:\n if success:\n # intentionaly not responding with a slack message\n return ('', 200)\n else:\n return abort(500)\n elif Slack.is_interactive_message(body):\n msg = slack.render_interactive_message(body, success, output)\n else:\n color = Colors.SUCCESS if success else Colors.FAILURE\n msg = slack.render_message({\"result\": output, \"color\": color}, \"command.json\")\n\n msg = json.dumps(msg)\n print(msg)\n return Response(response=msg, content_type=\"application/json\")", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url 
and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def notify_channel_on_merge(self):\n if self.pr.is_merged:\n LOG.debug(\"**** Repo=%s, new merge came to=%s, setting trace to=%s channel\"\n %(self.pr.repo, self.pr.base_branch, self.pr.config.codeChannelName))\n msg = MSG_CODE_CHANNEL.format(title=self.pr.title, desc=self.pr.description, pr=self.pr.link,\n head_branch=self.pr.head_branch, base_branch=self.pr.base_branch,\n pr_by=self.created_by, merge_by=self.merged_by)\n self.slack.postToSlack(self.pr.config.codeChannelName, msg)\n LOG.info(\"informed %s because pr=%s is merged into sensitive branch=%s\" %\n (self.pr.config.codeChannelName, self.pr.link_pretty, self.pr.base_branch))\n return {\"msg\":\"informed %s because pr=%s is merged into sensitive branch=%s\" %\n (self.pr.config.codeChannelName, self.pr.link_pretty, self.pr.base_branch)}\n return {\"msg\", \"Skipped posting to code channel because '%s' is not merge event\" %self.pr.action}", "def handle_new_oembed_details(embed_data):\n\n source = embed_data.get('oembed_source').strip()\n tweet_id = embed_data.get('tweet_id')\n\n assert tweet_id, \"Can only handle tweets\"\n assert embed_data.get('html'), \"Need HTML for embedding\"\n assert source, \"Need to know where this came from\"\n\n print 'new oembed details: %s %s' % (source, len(embed_data))\n\n # store all the data we received\n key = keys.tweet_embed_data(source,tweet_id)\n r = rc.hmset(key, embed_data)\n\n # we are giving preference to embedly data,\n # so also update the tweet's data w/ the embedly html\n if source == 'embedly':\n print 'embedly found, updating tweet data'\n key = keys.tweet_data(tweet_id)\n r = rc.hset(key, 'embed_html', embed_data.get('html'))\n\n # fire event that oembed has been saved\n revent.fire('new_oembed_details_saved', embed_data)\n\n return True", "def submitBuildRequest(ss, reason, props=None, now=False):", "def _update_mandrill_payload(self, payload, message):\n\n accepted_headers = {}\n if message.extra_headers:\n for k in message.extra_headers.keys():\n if k.startswith('X-') or k == 'Reply-To':\n accepted_headers[str(k)] = message.extra_headers[k]\n payload['message'].update({'headers': accepted_headers})\n\n payload['message'].update({\n 'tags': message.tags,\n 'track_opens': message.track_opens,\n 'track_clicks': message.track_clicks,\n 'headers': accepted_headers,\n })\n\n if message.global_merge_vars:\n payload['message']['global_merge_vars'] = [\n {'name': key, 'content': value}\n for key, value in message.global_merge_vars.iteritems()\n ]\n\n # sending html over to mandrill\n if getattr(message, 'alternatives', None):\n if len(message.alternatives) > 1:\n raise ImproperlyConfigured(\n \"Mandrill only accepts plain text and html emails. 
\"\n \"Please check the alternatives you have attached to \"\n \"your message.\")\n payload['message']['html'] = message.alternatives[0][0]\n\n # using a mandrill template message\n if message.content_subtype == 'mandrill.template':\n payload.update({\n 'template_name': message.template_name,\n 'template_content': message.template_content,\n })", "def embed(self, data, mime_type=\"text/plain\", encode_data_to_base64=True):\n if encode_data_to_base64:\n data = base64.standard_b64encode(data.encode()).decode()\n self.embeddings.append({\"data\": data, \"mime_type\": mime_type})", "async def process_push_hook(push: models.PushHook):\n repository = push.repository\n project = push.project\n commit_str = \"commit\" if push.total_commits_count == 1 else \"commits\"\n # Show link to commit compare if there's more than one commit\n if push.total_commits_count > 1:\n embed_url = f\"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}\"\n else:\n embed_url = f\"{repository.homepage}/commit/{push.after[:7]}\"\n\n if push.before == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] New branch created {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n elif push.after == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] Branch deleted {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n\n # If there are no commits, do not show a message\n if not push.total_commits_count:\n return\n\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}:{push.branch}] \"\n f\"{push.total_commits_count} new {commit_str}\",\n url=embed_url, colour=discord.Colour.blurple())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n embed.description = \"\"\n for commit in push.commits:\n message = commit.message.splitlines()[0]\n embed.description += f\"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\\n\"\n print(\"Sending push message\")\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "def form_payload(build_number, job_name, build_url, status):\n message = \"Build #{} {} for {}\".format(build_number, status, job_name)\n description = \"Build #{} {} for {}. 
\\nPlease check detailed logs here: {}console\".format(build_number, status, job_name, build_url)\n \n branch_name = \"\"\n # Check optional env variable\n if \"BRANCH_NAME\" in os.environ:\n branch_name = os.environ['BRANCH_NAME']\n\n payload_rep = {\"message\" : message , \"description\" : description, \"branch_name\" : branch_name,\n \"build_url\": build_url, \"job_name\": job_name, \"build_number\": build_number, \"node_name\": os.environ['NODE_NAME'],\n \"status\" : status, \"event_id\" : job_name}\n return payload_rep", "def svn_client_mergeinfo_log_merged(char_path_or_url, svn_opt_revision_t_peg_revision, char_merge_source_path_or_url, svn_opt_revision_t_src_peg_revision, svn_log_entry_receiver_t_receiver, svn_boolean_t_discover_changed_paths, apr_array_header_t_revprops, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed", "def build(self, observation):\n raise NotImplementedError(\n 'Needs to be implemented as part of Embedder Interface')", "def _embed_result(self, embedding):\n # project original embedding\n project_weight = self.project.weight # (o, c)\n project_embedding = embedding.permute(0, 2, 1).unsqueeze(-1) \\\n * project_weight.permute(1, 0) # (n, e, c, 1) * (c, o) -> (n, e, c, o)\n project_embedding = project_embedding.permute(0, 3, 2, 1) # (n, o, c, e)\n # interaction\n square_of_sum = torch.sum(project_embedding, dim=2) ** 2\n sum_of_square = torch.sum(project_embedding ** 2, dim=2)\n embed_result = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)\n return embed_result", "def queue_buildrequest(event):\n get().build_queue.put(event)", "async def bubblewrap(self, ctx):\n data = Embed.create(\n self, ctx, title=\"Bubblewrap!\",\n description=(\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n )\n )\n await ctx.send(embed=data)", "def help(update: Update, context: CallbackContext):\n context.bot.send_message(\n chat_id=update.message.chat_id,\n text=PROMPTS[\"help\"],\n reply_markup=telegram.InlineKeyboardMarkup(\n [\n [\n telegram.InlineKeyboardButton(\n \"Contribute on GitHub!\", url=\"https://github.com/iugov/s4lbot\"\n )\n ]\n ]\n ),\n parse_mode=telegram.ParseMode.MARKDOWN,\n )", "def contact(update: Update) -> None:\n update.message.text(\"@New GEN\")", "def discord_webhook(title, url, thumbnail, sizes):\n fields = []\n for size in sizes:\n fields.append({\"name\": size, \"value\": \"Available\", \"inline\": True})\n\n data = {\n \"username\": CONFIG['USERNAME'],\n \"avatar_url\": CONFIG['AVATAR_URL'],\n \"embeds\": [{\n \"title\": title,\n \"url\": CONFIG['URL'].replace('.json', '/') + url, \n \"thumbnail\": {\"url\": thumbnail},\n \"fields\": fields,\n \"color\": int(CONFIG['COLOUR']),\n \"footer\": {\"text\": \"Made by Yasser\"},\n \"timestamp\": str(datetime.utcnow()),\n }]\n }\n\n result = rq.post(CONFIG['WEBHOOK'], data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except 
rq.exceptions.HTTPError as err:\n logging.error(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed", "def generate_build_document(self, commit_info, manifest_info):\n\n manifest_path, commit = commit_info\n build_name = manifest_info.name\n logging.info(f'Generating build document for manifest {build_name}...')\n\n # See if build document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n try:\n build_data = self.db.get_document(build_name)\n except cbdatabase_db.NotFoundError:\n build_data = dict(type='build', key_=build_name)\n\n projects = dict()\n\n for project_name in manifest_info.get_projects():\n project_shas = manifest_info.get_project_shas(\n project_name\n )\n projects[project_name] = [\n f'{project_name}-{sha}' for sha in project_shas\n ]\n build_data['manifest'] = projects\n build_data['invalid_shas'] = dict() # Populated (potentially) later\n\n release_keys = ('product', 'release', 'version', 'build_num')\n release_data = manifest_info.get_release_info()\n product, release, version, build_num = release_data\n build_data.update(dict(zip(release_keys, release_data)))\n\n index_key = f'{product}-{version}'\n build_data['prev_build_num'] = (\n self.prod_ver_index.get(index_key, None)\n )\n\n build_data['commits'] = list() # Populated (potentially) later\n build_data['manifest_sha'] = commit.id.decode()\n build_data['manifest_path'] = manifest_path.decode()\n build_data['timestamp'] = commit.commit_time\n build_data['download_url'] = (\n f'http://latestbuilds.service.couchbase.com/builds/latestbuilds/'\n f'{product}/{release}/{build_num}'\n )\n\n # Used for related (external) data; preserve any existing data\n build_data['metadata'] = build_data.get('metadata', dict())\n\n logging.debug(f\"Final build document: {build_data}\")\n self.db.upsert_documents({build_name: build_data})\n\n self.first_prod_ver_build = (\n True if build_data['prev_build_num'] is None else False\n )\n self.prod_ver_index[index_key] = build_num\n self.db.update_product_version_index(self.prod_ver_index)\n\n return build_data", "def merge(session_id, context, entities, msg):\n pass", "def _build(cls, update, others, self_id, entities, client):", "def competition(update, context):\n #update.message.reply_text(s)\n chat_id = update.message.chat_id\n bot.send_message(chat_id,text=message,\n parse_mode=telegram.ParseMode.HTML)\n #return s ", "def build_request(self,token=config.PUBSUB_VERIFICATION_TOKEN):\n payload = {\n \"metricKind\": \"DELTA\", \n \"metric\": {\n \"labels\": {\n \"response_code\": \"0\"\n }, \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"points\": [\n {\n \"interval\": {\"endTime\": \"2019-02-18T22:09:53.939194Z\", \"startTime\": \"2019-02-18T21:09:53.939194Z\"}, \n \"value\": {\"int64Value\": \"62\"}\n }, \n {\n \"interval\": {\"endTime\": \"2019-02-18T21:09:53.939194Z\", \"startTime\": 
\"2019-02-18T20:09:53.939194Z\"}, \n \"value\": {\"int64Value\": \"61\"}\n }\n ], \n \"resource\": {\n \"labels\": {\n \"instance_id\": \"9113659852587170607\", \n \"project_id\": \"YOUR_PROJECT_ID\", \n \"zone\": \"us-east4-a\"\n }, \n \"type\": \"gce_instance\"\n }, \n \"valueType\": \"INT64\"\n }\n request = {\n \"message\": \n {\n \"attributes\": {\n \"batch_id\": self.batch_id,\n \"token\": token\n },\n \"data\": base64.b64encode(json.dumps(payload))\n }\n \n }\n return request", "def html_message_formatter(mode, name, build, results, master_status):\n result = Results[results]\n\n limit_lines = 80\n text = list()\n text.append(u'<h4>Build status: %s</h4>' % result.upper())\n text.append(u'<table cellspacing=\"10\"><tr>')\n text.append(u\"<td>Buildslave for this Build:</td><td><b>%s</b></td></tr>\" % build.getSlavename())\n if master_status.getURLForThing(build):\n text.append(u'<tr><td>Complete logs for all build steps:</td><td><a href=\"%s\">%s</a></td></tr>'\n % (master_status.getURLForThing(build),\n master_status.getURLForThing(build))\n )\n text.append(u'<tr><td>Build Reason:</td><td>%s</td></tr>' % build.getReason())\n source = u\"\"\n for ss in build.getSourceStamps():\n if ss.codebase:\n source += u'%s: ' % ss.codebase\n if ss.branch:\n source += u\"[branch %s] \" % ss.branch\n if ss.revision:\n source += ss.revision\n else:\n source += u\"HEAD\"\n if ss.patch:\n source += u\" (plus patch)\"\n if ss.patch_info: # add patch comment\n source += u\" (%s)\" % ss.patch_info[1]\n text.append(u\"<tr><td>Build Source Stamp:</td><td><b>%s</b></td></tr>\" % source)\n text.append(u\"<tr><td>Blamelist:</td><td>%s</td></tr>\" % \",\".join(build.getResponsibleUsers()))\n text.append(u'</table>')\n if ss.changes:\n text.append(u'<h4>Recent Changes:</h4>')\n for c in ss.changes:\n cd = c.asDict()\n when = datetime.datetime.fromtimestamp(cd['when'] ).ctime()\n text.append(u'<table cellspacing=\"10\">')\n text.append(u'<tr><td>Repository:</td><td>%s</td></tr>' % cd['repository'] )\n text.append(u'<tr><td>Project:</td><td>%s</td></tr>' % cd['project'] )\n text.append(u'<tr><td>Time:</td><td>%s</td></tr>' % when)\n text.append(u'<tr><td>Changed by:</td><td>%s</td></tr>' % cd['who'] )\n text.append(u'<tr><td>Comments:</td><td>%s</td></tr>' % cd['comments'] )\n text.append(u'</table>')\n files = cd['files']\n if files:\n text.append(u'<table cellspacing=\"10\"><tr><th align=\"left\">Files</th></tr>')\n for file in files:\n text.append(u'<tr><td>%s:</td></tr>' % file['name'] )\n text.append(u'</table>')\n text.append(u'<br>')\n # get all the steps in build in reversed order\n rev_steps = reversed(build.getSteps())\n # find the last step that finished\n for step in rev_steps:\n if step.isFinished():\n break\n # get logs for the last finished step\n if step.isFinished():\n logs = step.getLogs()\n # No step finished, loop just exhausted itself; so as a special case we fetch all logs\n else:\n logs = build.getLogs()\n # logs within a step are in reverse order. 
Search back until we find stdio\n for log in reversed(logs):\n if log.getName() == 'stdio':\n break\n name = \"%s.%s\" % (log.getStep().getName(), log.getName())\n status, dummy = log.getStep().getResults()\n content = log.getText().splitlines() # Note: can be VERY LARGE\n url = u'%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),\n log.getStep().getName(),\n log.getName())\n\n text.append(u'<i>Detailed log of last build step:</i> <a href=\"%s\">%s</a>'\n % (url, url))\n text.append(u'<br>')\n text.append(u'<h4>Last %d lines of \"%s\"</h4>' % (limit_lines, name))\n unilist = list()\n for line in content[len(content)-limit_lines:]:\n unilist.append(cgi.escape(unicode(line,'utf-8')))\n text.append(u'<pre>')\n text.extend(unilist)\n text.append(u'</pre>')\n text.append(u'<br><br>')\n text.append(u'<b>-The Buildbot</b>')\n return {\n 'body': u\"\\n\".join(text),\n 'type': 'html'\n }", "def gokul(update, context):\n update.message.reply_text(\"\"\"GOKULAKRISHNAN-191MC126\n MOBILE-1234567890\"\"\")", "def svn_client_mergeinfo_get_merged(apr_hash_t_mergeinfo, char_path_or_url, svn_opt_revision_t_peg_revision, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def merge(self, message: str, master_branch: str, dev_branch: str) -> str:\n self.__verify_repo_initialized()\n commit_hash = select_merge_algorithm(\n message=message,\n branchenv=self._env.branchenv,\n stageenv=self._env.stageenv,\n refenv=self._env.refenv,\n stagehashenv=self._env.stagehashenv,\n master_branch=master_branch,\n dev_branch=dev_branch,\n repo_path=self._repo_path)\n\n return commit_hash", "def build_id(self):\n if self.method == 'tagBuild':\n return self.params[1]", "def attention_imp_merge():\n global X_DIM, Y_DIM\n # Load Embeddings matrix\n embedding_weights = joblib.load(config.DUMPED_VECTOR_DIR + 'mb_voc_embeddings.pkl')\n\n # model cnn\n\n model_atn = Sequential()\n model_atn.add(Embedding(max_features,\n embedding_dims,\n input_length=max_len,\n weights=[embedding_weights],\n trainable=True))\n model_atn.add(Bidirectional(GRU(100, return_sequences=True), name='bidirectional'))\n model_atn.add(TimeDistributed(Dense(200), name='time_dist'))\n model_atn.add(AttLayer(name='att'))\n\n model_feature_vec = Sequential()\n model_feature_vec.add(Dense(200, input_dim=N_Features, init='normal', activation='relu'))\n model_feature_vec.add(Dense(100, init='normal', activation='relu'))\n model_feature_vec.add(Dropout(0.2))\n model_feature_vec.add(Dense(50, init='normal', activation='relu'))\n model_feature_vec.add(Dense(10, init='normal', activation='relu'))\n\n merged_layer = Sequential()\n merged_layer.add(Merge([model_atn, model_feature_vec], mode='concat',\n concat_axis=1, name='merge_layer'))\n merged_layer.add(Dense(200, activation='relu'))\n # merged_layer.add(Bidirectional(GRU(100, return_sequences=True), name='bidirectional_2'))\n # merged_layer.add(TimeDistributed(Dense(200), name='time_dist'))\n # merged_layer.add(AttLayer(name='att'))\n merged_layer.add(Dense(1, init='normal', name='combined_dense'))\n\n # # Compile model\n merged_layer.compile(loss='mean_squared_error', optimizer='adam')\n\n print(merged_layer.summary())\n return merged_layer", "def send_apod(bot, job):\n apod_data = 
requests.get(config.APOD_API_URL).json()\n\n title = apod_data['title']\n\n chat_ids = json.loads(redis.get('chat_ids'))\n\n for chat_id in chat_ids:\n logger.info(f\"Sending message to chat: {chat_id}\")\n\n if apod_data[\"media_type\"] == \"video\":\n bot.send_message(chat_id=chat_id,\n text=f'<a href=\"{config.APOD_URL}\">{title}</a>',\n parse_mode=ParseMode.HTML)\n\n else:\n bot.send_photo(chat_id=chat_id,\n photo=apod_data['url'],\n caption=f'<a href=\"{config.APOD_URL}\">{title}</a>',\n parse_mode=ParseMode.HTML)", "def build( c ):\n\n print(\"Building Please Respond...\")\n c.run( \"pyinstaller -y please_respond.py\" )", "def Merge(self, request, global_params=None):\n config = self.GetMethodConfig('Merge')\n return self._RunMethod(\n config, request, global_params=global_params)", "def github_merge(config_data):\n issues = load_issues(config_data.issues_path)\n with open(config_data.entropy_path) as entropy:\n entropy_rdr = csv.reader(entropy)\n with open(config_data.merged_path, 'w', newline='') as merge:\n merge_wrtr = csv.writer(merge)\n entropy_hdrs = next(entropy_rdr)\n issue_hdrs = [\"Created\", \"Closed\", \"Open\",\n \"Created_Avg\", \"Closed_Avg\", \"Open_Avg\"\n ]\n merge_hdrs = entropy_hdrs + issue_hdrs\n merge_wrtr.writerow(merge_hdrs)\n default_i = {}\n for k in issue_hdrs:\n default_i[k] = None\n for e_row in entropy_rdr:\n i = issues.get(e_row[0], default_i)\n i_row = [i[k] for k in issue_hdrs]\n e_row.extend(i_row)\n merge_wrtr.writerow(e_row)\n \n print(\"Generated: {0}\".format(config_data.merged_path))", "def push_build(id, tag_prefix):\n req = swagger_client.BuildRecordPushRequestRest()\n req.tag_prefix = tag_prefix\n req.build_record_id = id\n response = utils.checked_api_call(pnc_api.build_push, 'push', body=req)\n if response:\n return utils.format_json_list(response)", "def hello():\n if request.method == \"GET\":\n return \"incorrect usage\"\n else:\n #s = \"\"\n #for repo in g.get_user().get_repos():\n # s+= repo.name + \" \"\n #s += g.get_user().get_repo(\"memeplatter.github.io\").name\n #for repo in g.get_organization(os.environ.get(\"GIT_ORG\")).get_repos():\n # s += repo.name + \" \"\n #s += g.get_organization(os.environ.get(\"GIT_ORG\")).get_repo(os.environ.get(\"GIT_REPO\")).name\n repo = None\n if os.environ.get(\"GIT_ORG\") == None:\n #there is no organization configured\n repo = g.get_user().get_repo(os.environ.get(\"GIT_REPO\"))\n else:\n # there is a organization configured\n repo = g.get_organization(os.environ.get(\"GIT_ORG\")).get_repo(os.environ.get(\"GIT_REPO\"))\n honeypot = request.form[\"email\"]\n \n if len(honeypot) > 0:\n return \"eh\"\n from_name = request.form['name']\n subject = request.form['subject']\n message = request.form['message']\n #return repo.name\n #repo.create_file(\"/\"+repo.name+\"/test.md\", \"commit message\", \"commit content\")\n rMessage = createPost(repo, from_name, subject, message)\n return rMessage", "def merge(self, response_to_merge):\n self.n_messages += response_to_merge.n_messages\n self.n_errors += response_to_merge.n_errors\n self.n_warnings += response_to_merge.n_warnings\n for message in response_to_merge.messages:\n self.messages.append(message)\n if response_to_merge.status != 'OK':\n self.status = response_to_merge.status\n self.error_code = response_to_merge.error_code\n self.http_status = response_to_merge.http_status\n self.message = response_to_merge.message", "def add_embed_itmes(data):\n for k, v in data.items() :\n embed.add_embed_field(name=k, value=v)", "def api_github_message():\n if 
request.headers['Content-Type'] == 'application/json':\n print('inside server ')\n my_info = json.dumps(request.json)\n payload = json.loads(my_info)\n if not payload['action'] == 'closed':\n model = StoreModel().loadData()\n tdf = TestData()\n tdf1 = TestData1()\n parameter_dict = tdf.fetcher(my_info)\n extension_file = tdf1.file_fetcher(my_info)\n feature_dict = parameter_dict['feature_dict']\n comment_url = parameter_dict['comment_url']\n comment_body = tdf.test_feeder(feature_dict, model)\n file_comment_body = tdf1.file_test_feeder(extension_file[0], extension_file[1])\n Comment.post_comment(comment_url, comment_body)\n Comment.post_comment(comment_url, str(file_comment_body))\n app.logger.info(comment_body)\n prediction_response = json.dumps({\"state\": comment_body})\n app.logger.info(comment_body)\n res = Response(prediction_response, status=200, mimetype='application.json')\n return res\n prediction_response = json.dumps({\"state\": \"closed pull request\"})\n app.logger.info(\"closed pull request\")\n res = Response(prediction_response, status=200, mimetype='application.json')\n return res", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: %(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': 
endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def send_joke(update, context):\n\n joke = submission_fetcher.joke_fetcher.get_post()\n msg = joke.title + '\\n\\n' + joke.selftext\n lad_bot.send_message(chat_id=update.effective_chat.id, text=msg)\n print(f\"Joke sent for {update.message.from_user.first_name} \n {update.message.from_user.last_name} (username: {update.message.from_user.username}).\")", "async def trigger_build(self, *, branch=None, message=None):", "def info_embed(card: dict) -> dict:\n card_type = card_types[card[\"char_type\"]]\n title = effective_card_name(card)\n description = (\n \"{cost}pp {craft} {rarity} {card_type}\\n\"\n \"Trait: {trait}\\n\"\n \"Card Set: {card_set}\"\n ).format(\n cost=card[\"cost\"],\n craft=crafts[card[\"clan\"]],\n rarity=rarities[card[\"rarity\"]],\n card_type=card_type,\n trait=card[\"tribe_name\"],\n card_set=card_sets[card[\"card_set_id\"]],\n )\n if card_type == \"Follower\":\n base_text = reformat_text(card[\"org_skill_disc\"])\n evo_text = reformat_text(card[\"org_evo_skill_disc\"])\n fields = [\n dict(name=\"Base\", value=\"{atk}/{life}\\n\".format(**card) + base_text),\n dict(\n name=\"Evolved\", value=\"{evo_atk}/{evo_life}\\n\".format(**card) + evo_text\n ),\n ]\n else:\n text = reformat_text(card[\"org_skill_disc\"])\n fields = [dict(name=card_type, value=text)]\n return dict(title=title, description=description, fields=fields)", "def on_merge(self, to_be_merged, merge_result, context):\n pass", "def merge(self):\n commits = self._github_api.get_pr_commits(self.number)\n\n def format_commit_author(commit):\n author = commit['commit']['author']\n name = author['name']\n email = author['email']\n return f'{name} <{email}>'\n commit_authors = [format_commit_author(commit) for commit in commits]\n co_authored_by_re = re.compile(\n r'^Co-authored-by:\\s*(.*)', re.MULTILINE)\n\n def extract_co_authors(commit):\n message = commit['commit']['message']\n return co_authored_by_re.findall(message)\n commit_co_authors = []\n for commit in commits:\n commit_co_authors.extend(extract_co_authors(commit))\n\n all_commit_authors = commit_authors + commit_co_authors\n distinct_authors = sorted(set(all_commit_authors),\n key=lambda x: commit_authors.count(x),\n reverse=True)\n\n for i, author in enumerate(distinct_authors):\n print(\"Author {}: {}\".format(i + 1, author))\n\n if len(distinct_authors) > 1:\n primary_author, distinct_other_authors = get_primary_author(\n self.cmd, distinct_authors)\n else:\n # If there is only one author, do not prompt for a lead author\n primary_author = distinct_authors.pop()\n distinct_other_authors = []\n\n commit_title = f'{self.title} (#{self.number})'\n commit_message_chunks = []\n if self.body is not None:\n # Remove comments (i.e. 
<-- comment -->) from the PR description.\n body = re.sub(r\"<!--.*?-->\", \"\", self.body, flags=re.DOTALL)\n # avoid github user name references by inserting a space after @\n body = re.sub(r\"@(\\w+)\", \"@ \\\\1\", body)\n commit_message_chunks.append(body)\n\n committer_name = run_cmd(\"git config --get user.name\").strip()\n committer_email = run_cmd(\"git config --get user.email\").strip()\n\n authors = (\"Authored-by:\" if len(distinct_other_authors) == 0\n else \"Lead-authored-by:\")\n authors += \" %s\" % primary_author\n if len(distinct_authors) > 0:\n authors += \"\\n\" + \"\\n\".join([\"Co-authored-by: %s\" % a\n for a in distinct_other_authors])\n authors += \"\\n\" + \"Signed-off-by: %s <%s>\" % (committer_name,\n committer_email)\n commit_message_chunks.append(authors)\n\n commit_message = \"\\n\\n\".join(commit_message_chunks)\n\n # Normalize line ends and collapse extraneous newlines. We allow two\n # consecutive newlines for paragraph breaks but not more.\n commit_message = \"\\n\".join(commit_message.splitlines())\n commit_message = re.sub(\"\\n{2,}\", \"\\n\\n\", commit_message)\n\n if DEBUG:\n print(\"*** Commit title ***\")\n print(commit_title)\n print()\n print(\"*** Commit message ***\")\n print(commit_message)\n\n if DEBUG:\n merge_hash = None\n else:\n result = self._github_api.merge_pr(self.number,\n commit_title,\n commit_message)\n if not result['merged']:\n message = result['message']\n self.cmd.fail(f'Failed to merge pull request: {message}')\n merge_hash = result['sha']\n\n print(\"Pull request #%s merged!\" % self.number)\n print(\"Merge hash: %s\" % merge_hash)", "def sendpr(m='This is PR', b='lf-dev', h=None):\n command = 'hub pull-request -m \"%s\" -b %s' % (m,b)\n\n current_branch_cmd = shlex.split('git rev-parse --abbrev-ref HEAD')\n process = subprocess.Popen(current_branch_cmd, stdout=subprocess.PIPE)\n current_branch, err = process.communicate()\n print('current_branch', current_branch)\n if not h:\n cmd = shlex.split(command)\n else:\n command = command + '-h %s' % (h)\n cmd = shlex.split(command)\n current_branch = h\n\n cmd = shlex.split(command)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n output, err = process.communicate()\n message = m + \" PR from %s @ %s reviewers @%s @%s \\n URL: %s \\n %s >>> %s\" % (DEV_NAME,\n datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M\"),\n REVIEWER[0], REVIEWER[1], output , b,\n current_branch)\n data = {\n \"color\":\"green\",\n \"message\":message,\n \"notify\":True,\n \"message_format\":\"text\"\n }\n req = urllib2.Request(HIPCHAT_WEB_HOOK)\n req.add_header(\"Content-Type\", \"application/json\")\n urllib2.urlopen(req, json.dumps(data))", "def post_mod(handled_mods, mod, user):\n publishedfileid = int(mod[\"publishedfileid\"])\n app_id = int(mod[\"consumer_app_id\"])\n game_name = get_game_name(handled_mods, app_id)\n\n embed = {}\n wk_obj = {'embeds': [{}]}\n embed[\"title\"] = \"%s\" % (mod[\"title\"])\n embed[\"type\"] = \"rich\"\n embed[\n \"url\"] = \"http://steamcommunity.com/sharedfiles/filedetails/?id=%i\" % (\n publishedfileid)\n embed[\"description\"] = re.sub(r\"\\[.*?\\]\", '', mod[\"description\"].replace(\n \"\\r\\n\", \" \"))[:200] + '\\u2026'\n embed[\"color\"] = 3447003\n embed[\"timestamp\"] = datetime.datetime.utcfromtimestamp(\n mod[\"time_created\"]).isoformat()\n\n embed[\"author\"] = {}\n embed[\"author\"][\"name\"] = user[\"personaname\"]\n embed[\"author\"][\"url\"] = user[\"profileurl\"]\n embed[\"author\"][\"proxy_icon_url\"] = user[\"profileurl\"]\n 
embed[\"author\"][\"icon_url\"] = user[\"avatar\"]\n\n embed[\"thumbnail\"] = {}\n embed[\"thumbnail\"][\"url\"] = mod[\"preview_url\"]\n embed[\"thumbnail\"][\"proxy_url\"] = user[\"profileurl\"]\n embed[\"thumbnail\"][\"height\"] = 84\n embed[\"thumbnail\"][\"width\"] = 84\n\n if game_name:\n embed[\"footer\"] = {}\n embed[\"footer\"][\"text\"] = \"New %s mod release\" % game_name\n\n wk_obj[\"embeds\"][0] = embed\n headers = {'Content-type': 'application/json'}\n for hook in HOOKS:\n if app_id in hook[\"ids\"]:\n hashed = keyhash(hook[\"url\"])\n if publishedfileid not in handled_mods[hashed][str(app_id)]:\n if DRY_RUN:\n handled_mods[hashed][str(app_id)].append(publishedfileid)\n else:\n try:\n req = requests.post(hook[\"url\"],\n data=json.dumps(wk_obj),\n headers=headers)\n if req.status_code == 204:\n handled_mods[hashed][str(app_id)].append(publishedfileid)\n except requests.exceptions.RequestException as req_exc:\n print(req_exc)", "def details(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=str(owner))", "def dispatch_push(self, p, tweaks, badge):\n pass", "def format_payload(self):\n # Initializes the default payload structure.\n payload = {}\n embed = {\n 'author': {},\n 'footer': {},\n 'image': {},\n 'thumbnail': {},\n 'fields': []\n }\n\n # Attaches data to the payload if provided.\n if self.content:\n payload['content'] = self.content\n\n if self.title:\n embed['title'] = self.title\n\n if self.description:\n embed['description'] = self.description\n\n if self.url:\n embed['url'] = self.url\n\n if self.color:\n embed['color'] = self.color\n\n if self.timestamp:\n embed['timestamp'] = self.timestamp\n\n if self.author_name:\n embed['author']['name'] = self.author_name\n\n if self.author_url:\n embed['author']['url'] = self.author_url\n\n if self.author_icon:\n embed['author']['icon_url'] = self.author_icon\n\n if self.thumbnail_url:\n embed['thumbnail']['url'] = self.thumbnail_url\n\n if self.image:\n embed['image']['url'] = self.image\n\n if self.fields:\n embed['fields'] = self.fields\n\n if self.footer_icon:\n embed['footer']['icon_url'] = self.footer_icon\n\n if self.footer_text:\n embed['footer']['text'] = self.footer_text\n\n # If the embed object has content it gets appended to the payload\n if embed:\n payload['embeds'] = []\n payload['embeds'].append(embed)\n\n return payload", "def _build_common_message(msg_title,msg_text,msg_type):\n data = {\n 'message': {\n \"topic\" : '',\n 'data': {\n 'title': '',\n 'message': '',\n 'type' : ''\n }\n }\n }\n data['message']['topic'] = FCM_TOPIC\n data['message']['data']['title'] = msg_title\n data['message']['data']['message'] = datetime.datetime.now().strftime(\"%H:%M:%S\") + \" \" + msg_text\n data['message']['data']['type'] = msg_type\n return data", "def _build_common_message(msg_title,msg_text,msg_type):\n data = {\n 'message': {\n \"topic\" : '',\n 'data': {\n 'title': '',\n 'message': '',\n 'type' : ''\n }\n }\n }\n data['message']['topic'] = FCM_TOPIC\n data['message']['data']['title'] = msg_title\n data['message']['data']['message'] = datetime.datetime.now().strftime(\"%H:%M:%S\") + \" \" + msg_text\n data['message']['data']['type'] = msg_type\n return data", "def current_build_proto(self):\n return self._build_proto", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head 
Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def build_shared_embeddings(\n dicts: Dict[str, Dictionary],\n langs: List[str],\n embed_dim: int,\n build_embedding: callable,\n pretrained_embed_path: Optional[str] = None,\n ):\n shared_dict = dicts[langs[0]]\n if any(dicts[lang] != shared_dict for lang in langs):\n raise ValueError(\n \"--share-*-embeddings requires a joined dictionary: \"\n \"--share-encoder-embeddings requires a joined source \"\n \"dictionary, --share-decoder-embeddings requires a joined \"\n \"target dictionary, and --share-all-embeddings requires a \"\n \"joint source + target dictionary.\"\n )\n return build_embedding(shared_dict, embed_dim, pretrained_embed_path)", "async def cringo_stat_embed(user: DiscordUser) -> Embed:\n\n s = await CringoStatistic.get_by_discord_user(user)\n\n if s.plays == 0:\n embed = c.crimbed(\n title='Hold up—',\n descr=\"You haven't played any games of CRINGO yet!\",\n thumb_name='jester',\n footer='Play >cringo today!',\n )\n else:\n embed = c.crimbed(\n title='CRINGO! stats for {}'.format(user),\n descr=None,\n thumb_name='jester',\n footer='As of {d.year}-{d.month:02d}-{d.day:02d} · Regular CRINGO! only'.format(d=s.created_at),\n )\n\n ess = '' if s.plays == 1 else 's'\n ess2 = '' if s.wins == 1 else 's'\n ess3 = '' if s.full_cards == 1 else 's'\n\n # list of tuples (name, value) for embed.add_field\n field_list = [\n (\n 'Gameplay',\n '**{}** game{ess} played, **{}** win{ess2}'.format(s.plays, s.wins, ess=ess, ess2=ess2)\n ),\n (\n 'crimsoCOIN won',\n '**\\u20A2{:.2f}**'.format(s.coin_won)\n ),\n (\n 'High score',\n '**{}** points'.format(s.high_score)\n ),\n (\n 'Average score (expected: 2260)',\n '**{:.1f}** points/game'.format(s.mean_score)\n ),\n (\n 'Matches/game (expected: 14.4)',\n '**{:.1f}** matches/game'.format(s.matches / s.plays)\n ),\n (\n 'Lines/game: (expected: 6.34)',\n '**{:.2f}** lines/game'.format(s.lines / s.plays)\n ),\n (\n 'Full cards (expected in {} game{ess}: {:.4f})'.format(s.plays, 0.1296 * s.plays, ess=ess),\n '**{}** full card{ess3}'.format(s.full_cards, ess3=ess3)\n ),\n ]\n\n for field in field_list:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n return embed", "def discord_webhook(self, product_item):\n\n data = {}\n data[\"username\"] = CONFIG['USERNAME']\n data[\"avatar_url\"] = CONFIG['AVATAR_URL']\n data[\"embeds\"] = []\n\n embed = {}\n \n if product_item == 'initial':\n embed[\"description\"] = \"Thank you for using Yasser's Sneaker Monitors. This message is to let you know \" \\\n \"that everything is working fine! 
You can find more monitoring solutions at \" \\\n \"https://github.com/yasserqureshi1/Sneaker-Monitors \"\n else:\n embed[\"title\"] = product_item[0] + ' - ' + product_item[1] + ' - ' + product_item[2]\n embed[\"description\"] = product_item[3]\n embed[\"thumbnail\"] = {'url': product_item[4]}\n embed['url'] = product_item[5]\n\n embed[\"color\"] = CONFIG['COLOUR']\n embed[\"footer\"] = {'text': 'Made by Yasser & Bogdan'}\n embed[\"timestamp\"] = str(datetime.utcnow())\n data[\"embeds\"].append(embed)\n\n result = rq.post(self.webhook, data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except rq.exceptions.HTTPError as err:\n print(err)\n logging.error(msg=err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def layout_join():\n return html.Div([\n # upload button (wrap in Div for tooltip to target)\n html.Div(\n dcc.Upload(\n html.Button(\n 'Add (join)',\n id='join_button',\n style={\n 'background': '#607D8B',\n 'color': 'white'\n }\n ),\n id='join',\n # put small space between neighboring button\n style={'marginLeft': '5%'}\n ),\n id='join_div'\n ),\n # tooltip for upload method\n dbc.Tooltip(\n ('Add data by matching values with uploaded file. ' \n 'File must contain headers for all parameters'),\n target='join_div',\n placement='bottom',\n style={'fontSize': 12}\n )\n ])", "def generate_patch_build(self, domain):\n # TODO change name of def\n base_path = self.paths[\"api_doc_dir\"]\n self.generate_apidoc_patches()\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n patch_generator.filter_for_deleted_apis()\n\n process = subprocess.Popen(['which', 'apidoc'], stdout=subprocess.PIPE)\n\n output = process.communicate()[0]\n if output:\n\n with open(self.paths[\"base_dir\"] + \"/apidoc.json\", 'w') as outfile:\n apidoc_content = {\"url\": \"https://ib-backend-dev.apigateway.in\",\n \"version\": \"0.0.1\",\n \"description\": \"\",\n \"name\": \"iBHubs_backend API Documentation\",\n \"title\": \"iBHubs_backend Documenation\"}\n json.dump(apidoc_content, outfile, indent=4)\n # by default we assume user is working at no specific branch so we fix\n # url to default above url as above , then we check if any specific parametr is given\n # and replace url with required url\n if domain != '' and domain:\n with open(self.paths[\"apidoc\"]) as src_json:\n apidoc_content = json.load(src_json)\n apidoc_content['url'] = \"https://\" + domain\n with open(self.paths[\"apidoc\"], 'w') as outfile:\n json.dump(apidoc_content, outfile, indent=4)\n try:\n os.mkdir(\"docs\")\n except OSError:\n pass\n # the below command is responsible for creating docs\n process = subprocess.Popen(['apidoc', '-i', self.base_dir,\n '-o', os.path.join(self.base_dir, 'docs'),\n '-e', 'django_swagger_utils/*',\n '-e', 'static/*',\n ], stdout=subprocess.PIPE)\n print process.communicate()[0]\n ################################################\n # hosting apidoc\n ################################################\n # obtaining the path of static folder of django-swagger-utils\n # django_swagger_utils_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n # static_folder_path = os.path.join(django_swagger_utils_path, \"static\")\n # import shutil\n # # create a folder apidoc , delete if previously 
exists\n # if os.path.exists(os.path.join(static_folder_path, \"apidoc\")):\n # shutil.rmtree(os.path.join(static_folder_path, \"apidoc\"))\n # apidoc_path = os.path.join(static_folder_path, \"apidoc\")\n #\n # os.mkdir(apidoc_path)\n\n # from distutils.dir_util import copy_tree\n # copydocs from docs to apidoc in swagger utils\n # try:\n # copy_tree(os.path.join(self.base_dir, 'docs'), apidoc_path)\n # except Exception as err:\n # print err\n\n # browse to localhost:<port>/static/apidoc/index.html\n\n else:\n raise CommandError(\"Help: Install apidoc: [ sudo npm install -g apidoc ]\")", "def add_embed_field(embed, name, value, inline = False):\n return embed.add_field(\n name,\n (\n f'```\\n'\n f'{value}\\n'\n f'```'\n ),\n inline = inline,\n )", "def git_webhook():\n client = MongoClient(os.getenv('MONGODB_URI', 'mongodb://localhost:27017'))\n database = client.get_database()\n content = {\n \"event\": request.headers['X-GitHub-Event'],\n \"payload\" : request.json,\n \"date\": datetime.utcnow()\n }\n log.info(\"Content Received - \", request.headers['X-GitHub-Delivery'])\n inserted_id = database.events.insert_one(content).inserted_id\n log.info(\"Content Inserted - \", inserted_id)\n return jsonify({\n \"message\": \"Okay!\"\n })", "def get_info_embed(info_dict, searched_title):\n\n if info_dict is None:\n info_dict = {}\n\n base_dict = {\n 'title': info_dict.get('game_title', '_Unknown Title_'),\n 'type': 'rich',\n 'description': \"Not what you wanted? [Click here]({}) to try a full search\".format(BASE_URL),\n 'url': info_dict.get('info_link', BASE_URL),\n # 'timestamp': '',\n 'color': COLOR,\n 'footer': {\n 'text': \"Deal info from isthereanydeal.com\"\n },\n # 'image': {'url': '', 'height': '', 'width': ''},\n 'thumbnail': {\n 'url': info_dict.get('thumbnail_link', THUMBNAIL_URL),\n },\n 'author': {\n 'name': 'IsThereAnyDeal',\n 'url': BASE_URL,\n 'icon_url': THUMBNAIL_URL,\n },\n 'fields': [],\n }\n\n # Change description if the search was successful\n if 'search_url' in info_dict:\n base_dict['description'] = (\n 'Not what you wanted? 
[Click here]({}) to see the full search.'\n .format(info_dict['search_url'])\n )\n\n # Add fields for current/historic best if they exist\n add_deal_field(base_dict, info_dict, 'current_best', 'Current Best')\n add_deal_field(base_dict, info_dict, 'historic_best', 'Historic Best')\n\n return discord.Embed.from_dict(base_dict)", "def patch_namespaced_build(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_build\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_build`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `patch_namespaced_build`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_build`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/builds/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Build',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "async def webhook(request: Request) -> Message:\n last_message_id = await request.state.redis_pool.get(\"last-webhook-message\")\n\n now = datetime.now()\n\n # Generate payload that will be sent in payload_json\n data = {\n \"content\": \"\",\n \"embeds\": [{\n \"title\": \"Pixels State\",\n \"image\": {\n \"url\": f\"attachment://pixels_{now.timestamp()}.png\"\n },\n \"footer\": {\n \"text\": \"Last updated\"\n },\n \"timestamp\": now.isoformat()\n }]\n }\n\n # Run Pillow stuff in executor because these actions are blocking\n loop = asyncio.get_event_loop()\n image = await loop.run_in_executor(\n None,\n partial(\n Image.frombytes,\n \"RGB\",\n (Sizes.WIDTH, Sizes.HEIGHT),\n await request.state.canvas.get_pixels()\n )\n )\n\n # Increase size of image so that this looks better in Discord\n image = await loop.run_in_executor(\n None,\n partial(\n image.resize,\n Sizes.WEBHOOK_SIZE,\n Image.NEAREST\n 
)\n )\n\n # BytesIO gives a file-like interface for saving\n # and later this is able to get actual content that will be sent.\n file = io.BytesIO()\n await loop.run_in_executor(None, partial(image.save, file, format=\"PNG\"))\n\n # Name file to pixels_TIMESTAMP.png\n files = {\n \"file\": (f\"pixels_{now.timestamp()}.png\", file.getvalue(), \"image/png\")\n }\n\n async with AsyncClient(timeout=None) as client:\n # If the last message exists in cache, try to edit it\n if last_message_id is not None:\n data[\"attachments\"] = []\n edit_resp = await client.patch(\n f\"{Discord.WEBHOOK_URL}/messages/{int(last_message_id)}\",\n data={\"payload_json\": json.dumps(data)},\n files=files\n )\n\n if edit_resp.status_code != 200:\n log.warning(f\"Non 200 status code from Discord: {edit_resp.status_code}\\n{edit_resp.text}\")\n last_message_id = None\n\n # If no message is found in cache, the message is missing or the edit failed, send a new message\n if last_message_id is None:\n # If we are sending a new message, don't specify attachments\n data.pop(\"attachments\", None)\n # Username can only be set when sending a new message\n data[\"username\"] = \"Pixels\"\n create_resp = (await client.post(\n Discord.WEBHOOK_URL,\n data={\"payload_json\": json.dumps(data)},\n files=files\n )).json()\n\n await request.state.redis_pool.set(\"last-webhook-message\", create_resp[\"id\"])\n\n return Message(message=\"Webhook posted successfully.\")", "def get_status_embed_and_components(self):\n embed = Embed('Moving channel messages')\n add_embed_thumbnail(embed, self.source_channel.guild)\n add_embed_field(embed, 'From', self.source_channel.name, True)\n add_embed_field(embed, 'To', self.target_channel.name, True)\n add_embed_field(embed, 'Elapsed time', seconds_to_elapsed_time(LOOP_TIME() - self.started_at))\n add_embed_field(embed, 'Messages moved', str(self.total_moved_messages), True)\n add_embed_field(embed, 'Estimated percentage', format(self.get_estimated_percentage(), '.02f'), True)\n add_embed_field(embed, 'Last message id', str(self.last_message_id), True)\n \n state = self.state\n footer = STATUS_FOOTERS.get(state, None)\n if (footer is not None):\n embed.add_footer(footer)\n \n components_factory = STATUS_COMPONENTS.get(state, None)\n if components_factory is None:\n components = None\n else:\n components = components_factory(self)\n \n return embed, components", "def build(self, definition_id, to_format, container_name):\n url = f\"{self.base_url}/build\"\n payload = {\"definition_id\": definition_id, \"to_format\": to_format, \"container_name\": container_name}\n response = requests.post(url, json=payload, headers=self.headers)\n build_id = response.text\n\n return build_id", "def build_reply(results, is_list):\n \n \n\n #results and requests should be of the same size\n #additionally, a failed result should be represented by an empty entry\n reply_string = \"\"\n #sort(results)\n for entry in range(len(results)):\n entry_string = \"\"\n entry_dicts = sort_results(results[entry])\n tournament_set = set()\n if is_list:\n entry_string += make_section(entry_dicts)\n for row in entry_dicts:\n if row[\"tournament\"] not in tournament_set:\n #make a tournament heading\n entry_string += row[\"tournament\"] + \":\" + ENDL\n tournament_set.add(row[\"tournament\"])\n entry_string += video_format.format(bracket=row[\"bracket\"],\n video_id=row[\"video\"]\n )\n reply_string += entry_string + LINE\n else:\n pass\n #additionally add a footer to the message that gives info on the bot\n return reply_string", "def 
_generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n mes_author = author\n else:\n mes_author = self._author\n if not msg:\n msg = f\"Commit via python client {__version__}\"\n ci = {\"commit_info\": {\"author\": mes_author, \"message\": msg}}\n return ci", "def build_irc_msg(command, params, final_param_multi_word=False,\n source=None):\n\n if final_param_multi_word:\n final_param = ':' + params[-1]\n else:\n final_param = params[-1]\n\n if source:\n prefix = ':' + source\n else:\n prefix = ''\n\n if len(params) > 1:\n parts = [prefix, command, ' '.join(params[:-1]), final_param]\n else:\n parts = [prefix, command, final_param]\n\n return ' '.join(parts).strip() + '\\r\\n'", "def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg", "def embed(self):\n\n # aliases\n CARDS = self.game.emojis\n U200B = self.game.u200b_ZWSP\n U3000 = self.game.u3000_IS\n U2022 = self.game.u2022_bullet\n\n # combinations\n LF = f'\\n{U200B}'\n LFLF = f'{U200B}\\n{U200B}'\n BULLET_SEP = f'{U3000}{U2022}{U3000}'\n\n # helper functions\n spacer = lambda n: f'{U200B}{U3000 * n}{U200B}'\n pad_right = lambda n: f'{U3000 * n}{U200B}'\n pad_left = lambda n: f'{U200B}{U3000 * n}'\n\n hand: BlackjackDealerHand = self.game.dealer_hand\n\n if self.game.dealer_status == 'Busted':\n title_status = ' Busted'\n else:\n title_status = \"'s Turn\"\n title = f\"{U200B}\\n**__Kaa (Dealer){title_status}__**{LF}\"\n embed = discord.Embed(\n # title=f\"**{player['name']}**{LF}\",\n title=title,\n color=self.game.embed_color,\n )\n\n # blackjack title and icon\n embed.set_author(\n name='Blackjack' + pad_right(30),\n icon_url=self.game.thumbnail_url,\n )\n\n # footer showing current player pic, and the position in queue\n text = (\n f'Phase 5: Dealer Turn{BULLET_SEP}'\n f'Game will continue momentarily'\n )\n embed.set_footer(\n icon_url=self.game.bot.user.avatar_url,\n text=text,\n )\n\n\n # dealer cards field\n name = 'Cards'\n value = ''\n card: Card\n for card in hand.iter_all():\n value += CARDS[card.format_short()]\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(\n name=U200B,\n value=U200B,\n inline=True,\n )\n\n name = 'Hard[/Best]'\n # value = f'{pad_left(1)}{hand.value_hard}'\n value = f'{hand.value_hard}'\n if hand.value_hard != hand.value:\n value += f'/{hand.value}'\n if self.game.dealer_status == 'Busted':\n value += ' (Busted)'\n value += LF # added for bottom padding\n\n embed.add_field(name=name, value=value, inline=True)\n\n\n\n\n # players\n name = 'Players'\n value = self.player_hands\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(name=U200B, value=U200B, inline=True)\n\n name = U200B\n value = self.player_values\n embed.add_field(name=name, value=value, inline=True)\n\n return embed", "async def embed_editor(self, guild):\n if self.embed_pooling:\n return\n self.embed_pooling = True\n await asyncio.sleep(3.0)\n current_embed = self.games_info[guild.id][0].embeds[0].to_dict()\n current_embed['fields'][0]['value'] = '\\n'.join(f'{p}' for p in self.games_info[guild.id][2]) or \"None\"\n self.embed_pooling = False\n await 
self.games_info[guild.id][0].edit(embed=discord.Embed.from_dict(current_embed))", "def create_message_embed(self, data):\n\n # get the language object\n lang = data.bot.lang\n\n embed = self.generate_embed()\n\n if lang == \"en\":\n help_text = \"Role Selection\"\n\n elif lang == \"de\":\n help_text = \"Rollenvergabe\"\n\n embed.add_field(name=help_text, value=data.message_text, inline=False)\n\n for role in data.roles.all():\n embed.add_field(name=role.name, value=role.emoji, inline=False)\n\n return embed", "def build():\n local('wintersmith build')", "def populate_mdc(request):\n populate_default_mdc(request)\n req_id = request.headers.get('X-ONAP-RequestID', g.empty_value)\n request_json = request.get_json()\n if req_id == g.empty_value:\n req_id = get_request_id(request_json)\n g.request_id = req_id\n MDC.put('requestID', req_id)\n MDC.put('partnerName', get_partner_name(request_json))", "async def augment(self, ctx, *, augment: str):\n try:\n augment = self.get_entry('Augment', augment.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n type = augment['Type']\n price = augment['Sell Price']\n miranium = augment.get('Required Miranium')\n mat_1 = augment.get('Material 1')\n mat_2 = augment.get('Material 2')\n mat_3 = augment.get('Material 3')\n drop = augment.get('Drop')\n resource = augment.get('Precious Resource')\n\n total_tickets = 0\n\n embed = discord.Embed(title=augment['Name'], color=self.colors[augment[\"Rarity\"]])\n embed.add_field(name='Effect', value=augment['Effect'], inline=False)\n\n if type != 'Augment': # Remove when augment json fully updated\n embed.add_field(name='Type', value=type)\n\n if price != 0: # Remove when augment json fully updated\n embed.add_field(name='Sell Price', value=price)\n\n if miranium:\n embed.add_field(name='Required Miranium', value=miranium)\n\n if mat_1:\n name = mat_1[\"Name\"]\n amount = mat_1[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 1', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_2:\n name = mat_2[\"Name\"]\n amount = mat_2[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 2', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if mat_3:\n name = mat_3[\"Name\"]\n amount = mat_3[\"Amount\"]\n\n tickets = self.materials[name.lower()]['price'] * amount\n total_tickets += tickets\n\n embed.add_field(name='Material 3', value=f'{amount} {name}\\n({tickets} Tickets)')\n\n if drop:\n embed.add_field(name='Drop', value=drop)\n if resource:\n embed.add_field(name='Precious Resource', value=f'{resource[\"Amount\"]} {resource[\"Name\"]}', inline=False)\n\n if total_tickets != 0:\n embed.add_field(name='Total Tickets', value=total_tickets)\n\n await ctx.send(embed=embed)", "def test_check_update_properly_build_request_when_custom_data_given():\n request = UpdateDetailRequest('v1', 'MyDevice', '{\"AnyCustomData\":\"any_value\"}')\n update_helper = UpdateCheckHelper(_api_key, _base_url)\n built_request = update_helper.build_request(request)\n body = json.loads(built_request.body)\n\n assert body['unitId'] == request.unit_id\n assert body['versionId'] == request.version_id\n assert body['customClientData'] == request.custom_client_data\n\n headers = built_request.headers\n assert headers['Authorization'] == _api_key\n assert headers['Content-Type'] == 'application/json'", "def merge_app(request, pk):\n\n context = {}\n\n app = 
get_object_or_404(MacOSApp, pk=pk)\n\n if request.method == 'POST':\n form = AppMergeForm(request.POST, pk=pk)\n if form.is_valid():\n selected = form.cleaned_data.get('options')\n parent = MacOSApp.objects.get(pk=selected)\n app.merged_into = parent\n app.save()\n if parent.description in [None, ''] and app.description not in [None, '']:\n parent.description = app.description\n if parent.version in [None, ''] and app.version not in [None, '']:\n parent.version = app.version\n if parent.developer in [None, ''] and app.developer not in [None, '']:\n parent.developer = app.developer\n if parent.developer_website in [None, ''] and app.developer_website not in [None, '']:\n parent.developer_website = app.developer_website\n parent.save()\n messages.success(request, 'Applications merged successfully')\n return HttpResponseRedirect(reverse('mdm:apps'))\n else:\n form = AppMergeForm(pk=pk)\n context['form'] = form\n app_name = MacOSApp.objects.get(pk=pk).name\n context['msg'] = 'Merge ' + app_name + ' into...'\n return render(request, 'form_crispy.html', context)", "def pull_request(data):\n logger.info(\"Event: pull_request\")\n logger.info(\"Data: %s\", json.dumps(data))\n\n if git.is_pr_merge_event(data):\n logger.info(\"is_pr_merge_event\")\n\n files = git.fetch_files_to_sync(data)\n logger.info(\"files: %s\", files)\n\n files2 = git.filter_files(files)\n logger.info(\"filter_files: %s\", files2)\n\n logger.info(\"start sync\")\n s3.sync_files(files2)\n\n logger.info(\"done\")\n\n return data", "def format_bot_response(message):\n message_the_mods_link = message_link(\n to='/r/TranscribersOfReddit',\n subject='Bot Questions'\n )\n\n footer = ' | '.join([\n f'v{__version__}',\n f'This message was posted by a bot.',\n f'[FAQ]({faq_link})',\n f'[Source]({source_link})',\n f'Questions? 
[Message the mods!]({message_the_mods_link})',\n ])\n return f'{message}\\n\\n---\\n\\n{footer}'", "def test_embed_call_with_key(self):\n view = views.unsafe_embed_list\n request = self.factory.get(\"\")\n request.user = self.anonymous_user\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n models.ListItem.objects.create(\n book_list=self.list,\n user=self.local_user,\n book=self.book,\n approved=True,\n order=1,\n )\n\n embed_key = str(self.list.embed_key.hex)\n\n with patch(\"bookwyrm.views.list.list.is_api_request\") as is_api:\n is_api.return_value = False\n result = view(request, self.list.id, embed_key)\n\n self.assertIsInstance(result, TemplateResponse)\n validate_html(result.render())\n self.assertEqual(result.status_code, 200)", "def define(update, context):\n word = update.message.text\n output = make_output(word)\n if output:\n response_message = output\n else:\n response_message = 'Sorry, I was unable to complete that request.'\n context.bot.send_message(\n chat_id=update.effective_chat.id, text=response_message)", "def _build_response_document(\n document, resource, embedded_fields, latest_doc=None):\n # need to update the document field since the etag must be computed on the\n # same document representation that might have been used in the collection\n # 'get' method\n document[config.DATE_CREATED] = date_created(document)\n document[config.LAST_UPDATED] = last_updated(document)\n # TODO: last_update should include consideration for embedded documents\n\n # generate ETag\n if config.IF_MATCH:\n document[config.ETAG] = document_etag(document)\n\n # hateoas links\n if config.DOMAIN[resource]['hateoas']:\n _lookup_field = config.DOMAIN[resource]['item_lookup_field']\n document[config.LINKS] = {'self':\n document_link(resource,\n document[_lookup_field])}\n\n # add version numbers\n resolve_document_version(document, resource, 'GET', latest_doc)\n\n # media and embedded documents\n _resolve_media_files(document, resource)\n _resolve_embedded_documents(document, resource, embedded_fields)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)" ]
[ "0.5688839", "0.51731527", "0.514443", "0.5102154", "0.50153357", "0.5012565", "0.4874328", "0.48101324", "0.47793704", "0.46545884", "0.46417007", "0.46417007", "0.46417007", "0.4641089", "0.46038324", "0.45967078", "0.45881125", "0.4585066", "0.45759517", "0.4558107", "0.45423108", "0.45390224", "0.45260185", "0.45243666", "0.4512736", "0.44850183", "0.4480135", "0.44765753", "0.4467348", "0.44617182", "0.444332", "0.44281977", "0.4421807", "0.4417603", "0.44086823", "0.44059372", "0.43982753", "0.4398024", "0.43962663", "0.43936163", "0.43896288", "0.438391", "0.43836778", "0.4374554", "0.43675077", "0.43673894", "0.4346243", "0.4342887", "0.4337695", "0.4324436", "0.43196484", "0.43140262", "0.4313325", "0.43126315", "0.4311559", "0.43112665", "0.43087184", "0.42860007", "0.42726836", "0.4270692", "0.42458168", "0.42450857", "0.42441", "0.42419267", "0.42237672", "0.42229915", "0.42216164", "0.42216164", "0.42210928", "0.4208722", "0.42064437", "0.42033434", "0.41938156", "0.41909868", "0.41904745", "0.4180201", "0.4167316", "0.41671056", "0.41580918", "0.41503865", "0.41487885", "0.4145942", "0.41452783", "0.41439974", "0.41436926", "0.4138356", "0.41383123", "0.41366342", "0.41327977", "0.41193762", "0.41183266", "0.41171974", "0.41158634", "0.411477", "0.41142657", "0.41123393", "0.4112185", "0.41119772", "0.410381", "0.41009936" ]
0.6398354
0
Test of function that opens another window
def credits_window():
    credits = tk.Toplevel()
    credits_lbl = tk.Label(credits, text='Software Developed By Allan / SpideyKeiiti\n'
                                         'Made for Prototype purposes for Streets Of Rage Remake Community!')
    credits_lbl.pack()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_window_loaded(self):", "def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def get_test_window(self, window_id, parent):\n pass", "def showWindow(*args, **kwargs)->None:\n pass", "def test_switch_to_current_window(self):\n\n # locators\n switch_to_window_button = 'openwindow'\n courses_in_new_window = '//div[@class=\"course-listing-title\"]'\n\n # steps\n locate_switch_to_window_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, switch_to_window_button))\n )\n locate_switch_to_window_button.click()\n\n # take all window handles into list\n all_window_handles = self.driver.window_handles\n\n # switch to newest window handle\n self.driver.switch_to.window(all_window_handles[1])\n locate_courses_in_new_window = WebDriverWait(self.driver, 10).until(\n lambda driver: self.driver.find_elements_by_xpath(courses_in_new_window)\n )\n # print courses titles into console\n for element in locate_courses_in_new_window:\n print(\"Course from new window: \" + element.text)\n self.driver.close()\n self.driver.switch_to.window(all_window_handles[0])", "def test_open_mainpage(open_browser, url_param):\n open_browser.get(url_param)\n assert open_browser.current_url == url_param\n open_browser.close()", "def test_openDialog_flag_var(self):\n self.run_script(\"\"\"\n foo.openDialog(bar)\n \"\"\")\n self.assert_notices()", "def test_auto_open(self):\n # todo implement", "def TransferToWindow(self):\n return True", "def show(self, window):\r\n\r\n return", "def TransferFromWindow(self):\n return True", "def build_second_window():\r\n\r\n new_window = tk.Tk()\r\n windows.append(new_window)\r\n new_window.protocol(\"WM_DELETE_WINDOW\", new_round(new_window))\r\n\r\n ask = tk.Label(new_window, text='Would You Like To Play Again?', bg='Cyan')\r\n ask.pack(fill=tk.X)\r\n\r\n frame = tk.Frame(new_window)\r\n frame.pack()\r\n\r\n yes_button = tk.Button(frame, text='Yes', bg='green',\r\n command=new_round(new_window))\r\n yes_button.pack(side=tk.LEFT)\r\n\r\n no_button = tk.Button(frame, text='No', bg='red',\r\n command=close)\r\n no_button.pack(side=tk.LEFT)", "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def into_new_window(self):\r\n t1 = time.time()\r\n try:\r\n all_handle = self.driver.window_handles\r\n flag = 0\r\n while len(all_handle) < 2:\r\n time.sleep(1)\r\n all_handle = self.driver.window_handles\r\n flag += 1\r\n if flag == 5:\r\n break\r\n self.driver.switch_to.window(all_handle[-1])\r\n self.my_print(\"{0} Switch to the new window,new window's url: {1}, Spend {2} seconds\".format(success,\r\n self.driver.current_url,time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable switch to the new window, Spend {1} seconds\".format(fail, time.time() - t1))\r\n raise", "def openInstructions(self, e):\n\n\t\tif (not self.open_window):\n\t\t\tself.open_window = True\n\t\t\tself.instructions_window.close.focus_set()\n\t\t\tself.main_menu_window.liftFrame(self.instructions_window.instructions_frame)\n\t\telif (self.open_window):\n\t\t\tself.open_window = False\n\t\t\tself.menu_window.playButton.focus_set()\n\t\t\tself.main_menu_window.lowerFrame(self.instructions_window.instructions_frame)", "def batch_test_open():\n try:\n WebDriverWait(browser, 
5).until(EC.presence_of_element_located((By.CLASS_NAME, \"cdk-overlay-pane\")))\n ActionChains(browser).send_keys(Keys.ESCAPE).perform()\n except:\n print(\"No migration pop-up\")\n\n WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.LINK_TEXT, config.app_name)))\n browser.find_element_by_link_text(config.app_name).click()\n WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'nav-section')))\n buttons = browser.find_elements_by_class_name('nav-section')\n buttons[1].click()\n WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), '\n '\"Batch testing\")]')))\n browser.find_element_by_xpath('//button[contains(text(), \"Batch testing\")]').click()", "def open_window():\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n sys.exit(app.exec_())", "def open_web_crawler_window(self, event):\n self.gui.open_web_crawler_window(self.root)", "def open_new_window(self, selector):\n current_window = self.driver.current_window_handle\n element = self.get_element(selector)\n element.click()\n all_handles = self.driver.window_handles\n for handle in all_handles:\n if handle != current_window:\n self.driver.switch_to.window(handle)", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def newwindow(url):\n\n # Open the URL\n webbrowser.open_new(url)", "def open_generatorWindow(self):\n self.window = generatorWindow(self)\n self.hide()", "def get_main_window():\n\n pass", "def test_openWindowWithWrongSettingsFile(self):\n self.createWrongSettingsFile()\n return self.assertRaises(SettingsCorrupted, ConfigurationWindow)", "def testOnHelp(self):\n webbrowser.open = MagicMock()\n\n # invoke the tested method\n self.widget.onHelp()\n\n # see that webbrowser open was attempted\n webbrowser.open.assert_called_once()", "def create_window(session):\n def create_window():\n windows_before = session.handles\n name = session.execute_script(\"window.open()\")\n assert len(session.handles) == len(windows_before) + 1\n new_windows = list(set(session.handles) - set(windows_before))\n return new_windows.pop()\n return create_window", "def doSwitchToWindow(self, windowName, timeout=30.0):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n \n cmdId = self.getAllWindowHandles()\n rsp = self.hasWindowHandles(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n listHandles = elementVall.get('handles').getItems()\n \n matched = False\n for (i,h) in listHandles:\n cmdId = self.switchToWindow( windowName=h )\n rsp = self.isWindowsSwitched(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False; break;\n\n cmdId = self.getTitle( )\n rsp = self.hasWindowTitle(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False; break;\n elementVall = rsp.get('GUI', 'value')\n title = elementVall.get('value')\n \n while not len(title):\n time.sleep(0.5)\n cmdId = self.getTitle( )\n rsp = self.hasWindowTitle(timeout=timeout, commandId=cmdId)\n if rsp is None: ret = False; break;\n elementVall = rsp.get('GUI', 'value')\n title = elementVall.get('value')\n \n if windowName in title: \n matched = True\n break\n 
\n if not matched:\n ret = False\n \n return ret", "def test2(self):\n self.f = MagicMock(return_value=check_win(\n [' ', 'o', 'x', 'x', 'o', 'o', 'x', 'o', 'x']))\n self.assertEqual(self.f(), True)", "def test_cover_open_close(self):\n with patch.dict(TYPES, {'WindowCoveringBasic': self.mock_type}):\n state = State('cover.open_window', 'open',\n {ATTR_SUPPORTED_FEATURES: 3})\n get_accessory(None, state, 2, {})", "def verify_popup(self, type):", "def test_launch_composition(self):\n pass", "def test_open_browser(self, os_type, webbrowser_expect, popen_expect):\n from streamlit import env_util\n\n env_util.IS_WINDOWS = os_type == \"Windows\"\n env_util.IS_DARWIN = os_type == \"Darwin\"\n env_util.IS_LINUX_OR_BSD = os_type == \"Linux\"\n\n with patch(\"streamlit.env_util.is_executable_in_path\", return_value=True):\n with patch(\"webbrowser.open\") as webbrowser_open:\n with patch(\"subprocess.Popen\") as subprocess_popen:\n util.open_browser(\"http://some-url\")\n self.assertEqual(webbrowser_expect, webbrowser_open.called)\n self.assertEqual(popen_expect, subprocess_popen.called)", "def openGameTools(*args):\n pyqt.showDialog(gameTools)", "def switch_state():\n\tDmg.OpenWindow()", "def menu_screen(win):\n\tpass", "def __openBookmarkInNewWindow(self):\n self.__openBookmark(newWindow=True)", "def test_close_help(self):\r\n self.driver.switch_to.window(self.driver.window_handles[1])\r\n assert \"Help and Tutorials\" in self.driver.title\r\n self.driver.close()", "def test_openDialog(self):\n\n def test_uri(self, uri):\n self.setUp()\n self.setup_err()\n self.run_script('foo.openDialog(\"%s\")' % uri)\n self.assert_failed(with_warnings=True)\n\n uris = ['http://foo/bar/',\n 'https://foo/bar/',\n 'ftp://foo/bar/',\n 'data:asdf']\n for uri in uris:\n yield test_uri, self, uri", "def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)", "def test_info(manager):\n manager.test_window(\"one\")\n manager.c.sync()\n info = manager.c.window.info()\n assert info[\"name\"] == \"one\"\n assert info[\"group\"] == \"a\"\n assert info[\"wm_class\"][0] == \"TestWindow\"\n assert \"x\" in info\n assert \"y\" in info\n assert \"width\" in info\n assert \"height\" in info\n assert \"id\" in info", "def show(self,window):\n self.showFunctions(window)", "def tool_open_clicked(self, widget, data=None):\n self.open_chooser.show()", "def doModal(*args):", "def doModal(*args):", "def doModal(*args):", "def doModal(*args):", "def show_popup(self, view, docstring, location=None):", "def test_nsIFile_launch():\n\n assert _do_test_raw('foo.launch()').failed()", "def on_OpenExplorer_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n\n url=\"http://kfc.matrix.io\"\n\n self.browser.openurl(url)\n self.OnlyDisplay(f\"start {url}\")\n #MATRIXWebutil.open_new(url)\n #MATRIXWebutil.open_new_tab(url)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')", "def popup_info(self, force_object, event):\n self.window.log('window popup called')\n po = force_object\n\n def callb():\n if type(po) == Physics.PhysicsObject:\n fow = PhysicsWindow.PhysicsObjectWindow(self.window, po, event.x, event.y)\n return callb", "def showWindow(*args, **kwargs):\n\ttry:\n\t\targs[0].run()\n\texcept AttributeError: # Regular windows don't have run\n\t\targs[0].show()", "def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = 
[[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in ('0123456789'):\r\n # delete last char from input\r\n 
window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()", "def run_app(self):\n windows_name = 'Little Fighter 2'\n try:\n hwnd = winauto.findTopWindow(wantedText=windows_name)\n except winauto.WinGuiAutoError:\n import os\n os.startfile(self.lf2_path)\n hwnd = []\n while not hwnd:\n hwnd = winauto.findTopWindows(wantedText=windows_name)\n hwnd = hwnd[0]\n\n win_hwnd = win32ui.FindWindow(None, windows_name)\n win_hwnd.SetForegroundWindow()\n win_hwnd.SetFocus()\n return hwnd, win_hwnd", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def start_test(url):\n \n Debug.user(' ################# start Test ######################')\n App.open('firefox --private-window '+url)\n wait(\"1501595436606.png\", 10)\n\n click(\"1501595453560.png\")\n\n if exists():\n \n click()\n else:\n click()\n \n\n\n if exists(\"1499781534684.png\"):\n click(\"1499781552298.png\")\n type('root')\n click(\"1499781563870.png\")\n else:\n pass\n click(\"1499781591282.png\")", "def edit_pane_test_open(event):\n c = event['c']\n\n if not hasattr(c, '__edit_pane_test'):\n c.__edit_pane_test = True\n\n\n class MinimalDemoProvider:\n\n def ns_provides(self):\n return [(\"Demo editor\", \"__demo_provider_minimal_slider\")]\n\n def ns_provide(self, id_):\n if id_ == \"__demo_provider_minimal_slider\":\n w = LeoEditPane(c=c, mode='split')\n return w\n return None\n\n def ns_provider_id(self):\n return \"__demo_provider_minimal\"\n\n c.free_layout.get_top_splitter().register_provider(MinimalDemoProvider())\n\n s = c.free_layout.get_top_splitter()\n s.open_window(\"__demo_provider_minimal_slider\")", "def OpenIntegrationWindow( raiseOnExitCode=False ):\n global submissionInfo\n\n integrationPath = os.path.join( submissionInfo[\"RepoDirs\"][\"submission/Integration/Main\"], \"IntegrationUIStandAlone.py\" )\n scenePath = NodegraphAPI.GetSourceFile()\n if not scenePath:\n raise SceneNotSavedError()\n argArray = [\"-ExecuteScript\", integrationPath, \"-v\", \"2\", \"-d\", \"Katana\", \"Draft\", \"Shotgun\", \"FTrack\", \"--path\", scenePath]\n try:\n pipelineToolStatus = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=True)\n except subprocess.CalledProcessError as e:\n pipelineToolStatus = HandlePipelineToolsCalledProcessError( e )\n\n return pipelineToolStatus", "def create_new_window():\n logging.debug(\"Function create_new_window() called\")\n\n new_window = tk.Toplevel()\n new_window.title(\"Test functions\")\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_origins()\",\n command=lambda: dss.update_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n ),\n )\n ButtonCal.grid(row=1, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes()\",\n command=lambda: dss.update_classes(\n class_list_=class_list,\n champions_list_=champions_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=2, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes_and_origins()\",\n command=lambda: dss.update_classes_and_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=3, column=0)\n\n # is_in_game = tk.IntVar()\n # dss.create_gui_counter_with_plus_minus(window_tk=new_window, origin_index=1, counter=is_in_game, shift_between_upside_downside=0)\n\n ButtonCal = tk.Button(\n new_window,\n 
text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=4, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_from_ocr()\",\n command=lambda: dss.show_nonzero_counters_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n ),\n )\n ButtonCal.grid(row=5, column=0)\n\n Labeling = tk.Label(\n new_window, text=\"Care additional points in below\", font=BOLDED_FONT\n )\n Labeling.grid(row=6, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_points_for_nonzero_counters_from_ocr()\",\n command=lambda: dss.show_points_for_nonzero_counters_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n ),\n )\n ButtonCal.grid(row=7, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points_from_ocr() OCR button\",\n command=lambda: dss.show_nonzero_counters_with_points_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=8, column=0)\n\n Labeling = tk.Label(new_window, text=\"with Game\", font=BOLDED_FONT)\n Labeling.grid(row=0, column=1)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=1, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_champions()\",\n command=lambda: dss.update_curent_cropped_ss_with_champions(),\n )\n ButtonCal.grid(row=2, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_champions()\",\n command=lambda: dss.update_ocr_results_champions(\n cropped_ss_with_champion_card_names=dss.crop_img,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=3, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_sorted_champions_to_buy()\",\n command=lambda: dss.update_sorted_champions_to_buy(\n ocr_results_sorted=dss.ocr_results_champions,\n champions_list_for_ocr_=champions_list_for_ocr,\n ),\n )\n ButtonCal.grid(row=4, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n 
text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=5, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"draw_rectangles_show_points_show_buttons_reset_counters() scan&go\",\n command=lambda: dss.draw_rectangles_show_points_show_buttons_reset_counters(\n rgb_colours_list_=rgb_colours_list,\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr_=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n df_=df,\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n round_counter=CounterOcrResultsRound,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=6, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(4)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=4,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=0, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(5)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=5,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=1, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(6)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=6,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=2, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(7)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=7,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=3, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"append_counters_to_input_list(champion_info)\",\n command=lambda: dss.append_counters_to_input_list(\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=4, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"append_counters_to_input_list(champion_to_buy_info)\",\n command=lambda: dss.append_counters_to_input_list(\n input_list=champion_to_buy_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=5, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"calculate_card_position_on_screen(2)\",\n command=lambda: 
dss.calculate_card_position_on_screen(\n card_index=2,\n X_FIRST_CHAMPION_CARD_=dss.X_FIRST_CHAMPION_CARD,\n PADDING_BETWEEN_CHAMPION_CARDS_=dss.PADDING_BETWEEN_CHAMPION_CARDS,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=0, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"build_list_of_champion_cards_rectangles()\",\n command=lambda: dss.build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=dss.Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n H_CHAMPION_CARD_=dss.H_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=1, column=3)\n\n Labeling = tk.Label(\n new_window, text=\"Another cases below this row\", font=BOLDED_FONT\n )\n Labeling.grid(row=9, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"check_nonzero_counters()\",\n command=lambda: dss.check_nonzero_counters(\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n ),\n )\n ButtonCal.grid(row=10, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters()\",\n command=lambda: dss.show_nonzero_counters(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n row_offset=0,\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n SHIFT_BETWEEN_ORIGINS_=dss.SHIFT_BETWEEN_ORIGINS,\n ),\n )\n ButtonCal.grid(row=11, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes_and_origins()\",\n command=lambda: dss.update_classes_and_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=12, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_points_for_nonzero_counters()\",\n command=lambda: dss.show_points_for_nonzero_counters(\n tk_window=MainWindow,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n row_offset=2,\n show_mode=1,\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n SHIFT_BETWEEN_ORIGINS_=dss.SHIFT_BETWEEN_ORIGINS,\n ),\n )\n ButtonCal.grid(row=13, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points()\",\n command=lambda: dss.show_nonzero_counters_with_points(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=14, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"reset_counters_in_list()\",\n command=lambda: dss.reset_counters_in_list(\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy\n ),\n )\n ButtonCal.grid(row=10, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=11, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points_from_ocr()\",\n command=lambda: 
dss.show_nonzero_counters_with_points_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=12, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"build_list_of_champion_cards_rectangles()\",\n command=lambda: dss.build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=dss.Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n H_CHAMPION_CARD_=dss.H_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=13, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"draw_rectangles_show_points_show_buttons_reset_counters() scan&go\",\n command=lambda: dss.draw_rectangles_show_points_show_buttons_reset_counters(\n rgb_colours_list_=rgb_colours_list,\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr_=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n df_=df,\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n round_counter=CounterOcrResultsRound,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=14, column=1)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=10, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_rounds()\",\n command=lambda: dss.update_curent_cropped_ss_with_rounds(),\n )\n ButtonCal.grid(row=11, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_round()\",\n command=lambda: dss.update_ocr_results_round(\n reader_=reader, round_counter=CounterOcrResultsRound\n ),\n )\n ButtonCal.grid(row=12, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"full_state_update_champions_ocr()\",\n command=lambda: dss.full_state_update_rounds_ocr(\n reader_=reader, round_counter=CounterOcrResultsRound\n ),\n )\n ButtonCal.grid(row=13, column=2)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=10, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_gold()\",\n command=lambda: dss.update_curent_cropped_ss_with_gold(),\n )\n ButtonCal.grid(row=11, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_gold()\",\n command=lambda: dss.update_ocr_results_gold(\n reader_=reader,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=12, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"full_state_update_gold_ocr()\",\n command=lambda: dss.full_state_update_gold_ocr(\n reader_=reader,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=13, column=3)\n\n logging.debug(\"Function create_new_window() end\")", "def consultar(self):\n self.new_window = tk.Toplevel(self.menu)\n 
Consultar(self.new_window)", "def go_to_page_window(self):\n if self.window is not None:\n if self.window != self.app.driver.current_window_handle:\n try:\n self.app.log.info(f'Switching to page window {self.name}')\n self.app.driver.switch_to.window(self.window)\n except exceptions.NoSuchWindowException:\n handles = self.app.driver.window_handles\n self.app.log.error(f'No window \"{self.window}\" in windows handles list: {handles}')\n raise exceptions.NoSuchWindowException\n else:\n if self.is_window_exist() is False:\n self.app.log.error(f'Unable to switch to the page \"{self.name}\"')\n raise NotImplementedError", "def TransferToWindow(self):\n\t\treturn True # Prevent wxDialog from complaining.", "def open_machine_learner_window(self, event):\n self.gui.open_machine_learner_window(self.root)", "def test_instr_view_display(instr_view):\n show_and_close_widget(instr_view)", "def open_top():\n _open_url_path('')", "def test_node_info_popup(self):\n def test_popup(node):\n node.details.click()\n with NodeInfo() as details:\n self.assertEqual(\n node.name.text, details.header.text,\n 'Node name')\n details.close.click()\n details.wait_until_exists()\n\n with Nodes()as n:\n test_popup(n.nodes_discovered[0])\n test_popup(n.nodes_offline[0])\n test_popup(n.nodes_error[0])", "def quick_open_preview(self, window):\n if not self.current_history_entry:\n return\n\n view = self.current_view\n other_view = self.get_view_from_another_group(window, view.file_name())\n\n # Only try to open and position the file if it is transient\n if self.is_transient_view(window, view):\n if not self.REOPEN_IN_CURRENT_GROUP and other_view:\n # Focus the other view instead of opening a clone\n self.debug(\"Focussing existing view in group %d\" % window.get_view_index(other_view)[0])\n self.__close_preview(window)\n window.focus_view(other_view)\n # Changing focus to another group requires reopening the panel, unfortunately\n return True\n else:\n (group, index) = self.__calculate_view_index(window, self.current_history_entry)\n view = window.open_file(self.current_history_entry['filename'])\n window.set_view_index(view, group, index)\n\n # Refocus on the newly opened file rather than the original one\n self.__clear_context()\n self.__track_calling_view(window)", "def wait_for_and_switch_to_popup(self, num_windows=2, timeout=10):\n self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout)\n self.selenium.switch_to.window(self.selenium.window_handles[-1])\n self.wait_page_ready()", "def startWindowCheck(self):\n\t\ttry:\n\t\t\tassert( type( threading.current_thread() ) ) == threading.Thread\n\t\t\tprint(f\"\\n{bcolors.OKCYAN}[+]{bcolors.ENDC}Thread \\'%s\\' created with PID: %d\" % ( threading.current_thread().name, getpid() ))\n\t\texcept Exception as e:\n\t\t\tprint(f\"{bcolors.FAIL}[!]{bcolors.ENDC}Unable to run startWindowCheck function.\\n\")\n\t\t\tprint(\"{}\".format(e))\n\t\t\treturn -1\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tif self.multipleWindows():\n\t\t\t\t\tprint(f\"{bcolors.OKGREEN}[+]{bcolors.ENDC} Multiple Windows open.\\n\")\n\t\t\t\t\tself.handles = self.driver.window_handles\n\t\t\t\t\tfor h in self.handles:\n\t\t\t\t\t\tprint(\"\\t[+] Handle : {}\".format(h))\n\t\t\t\t\tsleep(30) # Need a better way to wait / wake thread\n\t\t\t\telse:\n\t\t\t\t\tsleep(10)\n\t\texcept Exception as e:\n\t\t\tprint(f\"{bcolors.OKGREEN}[-]{bcolors.ENDC} WindowChecker thread returning.\\n\")\n\t\t\treturn 1\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(f\"{bcolors.OKGREEN}[-]{bcolors.ENDC} WindowChecker 
thread returning.\\n\")\n\t\t\treturn 2", "def move_to_win(self):\n self.external_win = PlotWindow(plot=self.pw, parent=self)\n self.external_win.closeWin.connect(lambda: self.layout().takeAt(1))\n self.external_win.closeWin.connect(lambda: self.layout().insertWidget(1, self.pw))\n self.external_win.closeWin.connect(lambda: self.btn_open.setEnabled(True))\n self.external_win.show()", "def _ensure_valid_window(session):\n try:\n session.window_handle\n except webdriver.NoSuchWindowException:\n session.window_handle = session.handles[0]", "def click_add():\n # TODO: 1. In frontend_script.py, create function \"create_window()\" that takes a Toplevel() as a parameter.\n # TODO: 2. In this file, implement the code below\n # new_window = Toplevel(root)\n # frontend_script.create_window(new_window)", "def handle_new_window(event):\n url = event.GetURL()\n webbrowser.open(url)", "def testStageOpens(self):\n self.assertTrue(self._stage)", "def test_open_windows(open_command, first_app_config, tmp_path):\n # Create the project folder to mock a created project.\n open_command.project_path(first_app_config).mkdir(parents=True)\n\n # Create a stub java binary\n create_file(tmp_path / \"briefcase\" / \"tools\" / \"java17\" / \"bin\" / \"java\", \"java\")\n\n # Create a stub sdkmanager\n create_sdk_manager(tmp_path, extension=\".bat\")\n\n open_command(first_app_config)\n\n open_command.tools.os.startfile.assert_called_once_with(\n tmp_path / \"base_path\" / \"build\" / \"first-app\" / \"android\" / \"gradle\"\n )", "def multipleWindows(self):\n\t\treturn False if (len(self.driver.window_handles) == 1) else True", "def open_settings_window(self):\n self.screen_blank_timer.stop()\n self.settings_window.show()\n # Ensure the window is raised in top, useful when main window is fullscreened\n # and settings window is accidentally sent to the background\n getattr(self.settings_window, \"raise\")()\n self.settings_window.activateWindow()\n event_logger.debug(\"Settings window opened\")", "def BtnOpenOverlayFolder():\n location = os.path.join(os.path.dirname(__file__), \"Overlay\")\n os.startfile(location)\n return", "def open_location(self):\n try:\n self.assertEqual(self.test_location, self.selenium.get_location())\n except AssertionError, self.e:\n self.verificationErrors.append(str(self.e))", "def __window_confirm(self, text):\n return True", "def open_keyboard(self, instance):\n self.popup.open()", "def _doOpenTool(self):\n self._cmdOpenTool()", "def raise_window(window):\n window.attributes('-topmost', 1)\n window.attributes('-topmost', 0)", "def openStartPyWindow(parent=None, py_filename=None):\n result = False\n obj = None\n try:\n obj = iqStartPyWindow()\n obj.py_filename = py_filename\n new_title = _(u'Python module') + ' <%s>' % os.path.basename(py_filename)\n obj.getGtkTopObject().set_title(new_title)\n obj.init()\n obj.getGtkTopObject().run()\n result = True\n except:\n log_func.fatal(u'Error open window <start_py_window>')\n\n if obj and obj.getGtkTopObject() is not None:\n obj.getGtkTopObject().destroy()\n return result", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def contextualhelpverificationhome(window,contextualhelpbutton):\n try:\n testcaseDescription = \"contextual help\"\n filename = testcaseDescription + \"fail\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n atomacclick(contextualhelpbutton)\n appbuttons = 
getAppButtons(window)\n for i in range(1,5):\n time.sleep(3)\n screenshot(filename)\n atomacclick(appbuttons[26])\n time.sleep(3)\n atomacclick(appbuttons[26])\n except Exception as er:\n return False\n print \"Not able to click on contextualhelpverification\"", "def set_window(self, handle):\n pass", "def test_func2():\n app = QtWidgets.QApplication(sys.argv)\n connection = sqlite3.connect(\"bookstore.db\")\n bk1 = QtWidgets.QMainWindow()\n ui = Ui_bk1()\n ui.strID = \"2013323-3237\"\n ui.strName = \"sfsdfsdff\"\n ui.strAuthor = \"sfsdssdfsdff\"\n ui.setupUi(bk1)\n bk1.show()\n assert ui.Check() == True, \"Passed\"\n connection.close()\n app.exit()", "def _cb(self, hwnd, extra):\n if hwnd in self.windows:\n pass\n\n window = Window(\n hwnd=hwnd,\n text=win32gui.GetWindowText(hwnd),\n rectangle=win32gui.GetWindowRect(hwnd))\n\n self.windows[hwnd] = window", "def startWindow():\n\n m = mainWindow()\n\n # Show Window\n m.show()\n\n # Return to stay alive\n return m", "def focus_on(window):\n return Cmd(\"{}wincmd w\", window)", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def openScore(self, e):\n\n\t\tif (not self.open_window):\n\t\t\tself.open_window = True\n\t\t\tself.score_window.close_score.focus_set()\n\t\t\tself.main_menu_window.liftFrame(self.score_window.score_frame)\n\t\telif (self.open_window):\n\t\t\tself.open_window = False\n\t\t\tself.menu_window.playButton.focus_set()\n\t\t\tself.main_menu_window.lowerFrame(self.score_window.score_frame)", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def Help(window, referenceid):\n try:\n allobjects = getAllObjects(window)\n atomacclick(allobjects[53])\n ldtp.wait(2)\n Runwindow = getChildwindows(referenceid)\n buttons = getAppButtons(Runwindow)\n atomacclick(buttons[0])\n buttons = getAppButtons(Runwindow)\n ldtp.wait(4)\n except Exception as er:\n return False", "def openMenuHandler(self, action):\n\n button_text = action.text()\n\n if button_text == 'Open Command File':\n self.openFile()\n\n elif button_text == 'Open Scenario':\n self.openScenarioFile()", "def __window_focus(self):\n pass", "def ev_windowshown(self, event: tcod.event.WindowEvent) -> T | None:", "def open_options_window(self):\n window_options = OptionsWindow(self.master)\n window_options.lift() # Show above main window\n # TODO: block the user from interacting with the main window\n # while the options window is open\n window_options.focus_force()" ]
[ "0.70974696", "0.703137", "0.68839926", "0.6804897", "0.6764545", "0.6681683", "0.65590286", "0.6494494", "0.6479063", "0.64303726", "0.61622506", "0.6118606", "0.60861903", "0.60753334", "0.60275924", "0.6021859", "0.6018818", "0.6013958", "0.6012709", "0.6001387", "0.5998173", "0.5987263", "0.59839535", "0.59770787", "0.59748703", "0.5970651", "0.59420294", "0.59321326", "0.5931386", "0.5904289", "0.58936214", "0.5889536", "0.5881314", "0.58563745", "0.58545023", "0.58358395", "0.5820253", "0.58186036", "0.58175445", "0.58054405", "0.580385", "0.5801038", "0.5795414", "0.5794241", "0.5794241", "0.5794241", "0.5794241", "0.5782152", "0.5777382", "0.57730275", "0.5769256", "0.5759586", "0.5734031", "0.57313365", "0.57305366", "0.57096505", "0.56832147", "0.56724197", "0.5663738", "0.5660508", "0.5653656", "0.56487995", "0.5635414", "0.5627303", "0.5625803", "0.5622926", "0.5621892", "0.5610235", "0.56049466", "0.559562", "0.559262", "0.55909914", "0.55871004", "0.55734235", "0.5568681", "0.55685973", "0.55594563", "0.5548984", "0.5539522", "0.5527955", "0.5525888", "0.5524146", "0.55204517", "0.55183583", "0.55178106", "0.55151767", "0.5514905", "0.5510992", "0.5509366", "0.5508258", "0.5505355", "0.55027163", "0.54999375", "0.54884946", "0.54874104", "0.54730815", "0.5468889", "0.54593575", "0.54587847", "0.5454935", "0.545011" ]
0.0
-1
Function that represents the window in which Character Mods can be applied.
def chars_window():
    path_dir = r'Sor_Mods_Storage\chars'
    char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    chars = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(chars, image=mainTitleImg)
    title = tk.Label(chars, text="Characters Mods")

    comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))

    def apply_char_mod():
        char_selected = comboBox_chars.get()
        result_window = tk.Toplevel()

        value = ''
        if char_selected == '':
            value = f'{value} Please Select an Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')
            value = f'Character Mod {char_selected} applied!'

        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)

    title.grid(row=0, column=0)
    comboBox_chars.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def _get_window_width(self):", "def modifiers_coding_map_creator(self):\n self.mapCreatorWindow = map_creator.ModifiersMapCreatorWindow()\n self.mapCreatorWindow.move(self.pos())\n self.mapCreatorWindow.resize(CODING_MAP_RESIZE_W, CODING_MAP_RESIZE_H)\n self.mapCreatorWindow.show()", "def __window_print(self):\n pass", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: 
bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def _feature_window_function(window_type, window_size, blackman_coeff):\n if window_type == HANNING:\n return torch.hann_window(window_size, periodic=False)\n elif window_type == HAMMING:\n return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46)\n elif window_type == POVEY:\n # like hanning but goes to zero at edges\n return torch.hann_window(window_size, periodic=False).pow(0.85)\n elif window_type == RECTANGULAR:\n return torch.ones(window_size, dtype=torch.get_default_dtype())\n elif window_type == BLACKMAN:\n a = 2 * math.pi / (window_size - 1)\n window_function = torch.arange(window_size, dtype=torch.get_default_dtype())\n # can't use torch.blackman_window as they use different coefficients\n return blackman_coeff - 0.5 * torch.cos(a * window_function) + \\\n (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)\n else:\n raise Exception('Invalid window type ' + window_type)", "def getwinsize(self):", "def login_window(window):\n\n \"define my variables\"\n enter_name = Text(Point(130,150), \"Enter your Nickname:\")\n backround_Login = Image(Point(130,130),r'Login_Backround.gif')\n max_chr = Text(Point(130,110), \"Maximum character!\")\n name = Text(Point(130,130),\"\")\n illegal_name = Text(Point(130,110),\"Illegal Name!\")\n \"\"\"make my setting\"\"\"\n window.setCoords(0, 0, 256, 256)#sets the window coordinates ;bottom left is (0, 0) and top right is (256, 256)\n window.setBackground(\"White\")\n max_chr.setTextColor(\"Red\")\n illegal_name.setTextColor(\"Red\")\n\n backround_Login.draw(window)\n enter_name.draw(window)\n\n while not window.isClosed():\n new_chr = window.getKey()\n max_chr.undraw()\n illegal_name.undraw()\n if new_chr == \"Return\":\n if len(name.getText()) < 1:\n illegal_name.draw(window)\n else:\n break\n if new_chr == \"space\":\n name.setText(name.getText() + \" \")\n continue\n if new_chr == \"BackSpace\":\n name.setText(name.getText() + new_chr)\n name = delete_chr(name)\n else:\n if len(new_chr)>1:\n continue\n if (ord(new_chr) > 126 or ord(new_chr) < 33):\n continue\n else:\n name.setText(name.getText() + new_chr)\n if len(name.getText()) < 11:\n name.undraw()\n name.draw(window)\n else:\n max_chr.draw(window)\n name.setText(name.getText()[:-1])\n name.undraw()\n name.draw(window)\n enter_name.undraw()\n name.undraw()\n return name.getText()", "def getRenWin(self):\n return self.renWinInteract.GetRenderWindow()", "def win(self):\n return \"Win\"", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def _get_code_command_windows():\n while 1:\n print('Use \\'E\\', \\'S\\', \\'W\\', \\'N\\'' +\\\n '[+ 1-9] to move. 
Or \\'q\\' to give up.')\n hitkeys = input()\n if len(hitkeys) > 0:\n char_ = hitkeys[0].upper()\n if char_ in 'ESNW':\n if len(hitkeys) == 2:\n num_ = hitkeys[1]\n if num_ in '123456789':\n return char_ + num_\n else:\n return char_ + '1'\n elif char_ == 'Q':\n return 'end'", "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def get_main_window():\n\n pass", "def current_window(self):\n pass", "def gen_window(self, field):\n return random.choice(range(10, 200, 10))", "def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def update_window_formatting(self):\n self.update_sequence_window()\n if self.pDB_open:\n self.pDB_open.refresh_primer()\n if self.show_comp_sequence.get==1:\n self.sequ_win.refresh_DNAseq()\n return", "def answer(window_string):\n window = Window(window_string)\n if(window.f==1):\n return window.w * window.h\n else:\n return -1", "def get_word_window(self, pattern, tokens, constraints):\n split_pattern = pattern.split()\n if len(split_pattern) > 1:\n textsnippets = self.__get_word_window_more_words_help(split_pattern, tokens, constraints)\n else:\n textsnippets = self.__get_word_window_one_word_help(pattern, tokens, constraints)\n print(textsnippets)\n return textsnippets", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def __window_prompt(self, text):\n return True", "def wm(self):\n return 
self.position", "def alt_tab_win(number: int):\n _alt_tab(number)", "def menu_screen(win):\n\tpass", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def pallete_window():\n path_dir = r'Sor_Mods_Storage\\palletes'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n palletes = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(palletes, image=mainTitleImg)\n title = tk.Label(palletes, text=\"Pallete Mods\")\n\n comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys()))\n\n def apply_pallete_mod():\n pallete_selected = comboBox_palletes.get()\n result_window = tk.Toplevel()\n\n value = ''\n if pallete_selected == '':\n value = f'{value} Please Select an Pallete to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes')\n value = f'Enemy Mod {pallete_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod)\n\n title.grid(row=0, column=0)\n comboBox_palletes.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def columnWin( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if compiledVal.lower() == 'xxx':\n return 'X'\n\n elif compiledVal.lower() == 'ooo':\n return 'O'\n\n return None", "def window(self):\n return self.attribute('VW')", "def get_win_method(self):\n return self._how_to_win", "def get_w(self):\n raise NotImplementedError", "def __init__(self, len_x, len_y, win_func, *args, **kwargs):\n self._gen_window(len_x, len_y, win_func, *args, **kwargs)", "def showWindow(*args, **kwargs)->None:\n pass", "def _get_window_start(self, waveforms):", "def refrwindow(self):\n return self.attribute('RW')", "def current_swing_mode(self):\n return None", "def apply_window(audio):\n\treturn audio * numpy.hanning(len(audio))", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def visualize_attention(window_name, tokens_and_weights):\n root = tk.Tk()\n root.title(window_name)\n text_widget = tk.Text(root)\n text = ''\n\n # List of indices, where each element will be a tuple in the form: (start_index, end_index)\n low_attention_indices = []\n medium_attention_indices = []\n high_attention_indices = []\n very_high_attention_indices = []\n\n # Iterate over tokens and weights and assign start and end indices depending on attention weight\n current_index = 0\n for token_and_weight in tokens_and_weights:\n token, weight = token_and_weight[0], token_and_weight[1]\n text += token + ' '\n\n if weight >= 0.80:\n very_high_attention_indices.append((current_index, current_index + len(token)))\n elif weight >= 0.60:\n 
high_attention_indices.append((current_index, current_index + len(token)))\n elif weight >= 0.40:\n medium_attention_indices.append((current_index, current_index + len(token)))\n elif weight >= 0.20:\n low_attention_indices.append((current_index, current_index + len(token)))\n\n current_index += len(token) + 1\n\n text_widget.insert(tk.INSERT, text)\n text_widget.pack(expand=1, fill=tk.BOTH)\n\n # Add Tkinter tags to the specified indices in text widget\n for indices in low_attention_indices:\n text_widget.tag_add('low_attention', '1.' + str(indices[0]), '1.' + str(indices[1]))\n\n for indices in medium_attention_indices:\n text_widget.tag_add('medium_attention', '1.' + str(indices[0]), '1.' + str(indices[1]))\n\n for indices in high_attention_indices:\n text_widget.tag_add('high_attention', '1.' + str(indices[0]), '1.' + str(indices[1]))\n\n for indices in very_high_attention_indices:\n text_widget.tag_add('very_high_attention', '1.' + str(indices[0]), '1.' + str(indices[1]))\n\n # Highlight attention in text based on defined tags and the corresponding indices\n text_widget.tag_config('low_attention', background='#FDA895')\n text_widget.tag_config('medium_attention', background='#FE7D61')\n text_widget.tag_config('high_attention', background='#FC5430')\n text_widget.tag_config('very_high_attention', background='#FF2D00')\n\n root.mainloop()", "def get_absolute_window_words(self, pano_windows, window):\n words = []\n im, scale_w, scale_h, window_w, window_h = self.__resize(window.im)\n with torch.no_grad():\n # char_bboxes, char_scores, word_instances = ...\n _, _, word_instances = self.charnet(im, scale_w, scale_h, window_w, window_h)\n\n for word in word_instances:\n # To combat google's watermark of street-view messing with the words\n if word.text == 'GOOGLE':\n continue\n old_word_bbox = word.word_bbox.copy()\n # update absolute position\n word.word_bbox[::2] = [x_coord + window.pos_x for x_coord in word.word_bbox[::2]]\n word.word_bbox[1::2] = [y_coord + window.pos_y for y_coord in word.word_bbox[1::2]]\n word_abs = word\n # open a new window for near-border words\n if self.__word_is_near_border(old_word_bbox, 50, window_w, window_h):\n zoom_w = pano_windows.get_window_at_pos(word.word_bbox[0], word.word_bbox[1], 50)\n z_im, z_scale_w, z_scale_h, z_window_w, z_window_h = self.__resize(zoom_w.im)\n with torch.no_grad():\n _, _, z_word_instances = self.charnet(z_im, z_scale_w, z_scale_h,\n z_window_w, z_window_h)\n\n for z_word in z_word_instances: # Swap only the word that intersects\n z_word.word_bbox[::2] = [x_coord + zoom_w.pos_x for\n x_coord in z_word.word_bbox[::2]]\n z_word.word_bbox[1::2] = [y_coord + zoom_w.pos_y for\n y_coord in z_word.word_bbox[1::2]]\n if self._do_words_intersect(word, z_word):\n word_abs = z_word # save only the new word from the window\n break\n\n words.append(word_abs)\n return words", "def show(self, window):\r\n\r\n return", "def renderWindows(XWindow, YWindow, occurrencyWindow, windowModel = False):\n\t\tdef renderDoors(XDoor, YDoor, occurrencyDoor, doorModel = False):\n\t\t\t\"\"\"\n\t\t\trenderWindows accept the door's cells and the occurrency, and optionally a door generating function \n\t\t\t\"\"\"\n\t\t\tdef renderRoof(vertices, pitchAngle, height):\n\t\t\t\t\"\"\"\n\t\t\t\trenderRoof accept the vertices of the base roof, a pitch angle and the desired height \n\t\t\t\tof the roof\n\t\t\t\t\"\"\"\n\t\t\t\tdef renderLadder(ladderHeight, interStep, riser):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trenderLadder is the inner function used to assembly all 
together, it takes the \n\t\t\t\t\tdesired height of the ladder, an interstep between two step and a riser for the single\n\t\t\t\t\tstep.\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])\n\n\t\t\t\treturn renderLadder\n\n\t\t\treturn renderRoof\n\n\t\treturn renderDoors", "def options(self):\n opt = self.main_window.toplevel()\n cur_l = tkinter.Scale(opt, length=200, label=\"Number of lines:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_rows)\n cur_l.set(self.game.n_row) # initial position of the cursor\n cur_l.pack()\n cur_h = tkinter.Scale(opt, length=200, label=\"Number of columns:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_cols)\n cur_h.set(self.game.n_col)\n cur_h.pack()", "def rowWin( self ):\n\n for x in [0,3,6]:\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+1]\n thirdVal = self.__grid[x+2]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if compiledVal.lower() == 'xxx':\n return 'X'\n\n elif compiledVal.lower() == 'ooo':\n return 'O' \n \n return None", "def __editShowCodeInfo(self):\n self.showEditorInfo(self.activeWindow())", "def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def window(self):\n\tif getattr(self.android.settings, 'LV_AVOID_FOCUSED_COMMAND',\n\t\t\t\tself.android.internal.device.google_experience):\n\t\treturn window.previous(self)\n\n def fallback_window_command():\n try:\n w=self.android.internal.transport.view_server_query( 'FOCUSED\\n' )[0]\n except:\n 
w=\"\"\n return w\n\n try:\n # can't use GET_FOCUS command in secure builds, so fall back to FOCUSED command\n if self.android.device.is_secure_build():\n raise Exception()\n\t w=self.android.internal.transport.view_server_query('GET_FOCUS\\n')[0].split()[1]\n except:\n w = fallback_window_command()\n\n\tself.android.log.verbose(android.ui.TAG, \"Current window: '%s'\" % w)\n\treturn w", "def GetWindow(self):\r\n\r\n return self.window", "def showKey():\n\tnewWindow=topWindow(window)\n\t#Config Context Bar\n\tnewWindow.context.removePlaceholder(0)\n\t#Context commands\n\tnewWindow.context.updateContextButton(0,command=lambda n=newWindow: n.grab_release())\n\tnewWindow.context.updateContextButton(1,command=lambda n=newWindow: n.destroy())\n\t#Add Table\n\tnewTable=table(newWindow,\"Pod Templates\",False)\n\tnewTable.pack(fill=BOTH,expand=True)\n\t#Add the content to the table\n\tfor t in podTemplate.templateColours:\n\t\trowName=t+\" colour:\"\n\t\trowColour=podTemplate.templateColours[t]\n\t\tnewTable.addRow(rowName,t,rowColour)\n\n\tnewWindow.run()", "def window(self):\n return self._window", "def window(self):\n return self._window", "def editor():\n pass", "def get_w(self):\n return self.w", "def display_characters(self):\n return f'{self.character_set}'", "def get_asymwindow(self):\n asymwindow = sum(\n [\n np.concatenate(\n [\n np.zeros(self.win_length-(i*self.hop_length)),\n self.window[:i*self.hop_length] ** 2\n ], axis=0\n ) for i in range(1, self.r_overlap)\n ]\n )\n\n return asymwindow", "def showEditorWindow(parent, title, allowEditting = True):\n frame = wx.Frame(parent, -1, title, size=(630, 320), style = wx.DEFAULT_FRAME_STYLE)\n panel = RichTextPanel(allowEditting, frame, -1)\n #frame.Fit()\n #frame.SetMinSize(frame.GetSize())\n frame.Show()\n return panel", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def display_character(window, name, path_template):\n # Could be improved a lot.\n border_size = 20\n path = \".\".join((path_template, \"200\", \"png\"))\n pic = pygame.image.load(path)\n pic_w, pic_h = pic.get_size()\n text = ft_title.render(\" \".join((\"<-\", name, \"->\")), 1, WHITE)\n text_w, text_h = text.get_size()\n pygame.draw.rect(window, GREY, (SCREEN_W/2 - pic_w/2 - border_size,\n SCREEN_H/2 - pic_h/2 - text_h - border_size,\n pic_w + border_size*2, pic_h + border_size*2),\n border_size)\n window.blit(pic, (SCREEN_W/2 - pic_w/2, SCREEN_H/2 - pic_h/2 - text_h))\n window.blit(text, (SCREEN_W/2 - text_w/2, SCREEN_H/2 + pic_h/2 - text_h/2))", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def is_managar(window):\n enter_code = Text(Point(130,150), \"Enter Manager Code(0 if no managar):\")\n code = Text(Point(130,130),\"\")\n max_chr = Text(Point(130,110), \"Maximum character!\")\n illegal_code = Text(Point(130,110),\"Illegal Code!\")\n wrong_code = Text(Point(130,110),\"Wrong Code!\")\n\n wrong_code.setTextColor(\"red\")\n illegal_code.setTextColor(\"red\")\n max_chr.setTextColor(\"red\")\n\n enter_code.draw(window)\n code.draw(window)\n while (True):\n new_chr = window.getKey()\n max_chr.undraw()\n wrong_code.undraw()\n illegal_code.undraw()\n if new_chr == \"Return\":\n if len(code.getText()) > 4:\n max_chr.draw(window)\n 
wrong_code.undraw()\n if len(code.getText()) < 1:\n illegal_code.draw(window)\n wrong_code.undraw()\n if code.getText() == MANAGAR_CODE:\n return True\n if code.getText() == \"0\":\n return False\n if code.getText() != MANAGAR_CODE and len(code.getText()) > 0:\n wrong_code.draw(window)\n\n if new_chr == \"Space\":\n code.setText(code + \" \")\n if new_chr == \"BackSpace\":\n code.setText(code.getText() + new_chr)\n code = delete_chr(code)\n else:\n if len(new_chr)> 1:\n continue\n if (ord(new_chr) > 126 or ord(new_chr) < 33):\n continue\n else:\n code.setText(code.getText() + new_chr)\n if len(code.getText()) < 5:\n code.undraw()\n code.draw(window)\n else:\n max_chr.draw(window)\n code.setText(code.getText()[:-1])", "def cycleManipulatorSpace():\n validateSelect()\n current_context = pm.currentCtx()\n context_title = pm.contextInfo(current_context, t=True)\n\n if 'Move' in context_title:\n context_mode = pm.manipMoveContext('Move', q=True, mode=True)\n if context_mode == 0:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipMoveContext('Move', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipMoveContext('Move', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Rotate' in context_title:\n context_mode = pm.manipRotateContext('Rotate', q=True, mode=True)\n if context_mode == 0:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n elif context_mode == 1:\n pm.manipRotateContext('Rotate', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Gimbal space.')\n else:\n pm.manipRotateContext('Rotate', edit=True, mode=0)\n pm.displayInfo('In Object space.')\n\n elif 'Scale' in context_title:\n context_mode = pm.manipScaleContext('Scale', q=True, mode=True)\n if context_mode == 0:\n pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In Parent space.')\n elif context_mode == 1:\n pm.manipScaleContext('Scale', edit=True, mode=context_mode + 1)\n pm.displayInfo('In World space.')\n else:\n pm.manipScaleContext('Scale', edit=True, mode=0)\n pm.displayInfo('In Object space.')", "def calc_pos_mod(nmodule):\n pass", "def WIN_BONUS() -> int:\n return 2", "def Butler_mize_window(self):\n\n wind = np.ones([1, dim_matrix*2])\n a_0 = (1-alpha_0)/2\n a_1 = 1/2\n a_2 = a_1-a_0\n for i in range(0, dim_matrix*2):\n wind[0, i] = (a_0-a_1*np.cos(2*np.pi*(i)/(2*dim_matrix-1)) +\n a_2*np.cos(4*np.pi*(i)/(2*dim_matrix-1)))\n # normal blackman function\n for i in range(1+x_m+2*x_0+dim_matrix, x_m+2*x_0+2*dim_matrix):\n # creates the right half\n wind[0, i] = wind[0, i]\n for i in range(0, x_m): # define left side\n wind[0, i] = x_m+1\n wind[0, x_m+dim_matrix] = 1\n wind[0, x_m+dim_matrix-1] = 1\n # these two lines define the center;\n # they make positions 127, 128 both 1\n\n wind[0, x_m] = 0 # makes left side zero\n wind[0, 2*dim_matrix-1] = 0 # makes right side zero\n dwind = np.ones([dim_matrix, dim_matrix])\n # create the array for the next step\n for i in range(0, dim_matrix):\n dwind[:, i] = wind[0, (dim_matrix-i):2*dim_matrix-i]\n wind2 = np.ones([1, dim_matrix])\n for i_2 in range(dim_matrix-round(dim_matrix/4), dim_matrix):\n wind2[0, i_2] = abs(np.sin(np.pi/2*((i_2)/\n (round(dim_matrix/4)))))\n # Taper\n wind2[0, dim_matrix-1] = 0\n wind3 = (wind2*(np.ones([dim_matrix, dim_matrix])))\n windowed = self.baseline_corrected*wind3*np.transpose(wind3)*dwind\n\n return windowed", "def 
createModFrame(self, number,name):\n frame = self.data.modFrames[number]\n display = frame(self.container, self)\n display.grid(row=0, column=0, sticky=\"nsew\")\n if(name not in self.data.activeMod):self.data.activeMod[name]= module()\n self.data.activeMod[name].modFramesNext.append(display)", "def draw_windows():\n martin.begin_fill() # lines 88-118 draw out a row consisting of 3 rectangles for windows\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n martin.hideturtle()", "def drawShortcuts(self, screen, is_player):\n self.draw_text(screen, \"Shortcuts\", 40, 500, 40, 255, 255, 255, False)\n self.draw_text(screen, \"Pause : Escape\", 40, 500, 70, 255, 255, 255, False)\n if is_player:\n self.draw_text(screen, \"Move up : z\", 40, 500, 100, 255, 255, 255, False)\n self.draw_text(screen, \"Move down : s\", 40, 500, 130, 255, 255, 255, False)\n self.draw_text(screen, \"Move left : q\", 40, 500, 160, 255, 255, 255, False)\n self.draw_text(screen, \"Move right : d\", 40, 500, 190, 255, 255, 255, False)\n self.draw_text(screen, \"Random move : Space\", 40, 500, 220, 255, 255, 255, False)\n self.draw_text(screen, \"AI move : a\", 40, 500, 250, 255, 255, 255, False)", "def contextwin(l, win):\n assert (win % 2) == 1\n assert win >= 1\n l = list(l)\n\n lpadded = win // 2 * [-1] + l + + win // 2 * [-1]\n\n out = [lpadded[i:(i + win)] for i in range(len(l))]\n\n assert len(out) == len(l)\n return out", "def win(self):\n return self._get(\"win\")", "def stackingWindows():\n space = 50\n offset = 70\n cv2.moveWindow(\"Original image\", space, space)\n cv2.moveWindow(\"Keypoints original\", space, hsize + space + offset)\n cv2.moveWindow(\"Color matched\", wsize + space, space)\n cv2.moveWindow(\"Keypoints Dark\", wsize + space, hsize + space + offset)", "def focus_on(window):\n return Cmd(\"{}wincmd w\", window)", "def show(self,window):\n self.showFunctions(window)", "def cx():", "def character(game):\n\n while True:\n game.window.clear()\n\n game.window.addstr('{} the level {} adventurer'.format(game.player.name,\n game.player.level()))\n game.window.addstr('\\n\\nWielding a {} in the right hand'.format(\n game.player.get_slot('right hand').name))\n game.window.addstr('\\n\\n{} hp'.format(game.player.health))\n\n key = game.window.getkey()\n\n if key == 'q':\n break", "def onKeyboard(self,win, key, scancode, action, mods):\r\n\r\n print(\"keyboard: \", win, key, scancode, action, mods)\r\n\r\n if key == glfw.KEY_ESCAPE:\r\n self.exitNow = True\r\n\r\n if key == glfw.KEY_LEFT_SHIFT:\r\n if action == glfw.PRESS:\r\n self.pressedShift = True\r\n return\r\n\r\n if action == glfw.RELEASE:\r\n self.pressedShift = False\r\n return\r\n\r\n return\r\n\r\n if key == glfw.KEY_S:\r\n if self.pressedShift:\r\n self.scene.backgroundcolor = (0.0, 0.0, 0.0, 0.0)\r\n return\r\n else:\r\n self.scene.color = (0.0,0.0,0.0)\r\n return\r\n\r\n\r\n\r\n if key == glfw.KEY_W:\r\n if self.pressedShift:\r\n self.scene.backgroundcolor = (1.0, 1.0, 1.0, 1.0)\r\n return\r\n else:\r\n self.scene.color=(1.0,1.0,1.0)\r\n 
return\r\n\r\n\r\n\r\n if key == glfw.KEY_R:\r\n if self.pressedShift:\r\n self.scene.backgroundcolor=(1.0, 0.0, 0.0, 0.0)\r\n else:\r\n self.scene.color = (1.0, 0.0, 0.0)\r\n\r\n\r\n if key == glfw.KEY_B:\r\n if self.pressedShift:\r\n self.scene.backgroundcolor = (0.0, 0.0, 1.0, 0.0)\r\n else:\r\n self.scene.color = (0.0, 0.0, 1.0)\r\n\r\n\r\n if key == glfw.KEY_G:\r\n if self.pressedShift:\r\n self.scene.backgroundcolor = (0.0, 1.0, 0.0, 0.0)\r\n else:\r\n self.scene.color = (0.0, 1.0, 0.0)\r\n\r\n if key == glfw.KEY_H:\r\n if action == glfw.RELEASE:\r\n self.scene.toggle_shadow()\r\n\r\n if key == glfw.KEY_O:\r\n self.isOrthogonal = True\r\n self.set_orthogonal()\r\n\r\n if key == glfw.KEY_P:\r\n self.isOrthogonal = False\r\n self.set_projective()", "def update_edit_fit_window(self):\n model = self._get_selected_model()\n try:\n window = float(self.edit_fit_window.text())\n except:\n return None\n else:\n model.metadata[\"window\"] = window\n return None", "def create_window_constants() -> None:\r\n\r\n self.WIDTH = 1000\r\n self.HEIGHT = 600\r\n\r\n self.WIDGET_PAD = 5 # Widget padding\r\n self.MAIN_BG = '#eeeeee' # Main background\r\n\r\n self.FONT_LARGE = ('Courier',24)\r\n self.FONT_NORMAL = ('Courier', 12)\r\n self.FONT_SMALL = ('Courier', 10)", "def chat_window(window, chat_lines, write_box):\n for i in xrange(25):\n chat_lines[i] = Entry(Point(130,245-(i*9)),80)\n chat_lines[i].draw(window)\n chat_lines[i].setFill(\"white\")\n write_box.draw(window) # draw it to the window\n help(chat_lines)", "def render(self):\n i_start, _, width = self.screen_status\n\n if self.replay_tt is not None:\n aix_replay = bisect.bisect_left(\n self.replay_elapsed, self.tt.elapsed_seconds\n )\n aix_replay = min(self.replay_tt.n_actions - 1, aix_replay)\n i_replay = self.replay_uactions[aix_replay][0]\n\n if self.target_wpm is not None:\n i_target = self.tt.elapsed_seconds * 5 * self.target_wpm / 60\n i_target = min(self.tt.n_characters - 1, int(i_target))\n\n # rended text\n i_print = i_start\n current_ix_print = i_start\n for i, (alist, ch) in enumerate(zip(self.tt.actions, self.tt.text)):\n y, x = divmod(i_print, width)\n\n if i == self.current_ix or not alist:\n # character that we stand on needs to have backspace styling\n status = STATUS_BACKSPACE # same styling\n else:\n status = alist[-1].status\n\n if self.replay_tt is not None and i == i_replay != self.current_ix:\n if status in {STATUS_BACKSPACE, STATUS_CORRECT}:\n # Make sure the normal cursor is visible\n status = \"replay\"\n\n if self.target_wpm is not None and i == i_target != self.current_ix:\n if status in {STATUS_BACKSPACE, STATUS_CORRECT}:\n # Make sure the normal cursor is visible\n status = \"target\"\n\n if i == self.current_ix:\n current_ix_print = i_print\n\n if ch == \"\\n\":\n i_print += width - (i_print % width)\n self.pens[status].addch(self.stdscr, y, x, \" \")\n elif ch == \"\\t\":\n i_print += 4\n self.pens[status].addstr(self.stdscr, y, x, 4 * \" \")\n else:\n i_print += 1\n self.pens[status].addch(self.stdscr, y, x, ch)\n\n # render cursor\n self.cursor.move_abs(self.y_start, self.x_start + current_ix_print)\n\n self.stdscr.refresh()", "def access_window(self):\n return self._access_window", "def Win(self):\n print ( 10*\"*\")\n print (\"Player \" + self.character + \" says:\")\n print (\"I Won\")\n print ( 10*\"*\")", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def cutover_window(self):\n return self._cutover_window", "def get_window_info 
(self):\n \n # g.trace(self.w,self.h,self.x,self.y)\n \n return self.w,self.h,self.x,self.y", "def openCmdWindow(self): \n panel = Toplevel(self.root)\n panel.wm_title('Command Panel')\n\n # create text input entry\n text0 = tki.Label(panel,\n text='This Controller map keyboard inputs to Tello control commands\\n'\n 'Adjust the trackbar to reset distance and degree parameter',\n font='Helvetica 10 bold'\n )\n text0.pack(side='top')\n\n text1 = tki.Label(panel, text=\n 'W - Move Tello Up\\t\\t\\tArrow Up - Move Tello Forward\\n'\n 'S - Move Tello Down\\t\\t\\tArrow Down - Move Tello Backward\\n'\n 'A - Rotate Tello Counter-Clockwise\\tArrow Left - Move Tello Left\\n'\n 'D - Rotate Tello Clockwise\\t\\tArrow Right - Move Tello Right',\n justify='left')\n text1.pack(side='top')\n\n self.btn_landing = tki.Button(\n panel, text='Land', relief='raised', command=self.telloLanding)\n self.btn_landing.pack(side='bottom', fill='both',\n expand='yes', padx=10, pady=5)\n\n self.btn_takeoff = tki.Button(\n panel, text='Takeoff', relief='raised', command=self.telloTakeOff)\n self.btn_takeoff.pack(side='bottom', fill='both',\n expand='yes', padx=10, pady=5)\n\n # binding arrow keys to drone control\n self.tmp_f = tki.Frame(panel, width=100, height=2)\n self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)\n self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)\n self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)\n self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)\n self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)\n self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)\n self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)\n self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)\n self.tmp_f.pack(side='bottom')\n self.tmp_f.focus_set()\n\n self.btn_landing = tki.Button(\n panel, text='Flip', relief='raised', command=self.openFlipWindow)\n self.btn_landing.pack(side='bottom', fill='both',\n expand='yes', padx=10, pady=5)\n\n self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, \n digits=3, label='Distance(m)',\n resolution=0.01)\n self.distance_bar.set(0.2)\n self.distance_bar.pack(side='left')\n\n self.btn_distance = tki.Button(panel, text='Reset Distance', relief='raised',\n command=self.updateDistancebar,\n )\n self.btn_distance.pack(side='left', fill='both',\n expand='yes', padx=10, pady=5)\n\n self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')\n self.degree_bar.set(30)\n self.degree_bar.pack(side='right')\n\n self.btn_distance = tki.Button(panel, text='Reset Degree', relief='raised', \n command=self.updateDegreebar)\n self.btn_distance.pack(side='right', fill='both',\n expand='yes', padx=10, pady=5)", "def display(field):\n side = int(math.sqrt(len(field))) # in number of elements (tiles)\n \n def pos():\n cy, cx = win.getyx()\n stdscr.addstr(0, 0, \"cy: \"+str(cy)+\", cx: \"+str(cx))\n\n def br():\n while True:\n c = stdscr.getch()\n if c == curses.KEY_RIGHT:\n break\n win.refresh()\n\n win.addstr(0, 0, '┏')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('┳')\n win.addstr('━━━━━━')\n win.addstr('┓ ')\n\n for y in range(side):\n \n win.addstr('┃')\n for x in range(side):\n #stdscr.addstr(0, 0, \"side: \" + str(x))\n idx = y * side + x\n if field[idx] == 0:\n win.addstr(' '.center(6))\n else:\n n = field[idx]\n color = curses.color_pair(0)\n if n < 0:\n field[idx] *= -1\n n = field[idx]\n color = curses.A_BOLD | curses.A_STANDOUT\n elif n == 4:\n color = curses.color_pair(3)\n elif n == 8:\n color = curses.color_pair(4)\n 
elif n >= 16:\n color = curses.color_pair(1)\n \n #win.addstr(str(n).center(6), color)\n \n n = str(n)\n left = (6-len(n)) // 2\n right = 6 - (left + len(n))\n win.addstr(left*' ')\n win.addstr(n, color)\n win.addstr(right*' ')\n\n \n win.addstr('┃')\n win.addstr(' ')\n if y == side-1:\n break\n else: \n win.addstr('┣')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('╋')\n win.addstr('━━━━━━')\n win.addstr('┫ ')\n \n win.addstr('┗')\n for _ in range(side-1):\n win.addstr('━━━━━━')\n win.addstr('┻')\n win.addstr('━━━━━━')\n #pos()\n #br()\n win.addstr('┛')\n\n #win.border()\n win.refresh()", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def __udp_define_window(self, i, total):\n return range(i, min(i + const.window_size, total))", "def getWin(self):\n return self.__win", "def create_window(session):\n def create_window():\n windows_before = session.handles\n name = session.execute_script(\"window.open()\")\n assert len(session.handles) == len(windows_before) + 1\n new_windows = list(set(session.handles) - set(windows_before))\n return new_windows.pop()\n return create_window", "def softDeformerUI():\n# TODO - add some kind of help text to each tab\n if cmds.window(\"softModWin\", exists=True):\n cmds.deleteUI(\"softModWin\")\n widgets[\"window\"] = cmds.window(\"softModWin\", t=\"zbw_softDeformer\", w=300, h=130)\n widgets[\"tabLO\"] = cmds.tabLayout()\n widgets[\"smCLO\"] = cmds.columnLayout(\"SoftMod\", w=300)\n\n cmds.separator(h=10)\n widgets[\"smdTFG\"] = cmds.textFieldGrp(l=\"Deformer Name\", w=300, cw=[(1, 100), (2, 190)],\n cal=[(1, \"left\"), (2, \"left\")], tx=\"softMod_DEF\")\n widgets[\"checkCBG\"] = cmds.checkBoxGrp(l=\"AutoCheck if there are deformers?\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"frontCBG\"] = cmds.checkBoxGrp(l=\"Auto move to front of chain\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"scaleFFG\"] = cmds.floatFieldGrp(l=\"Control Scale\", v1=1, pre=2, cw=[(1, 150), (2, 50)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"autoscaleCBG\"] = cmds.checkBoxGrp(l=\"autoscale control?\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"bpFrameIFG\"] = cmds.intFieldGrp(l=\"BindPose/origin Frame\", cw=[(1, 150), (2, 50)],\n cal=[(1, \"left\"), (2, 
\"left\")])\n widgets[\"mainCtrlTFBG\"] = cmds.textFieldButtonGrp(l=\"Parent Object:\", cw=[(1, 75), (2, 175), (3, 75)], cal=[(1,\n \"left\"), (2, \"left\"), (3, \"left\")], bl=\"<<<\", bc=partial(set_parent_object, \"mainCtrlTFBG\"))\n cmds.separator(h=10, style=\"single\")\n widgets[\"smbutton\"] = cmds.button(l=\"Create Deformer\", w=300, h=40, bgc=(.6, .8, .6),\n c=partial(create_soft_mod_deformer, False))\n cmds.separator(h=5)\n widgets[\"wavebutton\"] = cmds.button(l=\"Soft Wave (use falloff to scale wave)\", w=300, h=30, bgc=(.8, .8, .6),\n c=partial(create_soft_mod_deformer, True))\n\n # third tab to do softselect to joint\n cmds.setParent(widgets[\"tabLO\"])\n widgets[\"jointCLO\"] = cmds.columnLayout(\"softJoint\", w=300)\n widgets[\"jntNameTFG\"] = cmds.textFieldGrp(l=\"Joint Name\", w=300, cw=[(1, 100), (2, 190)],\n cal=[(1, \"left\"), (2, \"left\")], tx=\"softSelect_JNT\")\n widgets[\"jntCPOMCBG\"] = cmds.checkBoxGrp(l=\"Joint to closest point on mesh?\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"jntRotCBG\"] = cmds.checkBoxGrp(l=\"Joint orient to surface?\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n widgets[\"jntAutoCBG\"] = cmds.checkBoxGrp(l=\"Create initial jnt if not bound?\", v1=1, cw=[(1, 200)],\n cal=[(1, \"left\"), (2, \"left\")])\n cmds.separator(h=10)\n widgets[\"jntbutton\"] = cmds.button(l=\"Create Joint\", w=300, h=40, bgc=(.6, .8, .6), c=soft_selection_to_joint)\n\n\n\n cmds.window(widgets[\"window\"], e=True, w=5, h=5, rtf=True)\n cmds.showWindow(widgets[\"window\"])", "def get_multiscale_windows(img):\n window_list = list()\n method = \"right\"\n if method == \"right\":\n # for the included video this is fine to improve speed\n # but for other videos, method == \"full\" should be used\n window_list += slide_window(img,\n xy_overlap = (0.75, 0.75),\n x_start_stop = [620, 620 + 6*96],\n y_start_stop = [385, 385 + 2*96],\n xy_window = (96, 96))\n window_list += slide_window(img,\n xy_overlap = (0.75, 0.75),\n x_start_stop = [620, None],\n y_start_stop = [385, 385 + 2*128],\n xy_window = (128, 128))\n elif method == \"full\":\n window_list += slide_window(img,\n xy_overlap = (0.75, 0.75),\n x_start_stop = [620 - 6*96, 620 + 6*96],\n y_start_stop = [385, 385 + 2*96],\n xy_window = (96, 96))\n window_list += slide_window(img,\n xy_overlap = (0.75, 0.75),\n y_start_stop = [385, 385 + 2*128],\n xy_window = (128, 128))\n else:\n raise ValueError(method)\n return window_list", "def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)", "def wts(self, xCord, yCord, txt, col=0):\n\t\theight, width = self.screen.getmaxyx()\n\t\ttxt = txt[:width - yCord]\t# do not draw outside screen\n\t\tif xCord > height:\n\t\t\t self.screen.addstr(1, 1, 'WARNING!! Program tried to write BELOW window! (height=' + str(height) + ', X-coordinate=' + str(xCord) + ')', curses.color_pair(0))\n\t\telif yCord > width:\n\t\t\t self.screen.addstr(1, 1, 'WARNING!! Program tried to write LEFT OF window! 
(width=' + str(width) + ', Y-coordinate=' + str(yCord) + ')', curses.color_pair(0))\n\t\telse:\n\t\t\tself.screen.addstr(xCord, yCord, str(txt), curses.color_pair(col))\n\t\treturn True", "def get_optimal_window(mutation_position_relative, seq_len_wo_special, model_window):\n half_model_window = model_window // 2\n if seq_len_wo_special <= model_window:\n return [0,seq_len_wo_special]\n elif mutation_position_relative < half_model_window:\n return [0,model_window]\n elif mutation_position_relative >= seq_len_wo_special - half_model_window:\n return [seq_len_wo_special - model_window, seq_len_wo_special]\n else:\n return [max(0,mutation_position_relative-half_model_window), min(seq_len_wo_special,mutation_position_relative+half_model_window)]", "def __call__(self) -> abjad.Selection:\n if (self._repetition_chance == 0.0\n or random.random() > self._repetition_chance):\n if not self._is_first_window or self._process_on_first_call:\n if self._mode == 'out':\n self._remove_element()\n else:\n self._add_element()\n elif not self._include_empty_measures and self._mode == 'in':\n self._add_element()\n self._mask_to_selection()\n return self.current_window", "def enum_callback(hwnd, _):\n if GetWindowText(hwnd)[:7] == 'models\\\\':\n SetForegroundWindow(hwnd)\n ShowWindow(hwnd, SW_MAXIMIZE)\n global rect\n rect = GetWindowRect(hwnd)" ]
[ "0.60196453", "0.5610308", "0.5482973", "0.5434321", "0.5421594", "0.5412486", "0.5364167", "0.5364138", "0.5340363", "0.5302406", "0.530197", "0.5284308", "0.5282597", "0.5280957", "0.52492845", "0.52104515", "0.517051", "0.5156331", "0.5152639", "0.51405567", "0.51250046", "0.5121234", "0.51160693", "0.5083318", "0.50644124", "0.50596154", "0.50479925", "0.50252306", "0.5023388", "0.50059193", "0.50008094", "0.49930498", "0.49830726", "0.49816436", "0.49647152", "0.495642", "0.4949736", "0.4938498", "0.49127156", "0.4907299", "0.48995763", "0.48894545", "0.4879059", "0.48745337", "0.48717362", "0.4867521", "0.4866111", "0.4863139", "0.4856317", "0.48481193", "0.48432335", "0.4835429", "0.48224667", "0.48224667", "0.48219368", "0.4815671", "0.48105708", "0.48089427", "0.48067784", "0.4804288", "0.48001918", "0.47879943", "0.47878549", "0.4776396", "0.47748524", "0.47731408", "0.47725436", "0.4766331", "0.4759025", "0.47531945", "0.474856", "0.47447038", "0.47408494", "0.4738372", "0.4738098", "0.47202718", "0.47140217", "0.47133636", "0.47097152", "0.4702783", "0.46998474", "0.46973386", "0.46927917", "0.4691668", "0.46914986", "0.46880773", "0.46808594", "0.46803063", "0.4675767", "0.46757507", "0.46625018", "0.46622717", "0.46582878", "0.46526483", "0.46469128", "0.4644863", "0.464377", "0.46361753", "0.46345964", "0.4631911" ]
0.65728295
0
Function that represents the window to which Enemy Mods can be applied.
def enemy_window():
    path_dir = r'Sor_Mods_Storage\enemies'
    enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    enemies = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(enemies, image=mainTitleImg)
    title = tk.Label(enemies, text="Enemies Mods")

    comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))

    def apply_enemy_mod():
        char_selected = comboBox_enemies.get()
        result_window = tk.Toplevel()

        value = ''
        if char_selected == '':
            value = f'{value} Please Select an Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')
            value = f'Enemy Mod {char_selected} applied!'

        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)

    title.grid(row=0, column=0)
    comboBox_enemies.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def GetWindow(self):\r\n\r\n return self.window", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def show(self, window):\r\n\r\n return", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, **kwargs)->None:\n pass", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def current_window(self):\n pass", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n 
self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)", "def details_window(self, instance: Union[Nobleman, Location]):\n window = tk.Toplevel()\n window.title(instance.name)\n window.protocol(\"WM_DELETE_WINDOW\",\n partial(self.close_details_window, instance))\n self.register_extra_window(instance, window)\n self.generate_window_content(instance, window)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, 
bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)", "def pallete_window():\n path_dir = r'Sor_Mods_Storage\\palletes'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n palletes = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(palletes, image=mainTitleImg)\n title = tk.Label(palletes, text=\"Pallete Mods\")\n\n comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys()))\n\n def apply_pallete_mod():\n pallete_selected = comboBox_palletes.get()\n result_window = tk.Toplevel()\n\n value = ''\n if pallete_selected == '':\n value = f'{value} Please Select an Pallete to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes')\n value = f'Enemy Mod {pallete_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod)\n\n title.grid(row=0, column=0)\n comboBox_palletes.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def _get_window_width(self):", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def get_parent_window(self): # real signature unknown; restored from __doc__\n pass", "def build(theme: str) -> sg.Window:\n\n # yapf: disable\n sg.theme(theme)\n des=['Top 10 de palabras que se encuentran primero de todas las partidas','Porcentaje de partidas por estado (terminada, cancelada,abandonadas)','Porcentaje de partidas finalizadas según género',\n 'Cantidad de partidas que se juegan para cada día de la semana','Promedio de tiempo de partidas finalizadas por nivel.','Porcentaje de palabras encontradas en las partidas timeout.'\n ]\n tab_layout=[[[sg.Text(des[x],font=(f\"{WINDOW_FONT}\", WINDOW_FONT_SIZE))],[sg.Canvas(key=f\"-CANVAS{x}-\")]] for x in range(len(des))]\n\n layout = [[sg.Text(f\"Estadisticas\",font=(WINDOW_TITLE_FONT, WINDOW_FONT_SIZE * 2))],\n 
[sg.TabGroup([[sg.Tab(f'Gráfico {l+1}',tab_layout[l],element_justification='center') for l in range(len(des))]])],\n [sg.Button(\"Menu\",key=\"-BACK BUTTON-\")]\n ]\n # yapf: enable\n stat_window = sg.Window(\"Stats\",layout,finalize=True,element_justification='center',margins=(10, 10),size=(900, 700))\n info = pd.read_csv(os.path.join(os.getcwd(), GAME_INFO_PATH),encoding='utf-8')\n draw_figure(stat_window['-CANVAS0-'].TKCanvas, top_10_palabras(info))\n stat_window.refresh() #Esta linea permite que se muestre más rápido el primer gráfico, dando tiempo a que se creen los demás\n draw_figure(stat_window['-CANVAS1-'].TKCanvas, partidas_por_estado(info))\n draw_figure(stat_window['-CANVAS2-'].TKCanvas, partidas_por_genero(info))\n draw_figure(stat_window['-CANVAS3-'].TKCanvas, partidas_por_dia(info))\n draw_figure(stat_window['-CANVAS4-'].TKCanvas,promedio_tiempo_por_nivel(info))\n draw_figure(stat_window['-CANVAS5-'].TKCanvas,cant_encontradas_en_timeout(info))\n\n return stat_window", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()", "def menu_screen(win):\n\tpass", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def getMayaWindow():\n ptr = openmayaui.MQtUtil.mainWindow()\n return wrapInstance(long(ptr), QtWidgets.QMainWindow)", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def window(self):\n return self.attribute('VW')", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = 
self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def popup_info(self, force_object, event):\n self.window.log('window popup called')\n po = force_object\n\n def callb():\n if type(po) == Physics.PhysicsObject:\n fow = PhysicsWindow.PhysicsObjectWindow(self.window, po, event.x, event.y)\n return callb", "def gui(self):\n return gui", "def show(self,window):\n self.showFunctions(window)", "def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def getwinsize(self):", "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n 
(widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n (widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 
110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On 
Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n (widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = 
cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 
95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n (widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n 
(widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n 
widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", 
\"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? \")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def access_window(self):\n return self._access_window", "def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)", "def get_window_info (self):\n \n # g.trace(self.w,self.h,self.x,self.y)\n \n return self.w,self.h,self.x,self.y", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def GetWindow(self):\r\n\r\n return self._wnd", "def main_window(self) -> MainWindow:\n return self._main_window", "def show_create(self):\n\t\t# Get a rectangle with amargin.\n\t\trect = self.renderer._get_rect()\n\t\trect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)\n\n\t\tself.f_tab = ow.Table(4, 2)\n\t\tself.f_tab.topleft = (rect[0], rect[1])\n\n\t\t# Name of the game textbox.\n\t\tself.e_gamename = ow.Entry(\"Ship Wreckyard\")\n\t\tself.l_gamename = ow.Label(\"Name of the game: \")\n\t\tself.f_tab.add_child(0, 0, self.l_gamename)\n\t\tself.f_tab.add_child(0, 1, self.e_gamename)\n\n\t\t# Number of players.\n\t\tself.e_players = ow.Entry(\"2\")\n\t\tself.l_players = ow.Label(\"Number of players: \")\n\t\tself.f_tab.add_child(1, 0, 
self.l_players)\n\t\tself.f_tab.add_child(1, 1, self.e_players)\n\n\t\t# Board size.\n\t\tself.l_boardw = ow.Label(\"Board width: \")\n\t\tself.e_boardw = ow.Entry(\"10\")\n\t\tself.l_boardh = ow.Label(\"Board height: \")\n\t\tself.e_boardh = ow.Entry(\"10\")\n\t\tself.f_tab.add_child(2, 0, self.l_boardw)\n\t\tself.f_tab.add_child(2, 1, self.e_boardw)\n\t\tself.f_tab.add_child(3, 0, self.l_boardh)\n\t\tself.f_tab.add_child(3, 1, self.e_boardh)\n\n\t\t# Create Game button.\n\t\tself.b_cancel = ow.Button(\"Cancel\")\n\t\tself.b_cancel.topleft = (rect[2] - self.b_cancel.width - 100, rect[3] - self.b_cancel.height)\n\t\tself.b_cancel.connect_signal(oc.SIG_CLICKED, self.do_lobby)\n\n\t\t# Cancel button.\n\t\tself.b_create = ow.Button(\"Start Game\")\n\t\tself.b_create.topleft = (rect[2] - self.b_create.width, rect[3] - self.b_create.height)\n\t\tself.b_create.connect_signal(oc.SIG_CLICKED, self.do_start_hosted)\n\n\t\t# Add all the widgets.\n\t\tself.renderer.add_widget(self.f_tab)\n\t\tself.renderer.add_widget(self.b_create)\n\t\tself.renderer.add_widget(self.b_cancel)", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def _getMayaWindow():\n\n ptr = OpenMayaUI.MQtUtil.mainWindow ()\n if ptr is not None:\n return wrapInstance (long (ptr), QMainWindow)", "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def create_screen(self, width, height):", "def __window_print(self):\n pass", "def win_game(self):\n wingame = GLabel('YOU WIN! 
:D')\n wingame.font = '-50'\n self.window.add(wingame, x=self.window.width / 6, y=self.window.height * 0.666)", "def renderWindows(XWindow, YWindow, occurrencyWindow, windowModel = False):\n\t\tdef renderDoors(XDoor, YDoor, occurrencyDoor, doorModel = False):\n\t\t\t\"\"\"\n\t\t\trenderWindows accept the door's cells and the occurrency, and optionally a door generating function \n\t\t\t\"\"\"\n\t\t\tdef renderRoof(vertices, pitchAngle, height):\n\t\t\t\t\"\"\"\n\t\t\t\trenderRoof accept the vertices of the base roof, a pitch angle and the desired height \n\t\t\t\tof the roof\n\t\t\t\t\"\"\"\n\t\t\t\tdef renderLadder(ladderHeight, interStep, riser):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trenderLadder is the inner function used to assembly all together, it takes the \n\t\t\t\t\tdesired height of the ladder, an interstep between two step and a riser for the single\n\t\t\t\t\tstep.\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])\n\n\t\t\t\treturn renderLadder\n\n\t\t\treturn renderRoof\n\n\t\treturn renderDoors", "def winScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"Congratulations! 
You win!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def getWidgetClass(self):\n\t\treturn AbstraccionWindowWidget", "def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)", "def getWin(self):\n return self.__win", "def __handle_view_win_condition(self, gamestate_component):", "def get_active_window(self): # real signature unknown; restored from __doc__\n pass", "def create_window(self):\r\n pos_x = self.root.winfo_x()\r\n pos_y = self.root.winfo_y()\r\n\r\n about_window = tk.Toplevel(self)\r\n about_window.geometry('380x345' + f\"+{pos_x + 250}+{pos_y + 100}\")\r\n about_window.iconbitmap('icon.ico')\r\n about_window.resizable(False, False)\r\n\r\n # creates an 'Ok' buttons that allow the user to closes the About window\r\n ok_btn = HoverButton(about_window, text=\"Ok\", height=1, width=6, command=about_window.destroy)\r\n ok_btn.grid(row=3, column=0, sticky=tk.E, padx=10, pady=5)\r\n\r\n about_label = tk.Label(about_window, text=\"Version Changes:\", )\r\n about_label.grid(row=1, column=0, sticky=tk.W, padx=10, pady=5)\r\n\r\n about_frame = tk.Frame(about_window)\r\n about_frame.grid(row=2, column=0, sticky=tk.W, padx=10, pady=5)\r\n\r\n text_box = tk.Text(about_frame, height=17, width=46, font=(\"Calibri\", 10))\r\n text_box.grid(row=2, column=0, sticky=tk.W, padx=5)\r\n changes = open(\"credit.txt\").read()\r\n text_box.insert(tk.END, changes)\r\n\r\n # adds a scrollbar for easier navigation for quicker viewing of version changes\r\n scrollbar = tk.Scrollbar(about_frame, command=text_box.yview)\r\n text_box.config(yscrollcommand=scrollbar.set, state=tk.DISABLED)\r\n scrollbar.grid(row=2, column=1, sticky='ns')\r\n about_window.transient(self.root)", "def window(*args, width: int = 200, height: int = 200, autosize: bool = False,\n no_resize: bool = False, no_title_bar: bool = False, no_move: bool = False, no_scrollbar: bool = False,\n no_collapse: bool = False, horizontal_scrollbar: bool = False, no_focus_on_appearing: bool = False,\n no_bring_to_front_on_focus: bool = False, menubar: bool = False, no_close: 
bool = False,\n no_background: bool = False, label: str = '', show: bool = True, collapsed: bool = False,\n modal: bool = False, popup: bool = False,\n on_close: Callable = None, min_size: List[int]=[32, 32], max_size: List[int] = [30000, 30000], id:str=''):\n try:\n\n widget = internal_dpg.add_window(*args, width=width, height=height, autosize=autosize,\n no_resize=no_resize, no_title_bar=no_title_bar, no_move=no_move,\n no_scrollbar=no_scrollbar, no_collapse=no_collapse,\n horizontal_scrollbar=horizontal_scrollbar,\n no_focus_on_appearing=no_focus_on_appearing,\n no_bring_to_front_on_focus=no_bring_to_front_on_focus,\n menubar=menubar, no_close=no_close,\n no_background=no_background, label=label, show=show, \n collapsed=collapsed, on_close=on_close,\n min_size=min_size, max_size=max_size, id=id, modal=modal,\n popup=popup)\n internal_dpg.push_container_stack(widget)\n yield widget\n\n finally:\n internal_dpg.pop_container_stack()", "def get_test_window(self, window_id, parent):\n pass", "def win(self):\n return self._get(\"win\")", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return shiboken2.wrapInstance(long(main_window), PySide2.QtWidgets.QWidget)", "def window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:\n return pulumi.get(self, \"window\")", "def GetMainWindow(self):\r\n \r\n return self._main_win", "def getMayaMainWindow():\n ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(ptr), QtWidgets.QMainWindow)", "def getRenWin(self):\n return self.renWinInteract.GetRenderWindow()", "def popup_add(self, event):\n def callb():\n PhysicsWindow.AddObjectWindow(self.window, event)\n return callb", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def build_second_window():\r\n\r\n new_window = tk.Tk()\r\n windows.append(new_window)\r\n new_window.protocol(\"WM_DELETE_WINDOW\", new_round(new_window))\r\n\r\n ask = tk.Label(new_window, text='Would You Like To Play Again?', bg='Cyan')\r\n ask.pack(fill=tk.X)\r\n\r\n frame = tk.Frame(new_window)\r\n frame.pack()\r\n\r\n yes_button = tk.Button(frame, text='Yes', bg='green',\r\n command=new_round(new_window))\r\n yes_button.pack(side=tk.LEFT)\r\n\r\n no_button = tk.Button(frame, text='No', bg='red',\r\n command=close)\r\n no_button.pack(side=tk.LEFT)", "def GetWindow(self):\n\n return self._window", "def __init__(self):\n self.defaultTheme = \"DarkAmber\"\n self.version = 1.4\n self.versionName = \"class update\"\n self.title = \"Lms GUI default window\"\n self.layout = [[sg.Text(\"This is the base window class layout.\")]]\n self.elementJustification = 'c'\n self.location=(500, 300)\n self.running = True\n self.window = None\n self.event = \"\"\n self.values = []\n self.nextAction = None", "def get_help_window(self):\n self.gui.active_window.hide()\n\n self.associated_window = help_window.HelpWindow(self.gui)\n self.gui.active_window = self.associated_window\n\n self.gui.active_window.show()", "def 
window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def new_game_function(self):\n\n def new_game(_):\n\n self.console.children = []\n\n self.answer = [random.randint(1, 4) for j in range(4)]\n\n self.trials.children = [wd.HBox([wd.VBox(\n [self.answer_box, self.arrows_box])], layout={'margin': '0px 0px 20px 0px'})]\n self.turn = 0\n for selector in self.selectors.children:\n selector.disabled = False\n self.confirm_button.disabled = False\n self.try_return = {'well_placed': 0, 'misplaced': 0}\n\n for button in self.answer_box.children:\n button.icon = 'question'\n button.style.button_color = '#4B4A4E'\n\n return new_game", "def win(self):\n return \"Win\"", "def root_wdgt(self):\n self.summarize()\n modes = ['Global', 'Single-Image']\n\n def logic(mode):\n # cache the widget later\n if mode == modes[0]:\n if self.global_walk is None:\n self.global_walk = self.global_walk_specifier()\n ipy_display(self.global_walk)\n elif mode == modes[1]:\n self.image_view = self.single_image_selector()\n # if self.image_view is None:\n # self.image_view = self.single_image_selector()\n # ipy_display(self.image_view)\n\n UI = interactive(\n logic, mode=widgets.ToggleButtons(options=modes, value=modes[0])\n )\n UI.children[-1].layout.height = '1000px'\n ipy_display(UI)", "def get_maya_window():\n\twindow = apiui.MQtUtil.mainWindow()\n\tif window is not None:\n\t\treturn shiboken2.wrapInstance(long(window), QtWidgets.QWidget)", "def GetManagedWindow(self):\r\n \r\n return self._frame", "def newwin(self,name,sizeY,sizeX,offsetY,offsetX, border=False):\n\t\tself.windows[name]=Window(sizeY,sizeX,offsetY,offsetX,border)\n\t\treturn self.windows[name]", "def createAboutWindow(self):\n if (not hasattr(self, \"about_window\")):\n self.about_window = AboutWindow(self)\n self.about_window.show()", "def TransferToWindow(self):\n return True", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the whole top-level\n # window. 
a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def current_swing_mode(self):\n return None" ]
[ "0.64761674", "0.63803315", "0.6314744", "0.6216543", "0.62010247", "0.6149478", "0.60766095", "0.60618967", "0.6037659", "0.60210836", "0.60210836", "0.600711", "0.6003741", "0.5992173", "0.59492654", "0.58889663", "0.58503634", "0.57341456", "0.56976503", "0.56692207", "0.56683326", "0.56683326", "0.5647913", "0.5622526", "0.5618607", "0.56168467", "0.5615513", "0.5606547", "0.5605087", "0.5602058", "0.5595254", "0.5594606", "0.5594606", "0.5594606", "0.559301", "0.5587338", "0.5578012", "0.5577042", "0.5575164", "0.55749136", "0.55721724", "0.5571685", "0.5569759", "0.5558504", "0.55582494", "0.55534625", "0.55463964", "0.5531157", "0.5520725", "0.5502822", "0.5492181", "0.54910886", "0.5482239", "0.5479704", "0.54730624", "0.54628146", "0.54606456", "0.5447724", "0.5445508", "0.54398286", "0.5439361", "0.54107577", "0.54013807", "0.5398669", "0.53974915", "0.53945947", "0.53940773", "0.5392592", "0.5391695", "0.5391066", "0.5386261", "0.5369335", "0.536921", "0.53671426", "0.5366132", "0.5364958", "0.5362989", "0.53600204", "0.5358397", "0.5358282", "0.5357012", "0.53247494", "0.53197736", "0.5319302", "0.5317206", "0.5315295", "0.5314823", "0.5312823", "0.5306877", "0.52916443", "0.5281437", "0.5280979", "0.5274443", "0.5270977", "0.5270586", "0.5266586", "0.5260119", "0.52521443", "0.5239343", "0.5237938" ]
0.678502
0
Function that represents the window in which Enemy Mods can be applied.
def pallete_window():
    path_dir = r'Sor_Mods_Storage\palletes'
    char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    palletes = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(palletes, image=mainTitleImg)
    title = tk.Label(palletes, text="Pallete Mods")

    comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys()))

    def apply_pallete_mod():
        pallete_selected = comboBox_palletes.get()
        result_window = tk.Toplevel()

        value = ''
        if pallete_selected == '':
            value = f'{value} Please Select an Pallete to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes')
            value = f'Enemy Mod {pallete_selected} applied!'

        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod)

    title.grid(row=0, column=0)
    comboBox_palletes.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def GetWindow(self):\r\n\r\n return self.window", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n 
comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def show(self, window):\r\n\r\n return", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, **kwargs)->None:\n pass", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def current_window(self):\n pass", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)", "def details_window(self, instance: Union[Nobleman, Location]):\n window = tk.Toplevel()\n window.title(instance.name)\n window.protocol(\"WM_DELETE_WINDOW\",\n partial(self.close_details_window, instance))\n self.register_extra_window(instance, window)\n self.generate_window_content(instance, window)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], 
bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)", "def _get_window_width(self):", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def get_parent_window(self): # real signature unknown; restored from __doc__\n pass", "def build(theme: str) -> sg.Window:\n\n # yapf: disable\n sg.theme(theme)\n des=['Top 10 de palabras que se encuentran primero de todas las partidas','Porcentaje de partidas por estado (terminada, cancelada,abandonadas)','Porcentaje de partidas finalizadas según género',\n 'Cantidad de partidas que se juegan para cada día de la semana','Promedio de tiempo de partidas finalizadas por nivel.','Porcentaje de palabras encontradas en las partidas timeout.'\n ]\n tab_layout=[[[sg.Text(des[x],font=(f\"{WINDOW_FONT}\", WINDOW_FONT_SIZE))],[sg.Canvas(key=f\"-CANVAS{x}-\")]] for x in range(len(des))]\n\n layout = [[sg.Text(f\"Estadisticas\",font=(WINDOW_TITLE_FONT, WINDOW_FONT_SIZE * 2))],\n [sg.TabGroup([[sg.Tab(f'Gráfico 
{l+1}',tab_layout[l],element_justification='center') for l in range(len(des))]])],\n [sg.Button(\"Menu\",key=\"-BACK BUTTON-\")]\n ]\n # yapf: enable\n stat_window = sg.Window(\"Stats\",layout,finalize=True,element_justification='center',margins=(10, 10),size=(900, 700))\n info = pd.read_csv(os.path.join(os.getcwd(), GAME_INFO_PATH),encoding='utf-8')\n draw_figure(stat_window['-CANVAS0-'].TKCanvas, top_10_palabras(info))\n stat_window.refresh() #Esta linea permite que se muestre más rápido el primer gráfico, dando tiempo a que se creen los demás\n draw_figure(stat_window['-CANVAS1-'].TKCanvas, partidas_por_estado(info))\n draw_figure(stat_window['-CANVAS2-'].TKCanvas, partidas_por_genero(info))\n draw_figure(stat_window['-CANVAS3-'].TKCanvas, partidas_por_dia(info))\n draw_figure(stat_window['-CANVAS4-'].TKCanvas,promedio_tiempo_por_nivel(info))\n draw_figure(stat_window['-CANVAS5-'].TKCanvas,cant_encontradas_en_timeout(info))\n\n return stat_window", "def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def menu_screen(win):\n\tpass", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def getMayaWindow():\n ptr = openmayaui.MQtUtil.mainWindow()\n return wrapInstance(long(ptr), QtWidgets.QMainWindow)", "def window(self):\n return self.attribute('VW')", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n 
self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def popup_info(self, force_object, event):\n self.window.log('window popup called')\n po = force_object\n\n def callb():\n if type(po) == Physics.PhysicsObject:\n fow = PhysicsWindow.PhysicsObjectWindow(self.window, po, event.x, event.y)\n return callb", "def show(self,window):\n self.showFunctions(window)", "def gui(self):\n return gui", "def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def getwinsize(self):", "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n 
(widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n (widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 
110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On 
Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n (widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = (.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = 
cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], \"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 
95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n (widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n 
(widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n 
widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n (widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", 
\"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? \")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)", "def access_window(self):\n return self._access_window", "def get_window_info (self):\n \n # g.trace(self.w,self.h,self.x,self.y)\n \n return self.w,self.h,self.x,self.y", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def GetWindow(self):\r\n\r\n return self._wnd", "def main_window(self) -> MainWindow:\n return self._main_window", "def show_create(self):\n\t\t# Get a rectangle with amargin.\n\t\trect = self.renderer._get_rect()\n\t\trect = (rect[0] + 16, rect[1] + 16, rect[2] - 16, rect[3] - 16)\n\n\t\tself.f_tab = ow.Table(4, 2)\n\t\tself.f_tab.topleft = (rect[0], rect[1])\n\n\t\t# Name of the game textbox.\n\t\tself.e_gamename = ow.Entry(\"Ship Wreckyard\")\n\t\tself.l_gamename = ow.Label(\"Name of the game: \")\n\t\tself.f_tab.add_child(0, 0, self.l_gamename)\n\t\tself.f_tab.add_child(0, 1, self.e_gamename)\n\n\t\t# Number of players.\n\t\tself.e_players = ow.Entry(\"2\")\n\t\tself.l_players = ow.Label(\"Number of players: \")\n\t\tself.f_tab.add_child(1, 0, 
self.l_players)\n\t\tself.f_tab.add_child(1, 1, self.e_players)\n\n\t\t# Board size.\n\t\tself.l_boardw = ow.Label(\"Board width: \")\n\t\tself.e_boardw = ow.Entry(\"10\")\n\t\tself.l_boardh = ow.Label(\"Board height: \")\n\t\tself.e_boardh = ow.Entry(\"10\")\n\t\tself.f_tab.add_child(2, 0, self.l_boardw)\n\t\tself.f_tab.add_child(2, 1, self.e_boardw)\n\t\tself.f_tab.add_child(3, 0, self.l_boardh)\n\t\tself.f_tab.add_child(3, 1, self.e_boardh)\n\n\t\t# Create Game button.\n\t\tself.b_cancel = ow.Button(\"Cancel\")\n\t\tself.b_cancel.topleft = (rect[2] - self.b_cancel.width - 100, rect[3] - self.b_cancel.height)\n\t\tself.b_cancel.connect_signal(oc.SIG_CLICKED, self.do_lobby)\n\n\t\t# Cancel button.\n\t\tself.b_create = ow.Button(\"Start Game\")\n\t\tself.b_create.topleft = (rect[2] - self.b_create.width, rect[3] - self.b_create.height)\n\t\tself.b_create.connect_signal(oc.SIG_CLICKED, self.do_start_hosted)\n\n\t\t# Add all the widgets.\n\t\tself.renderer.add_widget(self.f_tab)\n\t\tself.renderer.add_widget(self.b_create)\n\t\tself.renderer.add_widget(self.b_cancel)", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def _getMayaWindow():\n\n ptr = OpenMayaUI.MQtUtil.mainWindow ()\n if ptr is not None:\n return wrapInstance (long (ptr), QMainWindow)", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def create_screen(self, width, height):", "def __window_print(self):\n pass", "def win_game(self):\n wingame = GLabel('YOU WIN! 
:D')\n wingame.font = '-50'\n self.window.add(wingame, x=self.window.width / 6, y=self.window.height * 0.666)", "def renderWindows(XWindow, YWindow, occurrencyWindow, windowModel = False):\n\t\tdef renderDoors(XDoor, YDoor, occurrencyDoor, doorModel = False):\n\t\t\t\"\"\"\n\t\t\trenderWindows accept the door's cells and the occurrency, and optionally a door generating function \n\t\t\t\"\"\"\n\t\t\tdef renderRoof(vertices, pitchAngle, height):\n\t\t\t\t\"\"\"\n\t\t\t\trenderRoof accept the vertices of the base roof, a pitch angle and the desired height \n\t\t\t\tof the roof\n\t\t\t\t\"\"\"\n\t\t\t\tdef renderLadder(ladderHeight, interStep, riser):\n\t\t\t\t\t\"\"\"\n\t\t\t\t\trenderLadder is the inner function used to assembly all together, it takes the \n\t\t\t\t\tdesired height of the ladder, an interstep between two step and a riser for the single\n\t\t\t\t\tstep.\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])\n\n\t\t\t\treturn renderLadder\n\n\t\t\treturn renderRoof\n\n\t\treturn renderDoors", "def getWidgetClass(self):\n\t\treturn AbstraccionWindowWidget", "def winScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"Congratulations! 
You win!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)", "def getWin(self):\n return self.__win", "def __handle_view_win_condition(self, gamestate_component):", "def get_active_window(self): # real signature unknown; restored from __doc__\n pass", "def create_window(self):\r\n pos_x = self.root.winfo_x()\r\n pos_y = self.root.winfo_y()\r\n\r\n about_window = tk.Toplevel(self)\r\n about_window.geometry('380x345' + f\"+{pos_x + 250}+{pos_y + 100}\")\r\n about_window.iconbitmap('icon.ico')\r\n about_window.resizable(False, False)\r\n\r\n # creates an 'Ok' buttons that allow the user to closes the About window\r\n ok_btn = HoverButton(about_window, text=\"Ok\", height=1, width=6, command=about_window.destroy)\r\n ok_btn.grid(row=3, column=0, sticky=tk.E, padx=10, pady=5)\r\n\r\n about_label = tk.Label(about_window, text=\"Version Changes:\", )\r\n about_label.grid(row=1, column=0, sticky=tk.W, padx=10, pady=5)\r\n\r\n about_frame = tk.Frame(about_window)\r\n about_frame.grid(row=2, column=0, sticky=tk.W, padx=10, pady=5)\r\n\r\n text_box = tk.Text(about_frame, height=17, width=46, font=(\"Calibri\", 10))\r\n text_box.grid(row=2, column=0, sticky=tk.W, padx=5)\r\n changes = open(\"credit.txt\").read()\r\n text_box.insert(tk.END, changes)\r\n\r\n # adds a scrollbar for easier navigation for quicker viewing of version changes\r\n scrollbar = tk.Scrollbar(about_frame, command=text_box.yview)\r\n text_box.config(yscrollcommand=scrollbar.set, state=tk.DISABLED)\r\n scrollbar.grid(row=2, column=1, sticky='ns')\r\n about_window.transient(self.root)", "def window(*args, width: int = 200, height: int = 200, autosize: bool = False,\n no_resize: bool = False, no_title_bar: bool = False, no_move: bool = False, no_scrollbar: bool = False,\n no_collapse: bool = False, horizontal_scrollbar: bool = False, no_focus_on_appearing: bool = False,\n no_bring_to_front_on_focus: bool = False, menubar: bool = False, no_close: bool = False,\n no_background: bool = False, label: str = '', show: 
bool = True, collapsed: bool = False,\n modal: bool = False, popup: bool = False,\n on_close: Callable = None, min_size: List[int]=[32, 32], max_size: List[int] = [30000, 30000], id:str=''):\n try:\n\n widget = internal_dpg.add_window(*args, width=width, height=height, autosize=autosize,\n no_resize=no_resize, no_title_bar=no_title_bar, no_move=no_move,\n no_scrollbar=no_scrollbar, no_collapse=no_collapse,\n horizontal_scrollbar=horizontal_scrollbar,\n no_focus_on_appearing=no_focus_on_appearing,\n no_bring_to_front_on_focus=no_bring_to_front_on_focus,\n menubar=menubar, no_close=no_close,\n no_background=no_background, label=label, show=show, \n collapsed=collapsed, on_close=on_close,\n min_size=min_size, max_size=max_size, id=id, modal=modal,\n popup=popup)\n internal_dpg.push_container_stack(widget)\n yield widget\n\n finally:\n internal_dpg.pop_container_stack()", "def get_test_window(self, window_id, parent):\n pass", "def win(self):\n return self._get(\"win\")", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return shiboken2.wrapInstance(long(main_window), PySide2.QtWidgets.QWidget)", "def window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:\n return pulumi.get(self, \"window\")", "def GetMainWindow(self):\r\n \r\n return self._main_win", "def getRenWin(self):\n return self.renWinInteract.GetRenderWindow()", "def popup_add(self, event):\n def callb():\n PhysicsWindow.AddObjectWindow(self.window, event)\n return callb", "def getMayaMainWindow():\n ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(ptr), QtWidgets.QMainWindow)", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def build_second_window():\r\n\r\n new_window = tk.Tk()\r\n windows.append(new_window)\r\n new_window.protocol(\"WM_DELETE_WINDOW\", new_round(new_window))\r\n\r\n ask = tk.Label(new_window, text='Would You Like To Play Again?', bg='Cyan')\r\n ask.pack(fill=tk.X)\r\n\r\n frame = tk.Frame(new_window)\r\n frame.pack()\r\n\r\n yes_button = tk.Button(frame, text='Yes', bg='green',\r\n command=new_round(new_window))\r\n yes_button.pack(side=tk.LEFT)\r\n\r\n no_button = tk.Button(frame, text='No', bg='red',\r\n command=close)\r\n no_button.pack(side=tk.LEFT)", "def GetWindow(self):\n\n return self._window", "def __init__(self):\n self.defaultTheme = \"DarkAmber\"\n self.version = 1.4\n self.versionName = \"class update\"\n self.title = \"Lms GUI default window\"\n self.layout = [[sg.Text(\"This is the base window class layout.\")]]\n self.elementJustification = 'c'\n self.location=(500, 300)\n self.running = True\n self.window = None\n self.event = \"\"\n self.values = []\n self.nextAction = None", "def get_help_window(self):\n self.gui.active_window.hide()\n\n self.associated_window = help_window.HelpWindow(self.gui)\n self.gui.active_window = self.associated_window\n\n self.gui.active_window.show()", "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def 
new_game_function(self):\n\n def new_game(_):\n\n self.console.children = []\n\n self.answer = [random.randint(1, 4) for j in range(4)]\n\n self.trials.children = [wd.HBox([wd.VBox(\n [self.answer_box, self.arrows_box])], layout={'margin': '0px 0px 20px 0px'})]\n self.turn = 0\n for selector in self.selectors.children:\n selector.disabled = False\n self.confirm_button.disabled = False\n self.try_return = {'well_placed': 0, 'misplaced': 0}\n\n for button in self.answer_box.children:\n button.icon = 'question'\n button.style.button_color = '#4B4A4E'\n\n return new_game", "def root_wdgt(self):\n self.summarize()\n modes = ['Global', 'Single-Image']\n\n def logic(mode):\n # cache the widget later\n if mode == modes[0]:\n if self.global_walk is None:\n self.global_walk = self.global_walk_specifier()\n ipy_display(self.global_walk)\n elif mode == modes[1]:\n self.image_view = self.single_image_selector()\n # if self.image_view is None:\n # self.image_view = self.single_image_selector()\n # ipy_display(self.image_view)\n\n UI = interactive(\n logic, mode=widgets.ToggleButtons(options=modes, value=modes[0])\n )\n UI.children[-1].layout.height = '1000px'\n ipy_display(UI)", "def win(self):\n return \"Win\"", "def get_maya_window():\n\twindow = apiui.MQtUtil.mainWindow()\n\tif window is not None:\n\t\treturn shiboken2.wrapInstance(long(window), QtWidgets.QWidget)", "def GetManagedWindow(self):\r\n \r\n return self._frame", "def newwin(self,name,sizeY,sizeX,offsetY,offsetX, border=False):\n\t\tself.windows[name]=Window(sizeY,sizeX,offsetY,offsetX,border)\n\t\treturn self.windows[name]", "def createAboutWindow(self):\n if (not hasattr(self, \"about_window\")):\n self.about_window = AboutWindow(self)\n self.about_window.show()", "def TransferToWindow(self):\n return True", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the whole top-level\n # window. 
a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def main():\n # A layout of the window\n layout =[\n [sg.Image(filename='', key='image')],\n [sg.Button('Start Automated Fishing',size=(80,1))],\n [sg.Button('Stop Automated Fishing',button_type='Stop',size=(80,1))],\n [[sg.ReadButton('Exit',size=(80,1))]]\n ]\n #Window title \n main_window = sg.Window(\"Automated Minecraft Window\", layout, finalize=True,element_justification='c')\n choice = None\n \n\n while True:\n event, values = main_window._ReadNonBlocking()\n if event is 'Exit' or values is None:\n break \n elif event is 'Start Automated Fishing':\n # Runs the fishing bot function from the fishingbot_minecraft script\n start_automation = multiprocessing.Process(target=fishingbot.object_detection)\n # Starts multiprocessor\n start_automation.start()\n elif event is 'Stop Automated Fishing':\n #Terminates the multiprocessor function\n start_automation.kill()\n print('Stopped')\n\n # Update the monitor capture screen every 500ms\n start_image = multiprocessing.Process(target=main_window.find_element('image').Update(data=capture_screen()))\n start_image.start()\n\n # When Exit event is processed it closes the gui window\n main_window.close()" ]
[ "0.67854697", "0.6475978", "0.63797003", "0.63149863", "0.6215324", "0.62009335", "0.6149193", "0.60768974", "0.60621846", "0.6039159", "0.6020179", "0.6020179", "0.6007678", "0.60024905", "0.599055", "0.5948242", "0.58887446", "0.585083", "0.5733424", "0.56965476", "0.5670421", "0.5667439", "0.5667439", "0.56465286", "0.5621861", "0.5617549", "0.56127477", "0.5604356", "0.5603442", "0.5601413", "0.5594307", "0.55940354", "0.5593601", "0.5593601", "0.5593601", "0.5586119", "0.55779403", "0.5575746", "0.55744", "0.55730623", "0.5572142", "0.55714875", "0.55707216", "0.55604213", "0.5559761", "0.5553402", "0.55440843", "0.5527853", "0.55185205", "0.550348", "0.5490805", "0.5490708", "0.5479532", "0.54787236", "0.54728705", "0.5462558", "0.54616857", "0.54479355", "0.54443234", "0.5441468", "0.5439687", "0.5409738", "0.54019356", "0.53986794", "0.53979355", "0.5394204", "0.53941894", "0.5392832", "0.53904885", "0.5390243", "0.5385183", "0.53686136", "0.5368606", "0.5366704", "0.5363852", "0.5363743", "0.5360875", "0.53597623", "0.53580284", "0.53576934", "0.53573376", "0.53253764", "0.5319113", "0.53182405", "0.53168744", "0.5314953", "0.53144616", "0.53133684", "0.5306401", "0.52929235", "0.52812475", "0.5280336", "0.52726084", "0.5270399", "0.526954", "0.5267161", "0.5259822", "0.52527255", "0.52378726", "0.52356863" ]
0.56174505
26
Function that represents the window in which Stage Mods can be applied.
def stage_window():
    path_dir = r'Sor_Mods_Storage\stages'
    stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    stages = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(stages, image=mainTitleImg)
    title = tk.Label(stages, text="Stage Mods")

    comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))

    def apply_stage_mod():
        stage_selected = comboBox_chars.get()
        result_window = tk.Toplevel()

        value = ''
        if stage_selected == '':
            value = f'{value} Please Select an Stage Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')
            value = f'Enemy Mod {stage_selected} applied!'

        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)

    title.grid(row=0, column=0)
    comboBox_chars.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def GetWindow(self):\r\n\r\n return self.window", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, **kwargs)->None:\n pass", "def current_window(self):\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def show(self, window):\r\n\r\n return", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def _get_window_width(self):", "def window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:\n return pulumi.get(self, \"window\")", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def window(self):\n return self.attribute('VW')", "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def GetWindow(self):\r\n\r\n return self._wnd", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def get_parent_window(self): # real signature unknown; restored from __doc__\n pass", "def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def access_window(self):\n return self._access_window", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n 
OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def get_active_window(self): # real signature unknown; restored from __doc__\n pass", "def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)", "def getMayaWindow():\n ptr = openmayaui.MQtUtil.mainWindow()\n return wrapInstance(long(ptr), QtWidgets.QMainWindow)", "def _get_window_start(self, waveforms):", "def _current_window_for_event(event):\n return event.app.layout.current_window", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def main_window(self) -> MainWindow:\n return self._main_window", "def build(theme: str) -> sg.Window:\n\n # yapf: disable\n sg.theme(theme)\n des=['Top 10 de palabras que se encuentran primero de todas las partidas','Porcentaje de partidas por estado (terminada, cancelada,abandonadas)','Porcentaje de partidas finalizadas según género',\n 'Cantidad de partidas que se juegan para cada día de la semana','Promedio de tiempo de partidas finalizadas por nivel.','Porcentaje de palabras encontradas en las partidas timeout.'\n ]\n tab_layout=[[[sg.Text(des[x],font=(f\"{WINDOW_FONT}\", WINDOW_FONT_SIZE))],[sg.Canvas(key=f\"-CANVAS{x}-\")]] for x in range(len(des))]\n\n layout = [[sg.Text(f\"Estadisticas\",font=(WINDOW_TITLE_FONT, WINDOW_FONT_SIZE * 2))],\n [sg.TabGroup([[sg.Tab(f'Gráfico {l+1}',tab_layout[l],element_justification='center') for l in range(len(des))]])],\n [sg.Button(\"Menu\",key=\"-BACK BUTTON-\")]\n ]\n # yapf: enable\n stat_window = sg.Window(\"Stats\",layout,finalize=True,element_justification='center',margins=(10, 10),size=(900, 700))\n info = pd.read_csv(os.path.join(os.getcwd(), GAME_INFO_PATH),encoding='utf-8')\n draw_figure(stat_window['-CANVAS0-'].TKCanvas, top_10_palabras(info))\n 
stat_window.refresh() #Esta linea permite que se muestre más rápido el primer gráfico, dando tiempo a que se creen los demás\n draw_figure(stat_window['-CANVAS1-'].TKCanvas, partidas_por_estado(info))\n draw_figure(stat_window['-CANVAS2-'].TKCanvas, partidas_por_genero(info))\n draw_figure(stat_window['-CANVAS3-'].TKCanvas, partidas_por_dia(info))\n draw_figure(stat_window['-CANVAS4-'].TKCanvas,promedio_tiempo_por_nivel(info))\n draw_figure(stat_window['-CANVAS5-'].TKCanvas,cant_encontradas_en_timeout(info))\n\n return stat_window", "def default_window():\n X = [0, .125, 1.4375, 1.5625, 2.9375, 3.0625, 4.4375, 4.5625, 5.875, 6.0]\n Y = [0, .125, 2.875, 3.0]\n Z = [0, .125]\n V, F = True, False\n occupancy = [\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]],\n [[V],[F],[V]],\n [[V],[V],[V]]\n ]\n return w7.window(X, Y, Z, occupancy)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def GetManagedWindow(self):\r\n \r\n return self._frame", "def show(self,window):\n self.showFunctions(window)", "def win(self):\n return self._get(\"win\")", "def GetWindow(self):\n\n return self._window", "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def getWin(self):\n return self.__win", "def formWindow(self, p_int): # real signature unknown; restored from __doc__\n return QDesignerFormWindowInterface", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def viz_windows(self, score_img, mode):\n if mode == 'filtered':\n lw_img = window_image(self.windows_left, 'x_filtered', color=(0, 255, 0))\n rw_img = window_image(self.windows_right, 'x_filtered', color=(0, 
255, 0))\n elif mode == 'raw':\n color = (255, 0, 0)\n win_left_detected, arg = filter_window_list(self.windows_left, False, False, remove_undetected=True)\n win_right_detected, arg = filter_window_list(self.windows_right, False, False, remove_undetected=True)\n lw_img = window_image(win_left_detected, 'x_measured', color, color, color)\n rw_img = window_image(win_right_detected, 'x_measured', color, color, color)\n else:\n raise Exception('mode is not valid')\n combined = lw_img + rw_img\n return cv2.addWeighted(score_img, 1, combined, 0.5, 0)", "def getwinsize(self):", "def GetMainWindow(self):\r\n \r\n return self._main_win", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def ev_windowminimized(self, event: WindowEvent) -> None:", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def ev_windowminimized(self, event: tcod.event.WindowEvent) -> T | None:", "def stackingWindows():\n space = 50\n offset = 70\n cv2.moveWindow(\"Original image\", space, space)\n cv2.moveWindow(\"Keypoints original\", space, hsize + space + offset)\n cv2.moveWindow(\"Color matched\", wsize + space, space)\n cv2.moveWindow(\"Keypoints Dark\", wsize + space, hsize + space + offset)", "def details_window(self, instance: Union[Nobleman, Location]):\n window = 
tk.Toplevel()\n window.title(instance.name)\n window.protocol(\"WM_DELETE_WINDOW\",\n partial(self.close_details_window, instance))\n self.register_extra_window(instance, window)\n self.generate_window_content(instance, window)", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def _getMayaWindow():\n\n ptr = OpenMayaUI.MQtUtil.mainWindow ()\n if ptr is not None:\n return wrapInstance (long (ptr), QMainWindow)", "def main_window():\n # case: Maya\n if pkgutil.find_loader(\"maya\"):\n for obj in QtWidgets.QApplication.topLevelWidgets():\n if obj.objectName() == \"MayaWindow\":\n return obj\n\n # case: Houdini\n if pkgutil.find_loader(\"hou\"):\n module = importlib.import_module(\"hou\")\n return getattr(module.qt, \"mainWindow\")()\n\n # case: Nuke\n if pkgutil.find_loader(\"nuke\"):\n for obj in QtWidgets.QApplication.topLevelWidgets():\n cls_name = obj.metaObject().className()\n inherits = obj.inherits(\"QMainWindow\")\n if inherits and cls_name == \"Foundry::UI::DockMainWindow\":\n return obj\n\n return None", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def get_train_windows(self, scene):\n pass", "def get_maya_window():\n\twindow = apiui.MQtUtil.mainWindow()\n\tif window is not None:\n\t\treturn shiboken2.wrapInstance(long(window), QtWidgets.QWidget)", "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h=560)\n\n #######################\n #top bar layout\n #######################\n\n #rowlayout\n widgets[\"bannerFLO\"] = cmds.formLayout(w=1000, h=50, bgc=(.300,.3,.300))\n widgets[\"bannerImage\"] = cmds.image(image=\"{0}/banner_shotWin.png\".format(pi.images))\n widgets[\"spotImage\"] = cmds.iconTextButton(style=\"iconOnly\", image = \"{0}/defaultSpotImage.jpg\".format(pi.images), w=50, h=50, ann=ann[\"spotIcon\"], c=changeSpotIcon)\n widgets[\"projectText\"] = cmds.text(l=\"Project Name: Spot Name\", font = \"boldLabelFont\")\n widgets[\"sceneText\"] = cmds.text(l=\"Current Scene\") \n widgets[\"projectButton\"] = cmds.button(l=\"Change Job\", w = 100, h= 40, 
bgc= (.5,.5,.5), ann = ann[\"proj\"], c=setProject)\n widgets[\"refreshButton\"] = cmds.button(l=\"Refresh\", w = 60, h= 40, bgc= (.2,.2,.2), c = populateWindow)\n widgets[\"exploreButton\"] = cmds.button(l=\"Explore\\nReference\", w = 60, h= 40, bgc= (.7,.5,.3), c=exploreReference)\n\n cmds.formLayout(widgets[\"bannerFLO\"], e=True, af = [(widgets[\"bannerImage\"], \"top\", 0),\n (widgets[\"bannerImage\"], \"left\", 0),\n (widgets[\"projectText\"], \"left\", 400),\n (widgets[\"projectText\"], \"top\", 5),\n (widgets[\"sceneText\"], \"top\", 25),\n (widgets[\"spotImage\"], \"left\", 335), \n (widgets[\"sceneText\"], \"left\", 400),\n (widgets[\"projectButton\"], \"left\", 740),\n (widgets[\"projectButton\"], \"top\", 5),\n (widgets[\"refreshButton\"], \"left\", 850),\n (widgets[\"refreshButton\"], \"top\", 5),\n (widgets[\"exploreButton\"], \"left\", 920),\n (widgets[\"exploreButton\"], \"top\", 5), \n ])\n\n ######################\n #bottom layout\n ########################\n cmds.setParent(widgets[\"mainCLO\"])\n widgets[\"lowFLO\"] = cmds.formLayout()\n widgets[\"lowTLO\"] = cmds.tabLayout(bgc = (.2, .2, .2 ))\n\n ################\n #shots tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"shotsFLO\"] = cmds.formLayout(\"Shots - Anim, Light and FX\",w=1000, h=500, bgc = (.4,.4,.4))\n \n ##############\n #shot asset List layout\n ###############\n widgets[\"shotAssListCLO\"] = cmds.columnLayout(w=240, bgc = (.5, .5,.5))\n widgets[\"shotAssListFLO\"] = cmds.formLayout(w=240, h= 500)\n widgets[\"shotAssListTSL\"] = cmds.textScrollList(w=240, h=465, ams=True) \n\n widgets[\"shotAssListTitleText\"] = cmds.text(l=\"Referenced Assets In Current Scene\", font = \"boldLabelFont\", al=\"center\", ann=ann[\"reffedAssets\"])\n\n cmds.formLayout(widgets[\"shotAssListFLO\"], e=True, af = [\n (widgets[\"shotAssListTSL\"], \"top\", 35),\n (widgets[\"shotAssListTSL\"], \"left\", 0),\n \n (widgets[\"shotAssListTitleText\"], \"top\", 5),\n (widgets[\"shotAssListTitleText\"], \"left\", 20),\n ])\n\n ##############\n #shot List layout\n ###############\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotListCLO\"] = cmds.columnLayout(w=130, bgc = (.5, .5, .5))\n widgets[\"shotListFLO\"] = cmds.formLayout(w=130, h= 500)\n widgets[\"shotListTSL\"] = cmds.textScrollList(w=130, h=460)\n widgets[\"shotListTitleText\"] = cmds.text(l=\"Shot List\", font = \"boldLabelFont\", ann=ann[\"shotList\"])\n widgets[\"shotListCharText\"] = cmds.text(l=\"Shots\")\n\n cmds.formLayout(widgets[\"shotListFLO\"], e=True, af = [\n (widgets[\"shotListTSL\"], \"top\", 40), \n (widgets[\"shotListTSL\"], \"left\", 0),\n (widgets[\"shotListTitleText\"], \"top\", 5),\n (widgets[\"shotListTitleText\"], \"left\", 30),\n (widgets[\"shotListCharText\"], \"top\", 25),\n (widgets[\"shotListCharText\"], \"left\", 5),\n ])\n\n ##############\n #shot Status layout\n ############### \n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotInfoAssListTLO\"] = cmds.tabLayout(w=200, h=500)\n widgets[\"shotInfoFLO\"] = cmds.formLayout(\"ShotInfo\", w=200, h=500, bgc= (.5, .5, .5))\n widgets[\"shotInfoTitleText\"] = cmds.text(l=\"Shot Information\", font = \"boldLabelFont\")\n widgets[\"shotInfoNameText\"] = cmds.text(l=\"<Shot Name>\", font = \"boldLabelFont\", al=\"center\", w=200)\n widgets[\"shotInfoVariantText\"] = cmds.text(l=\"<Var Name>\", font = \"boldLabelFont\", al=\"center\", w=200) \n widgets[\"shotInfoPic\"] = cmds.image(image = \"{0}/kitten-photo-632-3.jpg\".format(pi.images), w= 154, h=154)\n 
widgets[\"shotAnnCB\"] = cmds.checkBox(l=\"Tooltips popups?\", value=tooltips, changeCommand=tooltipSet)\n\n cmds.formLayout(widgets[\"shotInfoFLO\"], e=True, af =[\n (widgets[\"shotInfoNameText\"], \"top\", 60),\n (widgets[\"shotInfoNameText\"], \"left\", 0),\n (widgets[\"shotInfoVariantText\"], \"top\", 80),\n (widgets[\"shotInfoVariantText\"], \"left\", 0), \n (widgets[\"shotInfoPic\"], \"top\", 110),\n (widgets[\"shotInfoPic\"], \"left\", 23),\n (widgets[\"shotInfoTitleText\"], \"top\", 5),\n (widgets[\"shotInfoTitleText\"], \"left\", 35),\n (widgets[\"shotAnnCB\"], \"top\", 420),\n (widgets[\"shotAnnCB\"], \"left\", 50), \n ])\n\n cmds.setParent(widgets[\"shotInfoAssListTLO\"])\n widgets[\"shotAssRigListTLO\"] = cmds.tabLayout(\"ProjAssets\", w=200, h=500) \n widgets[\"shotAssRigCharListCLO\"] = cmds.columnLayout(\"Chars\", w=200, h=500)\n widgets[\"shotAssRigCharListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigPropListCLO\"] = cmds.columnLayout(\"Props\", w=200, h=500)\n widgets[\"shotAssRigPropListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAssRigSetListCLO\"] = cmds.columnLayout(\"Sets\", w=200, h=500)\n widgets[\"shotAssRigSetListTSL\"] = cmds.textScrollList(w=200, h=450) \n cmds.setParent(widgets[\"shotAssRigListTLO\"])\n widgets[\"shotAnmMstListCLO\"] = cmds.columnLayout(\"Anm\", w=200, h=500)\n widgets[\"shotAnmMstListTSL\"] = cmds.textScrollList(w=200, h=450) \n ###############\n #Shot Action layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"shotActionFLO\"] = cmds.formLayout(w=150, h=500, bgc =(.5, .5, .5))\n widgets[\"shotActionRefAssBut\"] = cmds.button(l=\"-> Ref Asset In ->\", w=130, h=20, bgc = (.7,.7,.7), c=referenceAsset, ann=ann[\"refAsset\"]) \n widgets[\"shotActionReplaceBut\"] = cmds.button(l=\"-> Replace Reference ->\", w=130, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"replace\"], c=replaceReference)\n widgets[\"shotActionRefMultBut\"] = cmds.button(l=\"-> Ref Multiple ->\", w=100, h=20, en=True, bgc = (.7,.7,.7), ann=ann[\"refMult\"], c=referenceMultiple)\n widgets[\"shotActionRefMultIFG\"] = cmds.intFieldGrp(w=20, v1=1)\n widgets[\"shotActionReloadBut\"] = cmds.button(l=\"Reload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=reloadReference, ann=ann[\"reload\"])\n widgets[\"shotActionUnloadBut\"] = cmds.button(l=\"Unload Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=unloadReference, ann=ann[\"unload\"])\n widgets[\"shotActionRemoveBut\"] = cmds.button(l=\"Remove Reference ->\", w=130, h=20, bgc = (.7,.7,.7), c=removeReference, ann=ann[\"remove\"])\n widgets[\"shotActionQIncrBut\"] = cmds.button(l=\"Quick Increment\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=quickIncrement, ann=ann[\"qkIncr\"])\n widgets[\"shotActionNewShotBut\"] = cmds.button(l=\"Create new shot\", en=True, w=130, h=20, bgc = (.7,.7,.7), c=createNewShot, ann=ann[\"crtShot\"]) \n widgets[\"shotActionTitle\"] = cmds.text(l=\"Shot Actions\", font = \"boldLabelFont\")\n\n # create an embedded tab layout for each type of button!\n widgets[\"shotActionTypeTLO\"] = cmds.tabLayout(\"Specific Actions\", w=150, h=180, bgc=(.2,.2,.2))\n\n widgets[\"shotActionTypeAnmSLO\"] = cmds.scrollLayout(\"Anm\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeAnmFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .45, .4))\n widgets[\"shotActionExpAnimBut\"] = cmds.button(l=\"Export Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), 
c=exportAnimation, ann=ann[\"expAnim\"])\n widgets[\"shotActionImpAnimBut\"] = cmds.button(l=\"Import Anim ->\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=importAnimation, ann=ann[\"impAnim\"])\n widgets[\"shotActionRefToBut\"] = cmds.button(l=\"-> Reference To\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=referenceTo, ann=ann[\"refTo\"])\n widgets[\"shotActionCtrlMkBut\"] = cmds.button(l=\"Ctrl On Selection\", w=130, h=20, en=True, bgc=(.7,.7,.7), c=controlMaker, ann=ann[\"ctrlMk\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeLgtSLO\"] = cmds.scrollLayout(\"Lgt\", w=150, h=180, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeLgtFLO\"] = cmds.formLayout(w=150,h=240, bgc=(.4, .4, .45))\n widgets[\"shotActionGenericBut\"] = cmds.button(l=\"Render Setup\", w=130, h=20, en=True, bgc = (.7,.7,.7), c=renderSetup, ann=ann[\"rendGen\"])\n\n widgets[\"shotActionMtlBut\"] = cmds.button(l=\"-> Apply Mtl To Sel ->\", w=130, h=20, en=False, bgc = (.7,.7,.7), ann=ann[\"mtlApply\"])\n\n cmds.setParent(widgets[\"shotActionTypeTLO\"])\n widgets[\"shotActionTypeFxSLO\"] = cmds.scrollLayout(\"Fx\", w=150, h=240, verticalScrollBarThickness=5) \n widgets[\"shotActionTypeFxFLO\"] = cmds.formLayout(w=150,h=180, bgc=(.45, .4, .4))\n \n\n#---------------- add any fx buttons here and then postion them below \n\n cmds.formLayout(widgets[\"shotActionTypeLgtFLO\"], e=True, af = [\n (widgets[\"shotActionGenericBut\"], \"top\", 10),\n (widgets[\"shotActionGenericBut\"], \"left\", 2),\n (widgets[\"shotActionMtlBut\"], \"top\", 40),\n (widgets[\"shotActionMtlBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionTypeAnmFLO\"], e=True, af = [\n (widgets[\"shotActionExpAnimBut\"], \"top\", 10),\n (widgets[\"shotActionExpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionImpAnimBut\"], \"top\", 40),\n (widgets[\"shotActionImpAnimBut\"], \"left\", 2),\n (widgets[\"shotActionRefToBut\"], \"top\", 70),\n (widgets[\"shotActionRefToBut\"], \"left\", 2),\n (widgets[\"shotActionCtrlMkBut\"], \"top\", 100),\n (widgets[\"shotActionCtrlMkBut\"], \"left\", 2) \n ])\n\n cmds.formLayout(widgets[\"shotActionFLO\"], e=True, af = [\n (widgets[\"shotActionTitle\"], \"top\", 5),\n (widgets[\"shotActionTitle\"], \"left\", 35),\n (widgets[\"shotActionRefAssBut\"], \"top\", 30),\n (widgets[\"shotActionRefAssBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultBut\"], \"top\", 60),\n (widgets[\"shotActionRefMultBut\"], \"left\", 10),\n (widgets[\"shotActionRefMultIFG\"], \"top\", 60),\n (widgets[\"shotActionRefMultIFG\"], \"left\", 110),\n (widgets[\"shotActionReloadBut\"], \"top\", 90),\n (widgets[\"shotActionReloadBut\"], \"left\", 10),\n (widgets[\"shotActionUnloadBut\"], \"top\", 120),\n (widgets[\"shotActionUnloadBut\"], \"left\", 10),\n (widgets[\"shotActionRemoveBut\"], \"top\", 150),\n (widgets[\"shotActionRemoveBut\"], \"left\", 10),\n (widgets[\"shotActionReplaceBut\"], \"top\", 180),\n (widgets[\"shotActionReplaceBut\"], \"left\", 10),\n (widgets[\"shotActionQIncrBut\"], \"top\", 210),\n (widgets[\"shotActionQIncrBut\"], \"left\", 10),\n (widgets[\"shotActionTypeTLO\"], \"top\", 270),\n (widgets[\"shotActionTypeTLO\"], \"left\", 0), \n (widgets[\"shotActionNewShotBut\"], \"top\", 470),\n (widgets[\"shotActionNewShotBut\"], \"left\", 10), \n ])\n\n ###############\n #Shot anmLgt tab layout\n ################\n cmds.setParent(widgets[\"shotsFLO\"])\n widgets[\"anmLgtFLO\"] = cmds.formLayout(w=250, h=500, bgc = (.4, .4, .4))\n widgets[\"anmLgtTLO\"] = cmds.tabLayout(w=250, h=500, bgc = 
(.4,.4,.4), changeCommand = varTabChange)\n ###############\n #shot anm tab layout\n ###############\n widgets[\"anmTabCLO\"] = cmds.columnLayout(\"ANM\", w=250, bgc = (.4, .45, .4))\n #################\n #anm info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"anmVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"anmLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"anmLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n cmds.separator(h=5)\n\n #################\n #anm 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmWSFLO\"] = cmds.frameLayout(\"Animation Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"anmWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.45,.4))\n\n widgets[\"anmWSOpenBut\"] = cmds.button(l=\"Open Latest\\nAnim\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"anmWSIncrBut\"] = cmds.button(l=\"Increment Anim Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), ann=ann[\"incrWS\"], c = partial(incrementWorkshop, \"anm\"))\n widgets[\"anmWSPrevBut\"] = cmds.button(l=\"Previous Anim Workshops\", w=160, bgc = (.7,.7,.7), en=False, ann=ann[\"prevWS\"])\n widgets[\"anmWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"anmWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"anm\"), ann=ann[\"crtVariant\"])\n widgets[\"anmVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, bgc = (.7,.7,.7), en=False, c=createShotIcon, ann=ann[\"crtIcon\"]) \n\n cmds.formLayout(widgets[\"anmWSFoLO\"], e=True, af = [\n (widgets[\"anmWSOpenBut\"], \"left\", 5),\n (widgets[\"anmWSOpenBut\"], \"top\", 10),\n (widgets[\"anmWSIncrBut\"], \"left\", 80),\n (widgets[\"anmWSIncrBut\"], \"top\", 10),\n (widgets[\"anmWSInfoBut\"], \"left\", 5),\n (widgets[\"anmWSInfoBut\"], \"top\", 65),\n (widgets[\"anmWSPrevBut\"], \"left\", 80),\n (widgets[\"anmWSPrevBut\"], \"top\", 65),\n (widgets[\"anmWSNewVarBut\"], \"left\", 5),\n (widgets[\"anmWSNewVarBut\"], \"top\", 105),\n (widgets[\"anmVarIconBut\"], \"left\", 170),\n (widgets[\"anmVarIconBut\"], \"top\", 105), \n ])\n #################\n #anm 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"anmTabCLO\"])\n widgets[\"anmMstFLO\"] = cmds.frameLayout(\"Animation Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"anmMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.45,.4))\n widgets[\"anmMstOpenBut\"] = cmds.button(l=\"Open Anim\\nMaster\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"anmMstIncrBut\"] = cmds.button(l=\"Publish Anim Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"anmMstBgIncrBut\"] = cmds.button(l=\"BG Publish Anim Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"])\n widgets[\"anmMstPrevBut\"] = cmds.button(l=\"Previous Anim Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"anmMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"])\n\n\n \n cmds.formLayout(widgets[\"anmMstFoLO\"], e=True, af = [\n (widgets[\"anmMstOpenBut\"], \"left\", 5),\n (widgets[\"anmMstOpenBut\"], 
\"top\", 10),\n (widgets[\"anmMstIncrBut\"], \"left\", 80),\n (widgets[\"anmMstIncrBut\"], \"top\", 10),\n (widgets[\"anmMstBgIncrBut\"], \"left\", 5),\n (widgets[\"anmMstBgIncrBut\"], \"top\", 65), \n (widgets[\"anmMstInfoBut\"], \"left\", 5),\n (widgets[\"anmMstInfoBut\"], \"top\", 95), \n (widgets[\"anmMstPrevBut\"], \"left\", 80),\n (widgets[\"anmMstPrevBut\"], \"top\", 95), \n \n ])\n ###############\n #shot Lgt tab layout\n ################ \n cmds.setParent(widgets[\"anmLgtTLO\"]) \n widgets[\"lgtTabCLO\"] = cmds.columnLayout(\"LGT\", w=250, bgc = (.4,.4,.45))\n #################\n #lgt info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"lgtVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"lgtLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"lgtLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtWSFLO\"] = cmds.frameLayout(\"Lighting Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"lgtWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.4,.4,.45))\n\n widgets[\"lgtWSOpenBut\"] = cmds.button(l=\"Open Latest\\nLight\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"lgtWSIncrBut\"] = cmds.button(l=\"Increment Light Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"lgt\"), ann=ann[\"incrWS\"])\n widgets[\"lgtWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"])\n widgets[\"lgtWSPrevBut\"] = cmds.button(l=\"Previous Light Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"lgtWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"lgt\"), ann=ann[\"crtVariant\"]) \n widgets[\"lgtVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"])\n\n cmds.formLayout(widgets[\"lgtWSFoLO\"], e=True, af = [\n (widgets[\"lgtWSOpenBut\"], \"left\", 5),\n (widgets[\"lgtWSOpenBut\"], \"top\", 10),\n (widgets[\"lgtWSIncrBut\"], \"left\", 80),\n (widgets[\"lgtWSIncrBut\"], \"top\", 10),\n (widgets[\"lgtWSInfoBut\"], \"left\", 5),\n (widgets[\"lgtWSInfoBut\"], \"top\", 65),\n (widgets[\"lgtWSPrevBut\"], \"left\", 80),\n (widgets[\"lgtWSPrevBut\"], \"top\", 65),\n (widgets[\"lgtWSNewVarBut\"], \"left\", 5),\n (widgets[\"lgtWSNewVarBut\"], \"top\", 105),\n (widgets[\"lgtVarIconBut\"], \"left\", 170),\n (widgets[\"lgtVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"lgtTabCLO\"])\n widgets[\"lgtMstFLO\"] = cmds.frameLayout(\"Lighting Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"lgtMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.4,.4,.45))\n widgets[\"lgtMstOpenBut\"] = cmds.button(l=\"Open\\nLight Master\", w=70, h=50, en=True, bgc = (.5,.7,.5), c=partial(openShotMaster, \"lgt\"), ann=ann[\"openMst\"])\n widgets[\"lgtMstIncrBut\"] = cmds.button(l=\"Publish Light Master\\n(Keep Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"lgtMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, 
ann=ann[\"MstInfo\"]) \n widgets[\"lgtMstPrevBut\"] = cmds.button(l=\"Previous Light Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"lgtMstBgIncrBut\"] = cmds.button(l=\" BG Publish Light Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"lgtMstFoLO\"], e=True, af = [\n (widgets[\"lgtMstOpenBut\"], \"left\", 5),\n (widgets[\"lgtMstOpenBut\"], \"top\", 10),\n (widgets[\"lgtMstIncrBut\"], \"left\", 80),\n (widgets[\"lgtMstIncrBut\"], \"top\", 10),\n (widgets[\"lgtMstBgIncrBut\"], \"left\", 5),\n (widgets[\"lgtMstBgIncrBut\"], \"top\", 65), \n (widgets[\"lgtMstInfoBut\"], \"left\", 5),\n (widgets[\"lgtMstInfoBut\"], \"top\", 95),\n (widgets[\"lgtMstPrevBut\"], \"left\", 80),\n (widgets[\"lgtMstPrevBut\"], \"top\", 95),\n \n ]) \n\n ###############\n #shot anm tab layout\n ###############\n cmds.setParent(widgets[\"anmLgtTLO\"])\n widgets[\"fxTabCLO\"] = cmds.columnLayout(\"FX\", w=250, bgc = (.45, .4, .4))\n #################\n #fx info frame and column layouts\n ################# \n cmds.separator(h=5)\n widgets[\"fxVariationsTSL\"] = cmds.textScrollList(w=250, h=90)\n widgets[\"fxLastWSTFG\"] = cmds.textFieldGrp(l=\"Latest WS: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False)\n widgets[\"fxLastMasterTFG\"] = cmds.textFieldGrp(l=\"Master: \", w=250, cw = [(1, 70), (2,170)], cal = [(1,\"left\"), (2, \"left\")],ed=False) \n cmds.separator(h=5)\n #################\n #lgt 'workshop' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxWSFLO\"] = cmds.frameLayout(\"FX Workshop\", w=250, h=165, bgc= (.3, .3, .3))\n widgets[\"fxWSFoLO\"] = cmds.formLayout(w=250, h=165, bgc = (.45,.4,.4))\n\n widgets[\"fxWSOpenBut\"] = cmds.button(l=\"Open Latest\\nFX\\nWorkshop\", w=70, h=50, en=False, bgc = (.4,.5,.8), ann=ann[\"openWS\"])\n widgets[\"fxWSIncrBut\"] = cmds.button(l=\"Increment FX Workshop\", w=160, h=50, en=True, bgc = (.7,.6,.4), c = partial(incrementWorkshop, \"fx\"), ann=ann[\"incrWS\"])\n widgets[\"fxWSInfoBut\"] = cmds.button(l=\"WS Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"WSInfo\"]) \n widgets[\"fxWSPrevBut\"] = cmds.button(l=\"Previous FX Workshops\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevWS\"])\n widgets[\"fxWSNewVarBut\"] = cmds.button(l=\"Create New Variant\", w=160, h=30, bgc = (.2,.2,.2), c=partial(createVariant, \"fx\"), ann=ann[\"crtVariant\"])\n widgets[\"fxVarIconBut\"] = cmds.button(l=\"Create Var\\nIcon\", w=70, h=30, en=False, bgc = (.7,.7,.7), c=createShotIcon, ann=ann[\"crtIcon\"]) \n \n cmds.formLayout(widgets[\"fxWSFoLO\"], e=True, af = [\n (widgets[\"fxWSOpenBut\"], \"left\", 5),\n (widgets[\"fxWSOpenBut\"], \"top\", 10),\n (widgets[\"fxWSIncrBut\"], \"left\", 80),\n (widgets[\"fxWSIncrBut\"], \"top\", 10),\n (widgets[\"fxWSInfoBut\"], \"left\", 5),\n (widgets[\"fxWSInfoBut\"], \"top\", 65),\n (widgets[\"fxWSPrevBut\"], \"left\", 80),\n (widgets[\"fxWSPrevBut\"], \"top\", 65),\n (widgets[\"fxWSNewVarBut\"], \"left\", 5),\n (widgets[\"fxWSNewVarBut\"], \"top\", 105),\n (widgets[\"fxVarIconBut\"], \"left\", 170),\n (widgets[\"fxVarIconBut\"], \"top\", 105), \n ]) \n #################\n #lgt 'master' frame and column layouts\n #################\n cmds.setParent(widgets[\"fxTabCLO\"])\n widgets[\"fxMstFLO\"] = cmds.frameLayout(\"FX Master\", w=250, h=200, bgc= (.3, .3, .3))\n widgets[\"fxMstFoLO\"] = cmds.formLayout(w=250, h=200, bgc = (.45,.4,.4))\n widgets[\"fxMstOpenBut\"] = 
cmds.button(l=\"Open\\nFX Master\", w=70, h=50, en=False, bgc = (.5,.7,.5), ann=ann[\"openMst\"])\n widgets[\"fxMstIncrBut\"] = cmds.button(l=\"Publish FX Master\\n(Import Refs)\", w=160, h=50, en=False, bgc = (.7,.5,.5), ann=ann[\"pubRefMst\"])\n widgets[\"fxMstInfoBut\"] = cmds.button(l=\"Mst Info\", w=70, bgc = (.7, .7, .7), en=False, ann=ann[\"MstInfo\"]) \n widgets[\"fxMstPrevBut\"] = cmds.button(l=\"Previous FX Masters\", w=160, en=False, bgc = (.7,.7,.7), ann=ann[\"prevMst\"])\n widgets[\"fxMstBgIncrBut\"] = cmds.button(l=\" BG Publish FX Master (Import Refs)\", w=235, en=False, bgc = (.3,.3,.3), ann=ann[\"pubBGMst\"]) \n\n cmds.formLayout(widgets[\"fxMstFoLO\"], e=True, af = [\n (widgets[\"fxMstOpenBut\"], \"left\", 5),\n (widgets[\"fxMstOpenBut\"], \"top\", 10),\n (widgets[\"fxMstIncrBut\"], \"left\", 80),\n (widgets[\"fxMstIncrBut\"], \"top\", 10),\n (widgets[\"fxMstBgIncrBut\"], \"left\", 5),\n (widgets[\"fxMstBgIncrBut\"], \"top\", 65), \n (widgets[\"fxMstInfoBut\"], \"left\", 5),\n (widgets[\"fxMstInfoBut\"], \"top\", 95),\n (widgets[\"fxMstPrevBut\"], \"left\", 80),\n (widgets[\"fxMstPrevBut\"], \"top\", 95),\n \n ]) \n\n\n cmds.setParent(widgets[\"anmLgtFLO\"])\n widgets[\"anmLgtTitleText\"] = cmds.text(l=\"Variant Files\", font = \"boldLabelFont\", ann=ann[\"varFile\"]) \n\n cmds.formLayout(widgets[\"anmLgtFLO\"], e=True, af = [(widgets[\"anmLgtTitleText\"], \"top\", 5), (widgets[\"anmLgtTitleText\"], \"left\", 135)])\n\n ###################\n # - -- Shot Tab form setup\n ##################\n cmds.formLayout(widgets[\"shotsFLO\"], e=True, af = [\n (widgets[\"shotListCLO\"], \"left\", 0),\n (widgets[\"shotListCLO\"], \"top\", 0),\n (widgets[\"anmLgtFLO\"], \"left\", 134),\n (widgets[\"anmLgtFLO\"], \"top\", 0), \n (widgets[\"shotInfoAssListTLO\"], \"left\", 387),\n (widgets[\"shotInfoAssListTLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"top\", 0),\n (widgets[\"shotActionFLO\"], \"left\", 594),\n (widgets[\"shotAssListCLO\"], \"top\", 0),\n (widgets[\"shotAssListCLO\"], \"left\", 752)\n ])\n\n ################\n #Misc tab\n ################\n cmds.setParent(widgets[\"lowTLO\"])\n widgets[\"miscFLO\"] = cmds.formLayout(\"Other Shot Tools\",width=1000, height=500, backgroundColor = (.4,.4,.4))\n\n widgets[\"animationTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .35, .3))\n widgets[\"animationRCLO\"] = cmds.rowColumnLayout(\"animation\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"lightingTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.3, .32, .35))\n widgets[\"lightingRCLO\"] = cmds.rowColumnLayout(\"lighting\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5]) \n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"fxTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.35, .3, .3))\n widgets[\"fxRCLO\"] = cmds.rowColumnLayout(\"fx\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.setParent(widgets[\"miscFLO\"])\n widgets[\"charlexTLO\"] = cmds.tabLayout(width=500, height=250, backgroundColor = (.55, .55, .55))\n widgets[\"charlexRCLO\"] = cmds.rowColumnLayout(\"charlex_general\", numberOfColumns = 4, columnSpacing=[(1, 0), (2,5), (3,5), (4,5)], rowSpacing=[1,5])\n\n cmds.formLayout(widgets[\"miscFLO\"], e=True, af=[\n (widgets[\"charlexTLO\"], \"top\", 0),\n (widgets[\"charlexTLO\"], \"left\", 0),\n (widgets[\"animationTLO\"], \"top\", 0),\n 
(widgets[\"animationTLO\"], \"left\", 500),\n (widgets[\"lightingTLO\"], \"top\", 250),\n (widgets[\"lightingTLO\"], \"left\", 0),\n (widgets[\"fxTLO\"], \"top\", 250),\n (widgets[\"fxTLO\"], \"left\", 500) \n ])\n\n # get the dictionary of scripts, calls and annotations from the database\n dbPath =os.path.join(os.getenv(\"MAYA_ROOT\"), \"scripts\", \"chrlx_pipe\", \"chrlxScriptList.json\")\n with open(dbPath, \"r\") as f:\n scriptList = json.load(f)\n\n # populate the row column layouts with buttons and funcs from the database\n btl.buttonsToLayout(widgets[\"animationRCLO\"], scriptList[\"shot\"][\"animation\"], width=117, height=40, color=(.38, .3, .38))\n btl.buttonsToLayout(widgets[\"lightingRCLO\"], scriptList[\"shot\"][\"lighting\"], width=117, height=40, color=(.37,.34, .3))\n btl.buttonsToLayout(widgets[\"fxRCLO\"], scriptList[\"shot\"][\"fx\"], width=117, height=40, color=(.35, .3, .3))\n btl.buttonsToLayout(widgets[\"charlexRCLO\"], scriptList[\"shot\"][\"charlex\"], width=117, height=40, color=(.3, .3, .3))\n\n # widgets[\"miscCLO\"] = cmds.columnLayout(\"Other Pipeline Tools\",w=1000, h=500, bgc = (.4,.4,.4))\n # cmds.text(l=\"------ANIM STUFF-------\")\n # cmds.text(l=\"TODO - export cam(s) for nuke, etc\")\n # cmds.text(l=\"TODO - create a new prop from selected geo (propify)\") \n # cmds.text(l=\"TODO - blasting, rendering stuff?\")\n # cmds.text(l=\"TODO - export data (text file of scene locations?)\")\n # cmds.text(l=\"TODO - create render cam? Should this be in the main anim increment? (probably both)\")\n\n # cmds.text(l=\"------LGT STUFF--------\")\n # cmds.text(l=\"TODO - set up current scene for maxwell, arnold\")\n # cmds.text(l=\"TODO - convert an external image to icon (char or project)\")\n # cmds.text(l=\"TODO - revert ['ROLL BACK'] to master version? (replaces master and grabs that workshop\")\n # cmds.text(l=\"TODO - function to add your folder to the WIP folder in this project - save current to WIP folder\")\n # cmds.text(l=\"TODO - explore various frame (render) folders in explorer\")\n # cmds.text(l=\"TODO - various preset light setups/rigs? 
\")\n\n\n ######################\n #show window\n ######################\n cmds.window(widgets[\"win\"], e=True, w=1000, h=580)\n cmds.showWindow(widgets[\"win\"])\n\n #start us off\n populateWindow()", "def activeWindow(self):\n raise RuntimeError('Not implemented')\n \n return None # __IGNORE_WARNING_M831__", "def window(*args, width: int = 200, height: int = 200, autosize: bool = False,\n no_resize: bool = False, no_title_bar: bool = False, no_move: bool = False, no_scrollbar: bool = False,\n no_collapse: bool = False, horizontal_scrollbar: bool = False, no_focus_on_appearing: bool = False,\n no_bring_to_front_on_focus: bool = False, menubar: bool = False, no_close: bool = False,\n no_background: bool = False, label: str = '', show: bool = True, collapsed: bool = False,\n modal: bool = False, popup: bool = False,\n on_close: Callable = None, min_size: List[int]=[32, 32], max_size: List[int] = [30000, 30000], id:str=''):\n try:\n\n widget = internal_dpg.add_window(*args, width=width, height=height, autosize=autosize,\n no_resize=no_resize, no_title_bar=no_title_bar, no_move=no_move,\n no_scrollbar=no_scrollbar, no_collapse=no_collapse,\n horizontal_scrollbar=horizontal_scrollbar,\n no_focus_on_appearing=no_focus_on_appearing,\n no_bring_to_front_on_focus=no_bring_to_front_on_focus,\n menubar=menubar, no_close=no_close,\n no_background=no_background, label=label, show=show, \n collapsed=collapsed, on_close=on_close,\n min_size=min_size, max_size=max_size, id=id, modal=modal,\n popup=popup)\n internal_dpg.push_container_stack(widget)\n yield widget\n\n finally:\n internal_dpg.pop_container_stack()", "def size_with_window(self):\n return self.container['size_with_window']", "def gui(self):\n return gui", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def current_swing_mode(self):\n return None", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def menu_screen(win):\n\tpass", "def do_standalone_display(self):\n stage = clutter.Stage()\n stage.connect('destroy', clutter.main_quit)\n stage.connect('key-press-event', lambda x,y: clutter.main_quit())\n stage.set_fullscreen(True)\n stage.set_color(clutter.color_from_string('black'))\n stage.add(self.group)\n stage.show_all()\n 
clutter.main()", "def window(self) -> Optional[pulumi.Input['TimeWindowArgs']]:\n return pulumi.get(self, \"window\")", "def TransferToWindow(self):\n return True", "def rendererWindowActivated(self, sw):\n pass", "def window_handles(self):\n pass", "def _main_window(app=HOST):\n\tif app in ['standalone', 'generic']:\n\t\treturn None\n\n\t# Blender (not Qt)\n\t# Requires bqt - see https://github.com/techartorg/bqt\n\telif app == 'blender':\n\t\tobj = QtWidgets.QApplication.instance().blender_widget\n\t\tif obj:\n\t\t\treturn obj\n\n\t# Clarisse (not Qt)\n\t# elif app == 'clarisse':\n\t# \tfor obj in QtWidgets.QApplication.topLevelWidgets():\n\t# \t\tif obj.inherits('QMainWindow'):\n\t# \t\t\treturn obj\n\n\t# Houdini\n\telif app == 'houdini':\n\t\tobj = hou.qt.mainWindow()\n\t\tif obj:\n\t\t\treturn obj\n\n\t# Maya\n\telif app == 'maya':\n\t\tfor obj in QtWidgets.QApplication.topLevelWidgets():\n\t\t\tif obj.objectName() == 'MayaWindow':\n\t\t\t\treturn obj\n\n\t# Max\n\telif app == 'max':\n\t\t# obj = QtWidgets.QWidget.find(runtime.windows.getMAXHWND()) # seems unstable?\n\t\tobj = qtmax.GetQMaxMainWindow()\n\t\tif obj:\n\t\t\treturn obj\n\n\t# Nuke\n\telif app == 'nuke':\n\t\tfor obj in QtWidgets.QApplication.topLevelWidgets():\n\t\t\tif (obj.inherits('QMainWindow') and obj.metaObject().className() == 'Foundry::UI::DockMainWindow'):\n\t\t\t\treturn obj\n\n\telse:\n\t\t# raise RuntimeError(\"Could not find %s's main window instance\" % app)\n\t\t# verbose.warning(\"Could not find %s's main window instance\" % app)\n\t\tprint(\"Could not find %s's main window instance\" % app)\n\t\treturn None", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def ev_windowshown(self, event: WindowEvent) -> None:", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def getWidgetClass(self):\n\t\treturn AbstraccionWindowWidget", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n 
w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the whole top-level\n # window. a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def __window_print(self):\n pass", "def get_window_info (self):\n \n # g.trace(self.w,self.h,self.x,self.y)\n \n return self.w,self.h,self.x,self.y", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return shiboken2.wrapInstance(long(main_window), PySide2.QtWidgets.QWidget)", "def pyvista_render_window():\n from pyvista import examples\n globe = examples.load_globe() #add texture\n pl = pv.Plotter()\n pl.add_mesh(globe)\n sphere = pv.Sphere()\n scalars=sphere.points[:, 2]\n sphere._add_point_array(scalars, 'test', set_active=True) #allow to test scalars\n pl.add_mesh(sphere)\n return pl.ren_win", "def getRenWin(self):\n return self.renWinInteract.GetRenderWindow()", "def show(self):\n # This function has to be placed here (and not in the user.py script)\n self.showMaximized()\n visapp.run()", "def __window_focus(self):\n pass", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)", "def window(*args, backgroundColor: List[float, float, float]=None, closeCommand: Script=None,\n defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dockCorner:\n Union[List[AnyStr, AnyStr], List[List[AnyStr, AnyStr]]]=None, dockStation: bool=True,\n dockingLayout: Union[AnyStr, bool]=\"\", exists: bool=True, frontWindow: bool=True,\n height: Union[int, bool]=0, iconName: Union[AnyStr, bool]=\"\", iconify: bool=True,\n interactivePlacement: bool=True, leftEdge: Union[int, bool]=0, mainMenuBar:\n bool=True, mainWindow: bool=True, maximizeButton: bool=True, menuArray: bool=True,\n menuBar: bool=True, menuBarCornerWidget: Union[List[AnyStr, AnyStr], bool]=None,\n menuBarResize: bool=True, menuBarVisible: bool=True, menuIndex: List[AnyStr,\n int]=None, minimizeButton: bool=True, minimizeCommand: Script=None,\n nestedDockingEnabled: bool=True, numberOfMenus: bool=True, parent: AnyStr=\"\",\n resizeToFitChildren: bool=True, restoreCommand: 
Script=None, retain: bool=True,\n sizeable: bool=True, state: Union[AnyStr, bool]=\"\", title: Union[AnyStr, bool]=\"\",\n titleBar: bool=True, titleBarMenu: bool=True, toolbox: bool=True, topEdge: Union[int,\n bool]=0, topLeftCorner: Union[List[int, int], bool]=None, useTemplate: AnyStr=\"\",\n visible: bool=True, width: Union[int, bool]=0, widthHeight: Union[List[int, int],\n bool]=None, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def get_window(self):\n\n if not self.window:\n self.window_manager = window.WindowManager(self)\n self.window = self.window_manager.window\n\n return self.window", "def update_edit_fit_window(self):\n model = self._get_selected_model()\n try:\n window = float(self.edit_fit_window.text())\n except:\n return None\n else:\n model.metadata[\"window\"] = window\n return None", "def create_window(self, img, roi, name):\n\n self.window = SpinBalanceDialog()\n\n # call the user-implemented functionality\n self.window.main(img, roi)\n # show the window\n self.window.show()\n\n return self.window", "def create_new_window():\n logging.debug(\"Function create_new_window() called\")\n\n new_window = tk.Toplevel()\n new_window.title(\"Test functions\")\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_origins()\",\n command=lambda: dss.update_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n ),\n )\n ButtonCal.grid(row=1, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes()\",\n command=lambda: dss.update_classes(\n class_list_=class_list,\n champions_list_=champions_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=2, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes_and_origins()\",\n command=lambda: dss.update_classes_and_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=3, column=0)\n\n # is_in_game = tk.IntVar()\n # dss.create_gui_counter_with_plus_minus(window_tk=new_window, origin_index=1, counter=is_in_game, shift_between_upside_downside=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=4, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_from_ocr()\",\n command=lambda: dss.show_nonzero_counters_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n ),\n )\n ButtonCal.grid(row=5, column=0)\n\n Labeling = tk.Label(\n new_window, text=\"Care additional points in below\", font=BOLDED_FONT\n )\n Labeling.grid(row=6, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_points_for_nonzero_counters_from_ocr()\",\n command=lambda: dss.show_points_for_nonzero_counters_from_ocr(\n 
tk_window=MainWindow,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n ),\n )\n ButtonCal.grid(row=7, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points_from_ocr() OCR button\",\n command=lambda: dss.show_nonzero_counters_with_points_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=8, column=0)\n\n Labeling = tk.Label(new_window, text=\"with Game\", font=BOLDED_FONT)\n Labeling.grid(row=0, column=1)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=1, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_champions()\",\n command=lambda: dss.update_curent_cropped_ss_with_champions(),\n )\n ButtonCal.grid(row=2, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_champions()\",\n command=lambda: dss.update_ocr_results_champions(\n cropped_ss_with_champion_card_names=dss.crop_img,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=3, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_sorted_champions_to_buy()\",\n command=lambda: dss.update_sorted_champions_to_buy(\n ocr_results_sorted=dss.ocr_results_champions,\n champions_list_for_ocr_=champions_list_for_ocr,\n ),\n )\n ButtonCal.grid(row=4, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=5, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"draw_rectangles_show_points_show_buttons_reset_counters() scan&go\",\n command=lambda: dss.draw_rectangles_show_points_show_buttons_reset_counters(\n rgb_colours_list_=rgb_colours_list,\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr_=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n df_=df,\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n round_counter=CounterOcrResultsRound,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=6, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(4)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n 
field_to_check=4,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=0, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(5)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=5,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=1, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(6)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=6,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=2, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"filling_list_with_counter_for_namedtuple(7)\",\n command=lambda: dss.filling_list_with_counter_for_namedtuple(\n field_to_check=7,\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=3, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"append_counters_to_input_list(champion_info)\",\n command=lambda: dss.append_counters_to_input_list(\n input_list=champion_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=4, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"append_counters_to_input_list(champion_to_buy_info)\",\n command=lambda: dss.append_counters_to_input_list(\n input_list=champion_to_buy_info,\n origin_list_=origin_list,\n class_list_=class_list,\n origin_counters_=origin_counters,\n class_counters_=class_counters,\n df_=df,\n ),\n )\n ButtonCal.grid(row=5, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"calculate_card_position_on_screen(2)\",\n command=lambda: dss.calculate_card_position_on_screen(\n card_index=2,\n X_FIRST_CHAMPION_CARD_=dss.X_FIRST_CHAMPION_CARD,\n PADDING_BETWEEN_CHAMPION_CARDS_=dss.PADDING_BETWEEN_CHAMPION_CARDS,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=0, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"build_list_of_champion_cards_rectangles()\",\n command=lambda: dss.build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=dss.Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n H_CHAMPION_CARD_=dss.H_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=1, column=3)\n\n Labeling = tk.Label(\n new_window, text=\"Another cases below this row\", font=BOLDED_FONT\n )\n Labeling.grid(row=9, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"check_nonzero_counters()\",\n command=lambda: dss.check_nonzero_counters(\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n ),\n )\n ButtonCal.grid(row=10, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters()\",\n command=lambda: dss.show_nonzero_counters(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n 
df_=df,\n row_offset=0,\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n SHIFT_BETWEEN_ORIGINS_=dss.SHIFT_BETWEEN_ORIGINS,\n ),\n )\n ButtonCal.grid(row=11, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_classes_and_origins()\",\n command=lambda: dss.update_classes_and_origins(\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=12, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_points_for_nonzero_counters()\",\n command=lambda: dss.show_points_for_nonzero_counters(\n tk_window=MainWindow,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n row_offset=2,\n show_mode=1,\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n SHIFT_BETWEEN_ORIGINS_=dss.SHIFT_BETWEEN_ORIGINS,\n ),\n )\n ButtonCal.grid(row=13, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points()\",\n command=lambda: dss.show_nonzero_counters_with_points(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=14, column=0)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"reset_counters_in_list()\",\n command=lambda: dss.reset_counters_in_list(\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy\n ),\n )\n ButtonCal.grid(row=10, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_champions_to_buy_from_ocr_detection()\",\n command=lambda: dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n ),\n )\n ButtonCal.grid(row=11, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"show_nonzero_counters_with_points_from_ocr()\",\n command=lambda: dss.show_nonzero_counters_with_points_from_ocr(\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n champions_list_=champions_list,\n df_=df,\n index_list=dss.update_champions_to_buy_from_ocr_detection(\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n champions_list_for_ocr__=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n )[1],\n origin_list_=origin_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n ),\n )\n ButtonCal.grid(row=12, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"build_list_of_champion_cards_rectangles()\",\n command=lambda: dss.build_list_of_champion_cards_rectangles(\n CARDS_TO_BUY_AMOUNT_=dss.CARDS_TO_BUY_AMOUNT,\n Y_FIRST_CHAMPION_CARD_=dss.Y_FIRST_CHAMPION_CARD,\n W_CHAMPION_CARD_=dss.W_CHAMPION_CARD,\n H_CHAMPION_CARD_=dss.H_CHAMPION_CARD,\n ),\n )\n ButtonCal.grid(row=13, column=1)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"draw_rectangles_show_points_show_buttons_reset_counters() scan&go\",\n command=lambda: dss.draw_rectangles_show_points_show_buttons_reset_counters(\n rgb_colours_list_=rgb_colours_list,\n sorted_champions_to_buy_=dss.sorted_champions_to_buy,\n 
champions_list_for_ocr_=champions_list_for_ocr,\n origin_champs_counters_to_buy_=origin_champs_counters_to_buy,\n reader_=reader,\n tk_window=MainWindow,\n origin_champs_counters_=origin_champs_counters,\n df_=df,\n origin_list_=origin_list,\n champions_list_=champions_list,\n origin_counters_=origin_counters,\n class_list_=class_list,\n class_counters_=class_counters,\n round_counter=CounterOcrResultsRound,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=14, column=1)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=10, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_rounds()\",\n command=lambda: dss.update_curent_cropped_ss_with_rounds(),\n )\n ButtonCal.grid(row=11, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_round()\",\n command=lambda: dss.update_ocr_results_round(\n reader_=reader, round_counter=CounterOcrResultsRound\n ),\n )\n ButtonCal.grid(row=12, column=2)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"full_state_update_champions_ocr()\",\n command=lambda: dss.full_state_update_rounds_ocr(\n reader_=reader, round_counter=CounterOcrResultsRound\n ),\n )\n ButtonCal.grid(row=13, column=2)\n\n ButtonCal = tk.Button(\n new_window, text=\"update_curent_ss()\", command=lambda: dss.update_curent_ss()\n )\n ButtonCal.grid(row=10, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_curent_cropped_ss_with_gold()\",\n command=lambda: dss.update_curent_cropped_ss_with_gold(),\n )\n ButtonCal.grid(row=11, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"update_ocr_results_gold()\",\n command=lambda: dss.update_ocr_results_gold(\n reader_=reader,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=12, column=3)\n\n ButtonCal = tk.Button(\n new_window,\n text=\"full_state_update_gold_ocr()\",\n command=lambda: dss.full_state_update_gold_ocr(\n reader_=reader,\n gold_counter=CounterOcrResultsGold,\n ),\n )\n ButtonCal.grid(row=13, column=3)\n\n logging.debug(\"Function create_new_window() end\")" ]
[ "0.6928395", "0.684634", "0.6601684", "0.6564466", "0.6564466", "0.64463425", "0.6378235", "0.6353312", "0.6351047", "0.629362", "0.62887776", "0.6145612", "0.6136457", "0.6110733", "0.60973674", "0.6074384", "0.6053072", "0.6018539", "0.59742665", "0.5963318", "0.59591293", "0.59559196", "0.59359986", "0.59145725", "0.5905236", "0.5902226", "0.5902226", "0.58956766", "0.58951306", "0.5869485", "0.58622915", "0.584726", "0.5832626", "0.5822604", "0.5820486", "0.5818168", "0.58145213", "0.58102006", "0.58021283", "0.58007807", "0.58007807", "0.58007807", "0.5797467", "0.57945335", "0.5793707", "0.5792787", "0.5780166", "0.57742554", "0.5767725", "0.57596946", "0.5758987", "0.57435626", "0.573756", "0.57352686", "0.5733746", "0.5730556", "0.5729387", "0.5723324", "0.5711188", "0.5705141", "0.5700333", "0.56975406", "0.56840444", "0.56623816", "0.5658155", "0.56549853", "0.5654026", "0.56537426", "0.5653685", "0.56535995", "0.5652658", "0.56469244", "0.5640439", "0.5639822", "0.56386817", "0.56293243", "0.5628393", "0.5623396", "0.5611759", "0.5611755", "0.56087905", "0.5603284", "0.55979985", "0.5596291", "0.55949503", "0.559376", "0.55836993", "0.5582131", "0.55816925", "0.55761087", "0.5574644", "0.5573579", "0.5568616", "0.55654037", "0.55648935", "0.55565566", "0.5555895", "0.5554585", "0.5538611", "0.55305123" ]
0.6935589
0
Takes a list of files as a parameter and prints out a readable version. The function strips each file by line, gets rid of duplicates, then splits each line into usable chunks before printing out the usable version.
def turn_files_into_pretty_text(text_files):
    list_of_all_lines = []
    for item in text_files:
        for line in item:
            line = line.rstrip()
            if line not in list_of_all_lines:
                list_of_all_lines.append(line)
    for item in list_of_all_lines:
        words = item.split('|')
        melon = words[0]
        count = words[1]
        amount = words[2]
        print "Delivered {} {}s for total of ${}".format(count, melon, amount)
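A minimal usage sketch for the `turn_files_into_pretty_text` document above, assuming the inputs are open file objects (or any iterables of lines) containing pipe-delimited `melon|count|amount` records; the file names, sample data, and expected output are invented for illustration, and the call follows the same Python 2 dialect as the stored snippet.

```python
# Hypothetical delivery logs; each line looks like "watermelon|3|12.00".
# Duplicate lines across the two files are reported only once.
with open("deliveries_week1.txt") as f1, open("deliveries_week2.txt") as f2:
    turn_files_into_pretty_text([f1, f2])
# Example output line: Delivered 3 watermelons for total of $12.00
```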
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findDuplicateReleaseFiles(self, initialList, workingTowerName, newInfix):\n Release_Tower_Name = self.getReleaseVersion(workingTowerName, newInfix)\n Duplicate_List = []\n for fname in initialList:\n prefixStream, postfixStream = string.split(fname, workingTowerName)\n A_File_Name = prefixStream + Release_Tower_Name + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", "def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks", "def mms_file_filter(files, latest_version=False, major_version=False, min_version=None, version=None):\n \n if not isinstance(files, list): files = [files]\n\n # allow the user to specify partial version #s\n if min_version is not None:\n n_declms = len(min_version.split('.'))\n if n_declms == 1:\n min_version = min_version + '.0.0'\n elif n_declms == 2:\n min_version = min_version + '.0'\n elif version is not None:\n n_declms = len(version.split('.'))\n if n_declms == 1:\n version = version + '.0.0'\n elif n_declms == 2:\n version = version + '.0'\n\n out_files = []\n file_versions = []\n max_major_version = 0\n max_version = 0\n\n # find all of the version #s, including the max major version and max total version\n for file in files:\n version_found = re.search(r'v([0-9]+)\\.([0-9]+)\\.([0-9]+)\\.cdf$', file)\n if version_found:\n file_version = version_found.groups()\n # vX.Y.Z\n version_X = int(file_version[0])\n version_Y = 
int(file_version[1])\n version_Z = int(file_version[2])\n file_versions.append((version_X, version_Y, version_Z, file))\n if version_X > max_major_version:\n max_major_version = version_X\n if max_version == 0:\n max_version = (version_X, version_Y, version_Z)\n else:\n if (version_X > max_version[0]) or (version_X == max_version[0] and version_Y > max_version[1]) or (version_X == max_version[0] and version_Y == max_version[1] and version_Z > max_version[2]):\n max_version = (version_X, version_Y, version_Z)\n else:\n continue\n\n for file_ver in file_versions:\n if min_version is not None: # MINIMUM file version\n min_version_num = [int(v) for v in min_version.split('.')]\n if (file_ver[0] > min_version_num[0]) or (file_ver[0] == min_version_num[0] and file_ver[1] > min_version_num[1]) or (file_ver[0] == min_version_num[0] and file_ver[1] == min_version_num[1] and file_ver[2] >= min_version_num[2]):\n out_files.append(file_ver[3])\n elif version is not None: # EXACT file version\n exact_version_num = [int(v) for v in version.split('.')]\n if file_ver[0] == exact_version_num[0] and file_ver[1] == exact_version_num[1] and file_ver[2] == exact_version_num[2]:\n out_files.append(file_ver[3])\n elif latest_version is not False: # LATEST (full) version, i.e., latest X.Y.Z\n if file_ver[0] == max_version[0] and file_ver[1] == max_version[1] and file_ver[2] == max_version[2]:\n out_files.append(file_ver[3])\n elif major_version is not False: # LATEST MAJOR version, i.e., latest X in vX.Y.Z\n if file_ver[0] >= max_major_version:\n out_files.append(file_ver[3])\n else:\n out_files.append(file_ver[3])\n\n return out_files", "def main():\n \"\"\"Removes the common prefix from each filename.\"\"\"\n \"\"\"Writes a new file with the stripped filenames.\"\"\"\n parser = OptionParser(usage='%prog [options] infile outfile')\n parser.add_option('-f', '--force', action='store_true', default=False, help='overwrite current outfile, if exists')\n\n # check inputs\n options, args = parser.parse_args() \n if len(args) != 2: parser.error('wrong number of positional arguments') \n\n infile = args[0]\n outfile = args[1]\n\n if exists(outfile) and not(options.force): \n print >>sys.stderr, 'Target %s already exists.' % outfile\n print >>sys.stderr, 'Use --force to overwrite.'\n sys.exit(1)\n\n if not(exists(infile)):\n print >>sys.stderr, 'File %s not found.' 
% infile \n sys.exit(1)\n\n infieldnames = ['filename', 'procname', 'lineno'] \n outfieldnames = ['filename', 'lineno']\n\n # read file\n instream = open(infile)\n reader = DictReader(instream, fieldnames=infieldnames)\n entries = list(reader) \n instream.close()\n\n # process entries\n fnames = map(lambda d: d['filename'], entries) \n prefix = commonprefix(fnames)\n\n # if there is only one file, the common prefix will include the filename \n # however, in the output we want to preserve the filename\n prefix, tail = split(prefix)\n\n for e in entries: \n tails = e['filename'].split(prefix) \n if not(tails[0] == ''): \n print >>sys.stderr, 'This prefix is uncommon!'\n sys.exit(1) \n e['filename'] = (tails[1].split('/'))[1] \n\n # print results\n outstream = open(outfile, 'w')\n writer = DictWriter(outstream, outfieldnames, extrasaction='ignore', lineterminator='\\n')\n writer.writerows(entries)\n outstream.close()", "def test_files_from_plate_truncate():\n plate_path = os.path.join(TEST_PATH_IX, \"test-plate-1\")\n output = filelister_ix.files_from_plate(plate_path)\n for f in output:\n assert len(f.split(os.sep)) == 4", "def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' 
% name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks", "def concat_chunks(file_list: list, output_path: str, verbose_level=0) -> str:\n temp_file_name = 'temp_' + str(len(file_list)) + \\\n str(int(round(time.time() * 1000))) + '.wav'\n files_str = ' '.join(file_list)\n if str(verbose_level) == '2':\n print('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n os.system('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n return temp_file_name", "def method4(fname):\n\t#jfrom cStringIO import StringIO\n\t#from tokenize import generate_tokens\n\timport re\n\tprint \"Method 4: read in files by line\"\n\tprint \"and rather than printing out all of it, only print out specific cols \"\n\tf = open(fname,\"r\")\n\tline = f.readline()\n\ti = 0 \n\t\n\twhile line != '':\n\t\ttmp= line.strip()\n\t\tif tmp :\n\t\t\t#print tmp\n\t\t\t#tmp = line.strip()\n\t\t\ttmpp = tmp.split()\n\t\t\t#i +=1\n\t\t\t#print len(tmpp)\n\t\t\tif len(tmpp) >1:\n\t\t\t\tprint tmpp[1]\n\t\t#tmp = line.split(' ')\n\t\t#i += 1\n\t\t#tmp = 'sdklsd sdjlks '\n\t\t#print len(tmp)\n\t\t#if len(tmp) > 1: \n\t\t\t#print tmp[1]\n\t\tline=f.readline()\n\t\n\tf.close()\n\tprint \"Method 4 done\"", "def merge_files(\n files: List[TextIOWrapper],\n ) -> Generator[Tuple[List[TextIOWrapper]], str, None]:\n\n result = []\n\n for index, file in enumerate(files):\n try:\n iterator = iter(file)\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()\n\n previous = None\n comment_count = 0\n max_comment_count = 2\n\n while result:\n ignore = False\n\n _, index, value, iterator, file = heapq.heappop(result)\n\n if remove_duplicates and value == previous:\n ignore = True\n\n if (\n write_header\n and comment_count < max_comment_count\n and value[0] == \"#\"\n ):\n ignore = True\n max_comment_count += 1\n\n if not ignore:\n yield value\n previous = value\n\n try:\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()", "def get_file_list(params):\n if params['mode'] == 'test':\n create_file_list(params)\n\n with open(params['file_list']) as flist:\n full_lines = [line.strip() for line in flist]\n\n full_lines = shuffle_lines(full_lines, params[\"shuffle_seed\"])\n\n # use only partial data for each trainer in distributed training\n if params['mode'] == 'train':\n real_trainer_num = max(trainers_num, 1)\n 
img_per_trainer = len(full_lines) // real_trainer_num\n full_lines = full_lines[trainer_id::real_trainer_num][:img_per_trainer]\n\n return full_lines", "def read_in_files():\n\n num_files = len([name for name in os.listdir(DATA_SOURCE) if name.endswith(\".txt\")])\n loading_section_size = num_files / 30\n count = 0\n\n sentences_as_lists = []\n for filename in os.listdir(DATA_SOURCE):\n if filename.endswith(\".txt\"):\n\n # Pretty loading bar\n print(\"Processing Files: [\", end=\"\")\n for i in range(31, -1, -1):\n if count > i * loading_section_size:\n for j in range(0, i):\n print(\"-\", end=\"\")\n sys.stdout.flush()\n for j in range(i, 30):\n print(\" \", end=\"\")\n sys.stdout.flush()\n break;\n if count == num_files:\n print(\"] \", count, end=\"\\n\")\n else:\n print(\"] \", count, end=\"\\r\")\n sys.stdout.flush()\n\n # Open the paper\n paper_to_open = DATA_SOURCE + filename\n paper = Reader().open_file_single_string(paper_to_open)\n udata = paper.decode(\"utf-8\")\n paper = udata.encode(\"ascii\", \"ignore\")\n\n # Split the data into a list of sentences, where each sentence is a list of words\n sentences = sent_tokenize(paper)\n\n for sentence in sentences:\n words = word_tokenize(sentence)\n sentences_as_lists.append(words)\n\n if DEBUG:\n print(sentences_as_lists)\n wait()\n\n count += 1\n\n return sentences_as_lists", "def cat_sff_files(list_of_file_handles):\r\n # mimicks lazy_parse_sff_handle on multiple files\r\n # Move to cogent???\r\n if (list_of_file_handles == []):\r\n return [], None\r\n try:\r\n flowgrams_and_headers = map(\r\n lazy_parse_sff_handle,\r\n list_of_file_handles)\r\n except ValueError:\r\n raise FileFormatError('Wrong flogram file format. Make sure you pass the sff.txt format ' +\r\n 'produced by sffinfo. The binary .sff will not work here.')\r\n\r\n flowgram_iterators = [a for a, b in flowgrams_and_headers]\r\n return chain(*flowgram_iterators), flowgrams_and_headers[0][1]", "def position_helper():\n for file_name in file_list[:1]:\n file_bits = file_splitter(file_name)\n line_length = len(max(file_bits, key=len)) + 13\n index = 0\n print('\\n' + ('-' * line_length))\n for x in file_bits:\n print('Index ', str(index), ' = ', file_bits[index])\n index += 1\n print(('-' * line_length) + '\\n')", "def get_pieces(pkg_root_dir, files, piece_length):\n s = b''\n for f in files:\n file_path = os.path.join(pkg_root_dir, os.sep.join(f['path']))\n with open(file_path, 'rb') as fd:\n buf = fd.read(piece_length)\n while len(buf) != 0:\n s += buf\n if len(s) >= piece_length:\n h = hashlib.sha1()\n h.update(s[:piece_length])\n yield h.digest()\n s = s[piece_length:]\n buf = fd.read(piece_length)\n h = hashlib.sha1()\n h.update(s[:piece_length])\n yield h.digest()", "def main(inputfname, outfname):\n with open(inputfname, 'rt', encoding='utf8') as fh:\n # first block\n reviews = []\n while True:\n comment = next(fh).strip()\n if not comment:\n # blank line, block separator\n break\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n reviews.append((comment, url_moviedb, movie_id))\n\n # second block\n futures = []\n while True:\n try:\n title = next(fh).strip()\n except StopIteration:\n break\n if not title:\n continue\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n futures.append((title, url_moviedb, movie_id))\n\n lines, viewed = process_reviews(reviews)\n lines.append(\"\")\n lines.extend(process_futures(futures))\n lines.append(\"\")\n\n pelis_lines, raw_pending = proc_pelshtml(futures, 
viewed)\n\n lines.extend(line.format(enter='', space=' ') for line in raw_pending)\n lines.append(\"\")\n lines.extend(pelis_lines)\n lines.extend(line.format(enter='<br/>', space='&nbsp;') for line in raw_pending)\n\n with open(outfname, 'wt', encoding='utf8') as fh:\n fh.write(\"\\n\".join(lines))", "def print_solutions(file_):\n with open(file_, 'r') as inp:\n for line in inp:\n print(line[:-5] + str(process_line(line)))", "def de_flip_file(n):\n start = '0'*(n//2) + '1' + '0'*(n//2) # 00100 if n == 5\n f_name = 'C:/Users/clean/Desktop/dede.txt'\n f_tmp = 'C:/Users/clean/Desktop/dede2.txt'\n f_result = 'C:/Users/clean/Desktop/de_flip.txt'\n\n #L = [[start]]\n with open(f_name,'w') as f:\n f.write(start + '\\n')\n\n #L_tmp = []\n with open(f_tmp,'w') as ftmp:\n pass\n \n #collect = []\n with open(f_result,'w') as f_end:\n pass\n \n \n length = 1\n \n while(True):\n count = 0\n check_end = True\n\n with open(f_name) as f:\n for branch in f:\n L_branch = branch.split()\n next_0 = L_branch[-1][1:] + '0'\n next_1 = L_branch[-1][1:] + '1'\n\n if next_0 == start:\n with open(f_result,'a') as f_end:\n f_end.write(branch)\n count += 1\n continue\n\n if next_0 not in L_branch and next_0[::-1] not in L_branch:\n check_end = False\n with open(f_tmp,'a') as ftmp:\n ftmp.write(branch[:-1] + ' ' + next_0 + '\\n')\n\n if next_1 not in L_branch and next_1[::-1] not in L_branch:\n check_end = False\n with open(f_tmp,'a') as ftmp:\n ftmp.write(branch[:-1] + ' ' + next_1 + '\\n')\n \n if check_end == True:\n break\n \n shutil.copyfile(f_tmp, f_name) # 오른쪽으로 복사\n\n with open(f_tmp,'w') as ftmp:\n pass\n\n print(\"length : {0}, count : {1}\".format(length,count))\n length += 1\n return None", "def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = 
glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide", "def checkdifferences(oldfile, changelist, num):\n if num == 1: #combining the unique values of a list & file into 1 list\n newcontent = changelist\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n combined = combinelists(currentcontent, newcontent)\n return combined\n if num == 2: # combine the unique values of 2 files into 1 list\n currentcontent = csv_read('{}.csv'.format(changelist)) #clientlist\n combined = []\n for each in currentcontent:\n # for elk in each:\n combined + each\n newlst = combinelists(currentcontent, combined)\n return newlst\n if num == 3: # removing the doubles from each list\n currentcontent = csv_read('{}.csv'.format(oldfile)) # ReadyForAck\n changecontent = changelist\n newlist = dividelists(currentcontent, changecontent)\n return newlist", "def process_canonical_file(version, filename):\n log(\"Processing canonical file %s, version %d\" % (filename, version))\n f = open(filename, 'r')\n all_lines = f.readlines()\n contents = \" \".join(all_lines)\n identifiers.add_identifiers(of_g.identifiers, of_g.identifiers_by_group,\n version, contents)", "def baselines_from_filelist_position(filename, filelist):\n # The reason this function is not in utils is that it needs to use HERAData\n hd = HERAData(filename)\n bls = list(set([bl[:2] for bl in hd.bls]))\n file_index = filelist.index(filename)\n nfiles = len(filelist)\n # Determine chunk size\n nbls = len(bls)\n chunk_size = nbls // nfiles + 1\n lower_index = file_index * chunk_size\n upper_index = np.min([(file_index + 1) * chunk_size, nbls])\n output = bls[lower_index:upper_index]\n return output", "def clean_file(filesnames_list, file_type): # so now not needed.\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for line in filesnames_list:\r\n s, fileType = line.split('.') # split off file_type here\r\n print(s)\r\n files_list.append(s)\r\n ft_list.append(fileType)\r\n print(files_list)\r\n return (files_list)", "def merge_files(filename_list, merged_file, encode):\n lines_counter = list()\n for file_name in filename_list:\n lines_counter.append(count_lines(file_name))\n lines_counter.sort(key=lambda item: item[-1])\n with open(merged_file, 'w', encoding=encode) as file:\n for doc in lines_counter:\n file.write(f'{doc[0]}\\n')\n file.write(f'{doc[1]}\\n')\n text = get_text(doc[0])\n file.write(f'{text}\\n\\n')", "def processSetOfCerFiles(files):\n printHeader()\n \n k = 0\n for f in files:\n k = k + 1\n sz = get_file_size(f)\n with open(f, 'rb') as fb:\n 
processCerFile(k, fb, sz=sz)", "def check_file_header(fnames, nlines=5):\n from itertools import islice\n for fname in fnames:\n print(f\"\\nPrinting header from {fname} \\n#########################################\")\n with open(fname) as f:\n head = list(islice(f, nlines))\n for line in head:\n print(line)", "def ReadInLASTFile(FileName):\n FinalGroups = []\n with open(FileName, 'r') as f:\n SmalllList = []\n for line in f:\n if line.startswith('#'):\n pass\n else:\n CleanLine = line.strip().split()\n if len(CleanLine) != 0 :\n SmalllList.append(CleanLine)\n else:\n FinalGroups.append(SmalllList)\n SmalllList = []\n\n return FinalGroups", "def merge_one_sensor(slist):\n r = strip_file(slist[0],leave_header=True)\n for s in slist[1:]:\n r += strip_file(s,leave_header=False)\n return r", "def main():\n try:\n filename = sys.argv[1]\n except IndexError:\n sys.exit(\"Usage: TODO\")\n\n with codecs.open(filename, \"r+\", encoding=\"utf8\") as f:\n nb = read(f, as_version=NO_CONVERT)\n stripped = strip_output(nb)\n return stripped", "def processfile(args, fh):\n if args.quick:\n scanner = quickScanZip(args, fh)\n else:\n scanner = findPKHeaders(args, fh)\n\n def checkarg(arg, ent):\n if not arg:\n return False\n return '*' in arg or ent.name in arg\n def checkname(a, b):\n if a and '*' in a: return True\n if b and '*' in b: return True\n l = 0\n if a: l += len(a)\n if b: l += len(b)\n return l > 1\n\n if args.verbose and not (args.cat or args.raw or args.save):\n print(\" 0304 need flgs mth stamp --crc-- compsize fullsize nlen xlen namofs xofs datofs endofs\")\n print(\" 0102 crea need flgs mth stamp --crc-- compsize fullsize nlen xlen clen dsk0 attr osattr datptr namofs xofs cmtofs endofs\")\n for ent in scanner:\n if args.cat or args.raw or args.save:\n if args.quick and isinstance(ent, CentralDirEntry) or \\\n not args.quick and isinstance(ent, LocalFileHeader):\n ent.loaditems(fh)\n do_cat = checkarg(args.cat, ent)\n do_raw = checkarg(args.raw, ent)\n do_save= checkarg(args.save, ent)\n\n do_name= checkname(args.cat, args.raw)\n\n if do_name:\n print(\"\\n===> \" + ent.name + \" <===\\n\")\n\n sys.stdout.flush()\n blks = zipraw(fh, ent)\n\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n if do_cat or do_save:\n blks = skipbytes(blks, 12, args)\n\n if do_cat:\n sys.stdout.buffer.writelines(zipcat(blks, ent))\n if do_raw:\n sys.stdout.buffer.writelines(blks)\n if do_save:\n savefile(args.outputdir, ent.name, zipcat(blks, ent))\n else:\n ent.loaditems(fh)\n if args.verbose or not args.quick:\n print(\"%08x: %s\" % (ent.pkOffset, ent))\n else:\n print(ent.summary())\n if hasattr(ent, \"comment\") and ent.comment and not args.dumpraw:\n print(ent.comment)\n if args.dumpraw and hasattr(ent, \"extraLength\") and ent.extraLength:\n print(\"%08x: XTRA: %s\" % (ent.extraOffset, binascii.b2a_hex(getbytes(fh, ent.extraOffset, ent.extraLength))))\n if args.dumpraw and hasattr(ent, \"comment\") and ent.comment:\n print(\"%08x: CMT: %s\" % (ent.commentOffset, binascii.b2a_hex(getbytes(fh, ent.commentOffset, ent.commentLength))))\n if args.dumpraw and isinstance(ent, LocalFileHeader):\n blks = zipraw(fh, ent)\n if args.password and ent.flags&1:\n blks = zip_decrypt(blks, args.password)\n\n blockdump(ent.dataOffset, blks)", "def read_concat_file(self):\n\n file_list = []\n for i in self.IDs[0:3]:\n with open(i, 'r') as cf:\n cf = cf.read()\n file_list.append(cf)\n return file_list", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n 
input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 
'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n fullwordlist.extend(holder)\n return fullwordlist", "def removeSeqsInFile(filename,listname):\n from Bio import SeqIO\n mylist = open(listname).read().split()\n myfas = list(SeqIO.parse(filename,\"fasta\"))\n fo = open(filename+\"_out\",\"w\")\n for ele in myfas:\n if ele.id not in mylist:\n fo.write(\">\"+ele.description+\"\\n\"+str(ele.seq)+\"\\n\")\n fo.close()\n return None", "def main():\n # get_history_using_HTTP()\n # merge_files()\n # remove_lines()\n remove_duplicated_lines()", "def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist", "def display_algn_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n \n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile2')\n j = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(j, 'r') as fil:\n d = {'CYS':'C','ASP':'D','SER':'S','GLN':'Q','LYS':'K','ILE':'I','PRO':'P','THR':'T','PHE':'F','ASN':'N',\n 'GLY':'G','HIS':'H','LEU':'L','ARG':'R','TRP':'W','TER':'*','ALA':'A','VAL':'V','GLU':'E','TYR':'Y',\n 'MET':'M','XAA':'X'}\n with open(filepath2, 'w') as outf:\n for line in fil:\n if len(line) %3 == 0:\n upper_seq = line.upper()\n single_seq = ''\n for i in 
range(int(len(upper_seq)/3)):\n single_seq += d[upper_seq[3*i:3*i+3]]\n outf.write(single_seq) \n return single_seq\n else:\n print(\"ERROR: Line was not a factor of 3 in length!\")", "def interpretor(file_list):\n for i in range(len(file_list)):\n l_seq = 0\n l_var = 0\n l_ind = 0\n inds = 0 #This variable is used to specify wheter there are more than 1 \"-\" in a row.\n with open(\"alignments/\"+file_list[i],'r') as f:\n regel = f.read().split() #Viewing each file as a whole.\n for item in regel[5:]: #Only from the 5th element there is relevant information present.\n if item.startswith(\"*\"):\n l_var += (len(item))\n elif item[0].isupper() or item[0] == \"-\": #only lines that starts with capital letters or - are sequence lines.\n for char in item: #Viewing individual character in list item.\n if char == \"-\" or char.isupper(): \n l_seq += 1\n if char == \"-\":\n inds+=1\n elif char.isupper() and inds != 0: # if inds > 1. This means there are more than 1 \"-\" in a row.\n l_ind+=1 # This is important because the program needs to reconginze this as 1 indel.\n inds = 0 # Reset the indel count.\n\n fill_var_calls(file_list[i],l_seq,l_var,l_ind) #After each iteration the the file_var_calls method is executed.", "def full_info(files: List[str], args, dir_: str ='.') -> List[str]:\n temp_info = []\n for item in files:\n f_info = {}\n f_st = os.stat(os.path.join(CURRENT_DIR, dir_, item))\n f_info['mpde'] = f'{stat.filemode(f_st.st_mode):10}'\n f_info['nlink'] = f'{f_st.st_nlink:>3}'\n f_info['uid'] = f'{f_st.st_uid:>3}'\n size = f_st.st_size\n if args.block_size:\n size = ceil(size / args.block_size)\n f_info['size'] = f'{size:>8}'\n date = dt.datetime.fromtimestamp(f_st.st_mtime)\n if (dt.datetime.now() - date).days / 30 > 6:\n date_format = '%b %d %Y'\n else:\n date_format = '%b %d %I:%M'\n f_info['time'] = f'{date.strftime(date_format)} '\n f_info['name'] = f'{item:<}'\n temp_info.append(\n ' '.join([f_info['mpde'], f_info['nlink'], f_info['uid'],\n f_info['size'], f_info['time'], f_info['name']])\n )\n temp_info.append('\\n')\n return temp_info", "def fastLoad(f_list):\n\n data_list = []\n t_1 = datetime.now()\n for i, f in enumerate(f_list):\n t_data = loadFile(f)\n data_list.extend(t_data)\n data_list = [dict(r) for r in set([tuple(d.items()) for d in data_list])]\n print i, datetime.now() - t_1, \"removing duplicates...\"\n print \"Done removing duplicates.\"\n return data_list", "def read_lines(filename=\"\", nb_lines=0):\n with open(filename, \"r\", encoding=\"utf-8\") as file1:\n lines = file1.readlines()\n if nb_lines <= 0 or nb_lines > len(lines):\n print(\"\".join(lines), end='')\n else:\n print(\"\".join(lines[:nb_lines]), end='')", "def print_file(f):\n print marker\n for files in f:\n print files\n print marker", "def multipleFileReadLines(filePaths): \n \n buffers = [] \n filePositions = [] \n \n for filePath in filePaths: \n lines, filePosition= readMultipleFileLinesAndPositions(filePath) \n buffers.append(lines) \n filePositions.append(filePosition) \n \n linesRemaining = True \n \n while linesRemaining: \n currentLines = [] \n for i,fileBuffer in enumerate(buffers): \n currentLines.append(fileBuffer[0].strip()) \n \n del fileBuffer[0] \n \n if ( not(fileBuffer) and linesRemaining): \n lines, filePosition = readMultipleFileLinesAndPositions(filePaths[i],filePositions[i]) \n buffers[i] = lines \n filePositions[i] = filePosition \n linesRemaining = bool(lines) \n \n yield currentLines", "def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n 
ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()", "def test_split_fasta_diff_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 2, filename_prefix)\r\n\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(2)]\r\n # list of file paths is as expected\r\n self.assertEqual(actual, expected)\r\n # building seq collections from infile and the split files result in\r\n # equivalent seq collections\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n ['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, 
EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def test_split_fasta_equal_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 1, filename_prefix)\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n 
expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(3)]\r\n\r\n self.assertEqual(actual, expected)\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def unique(fname):\n addresses = []\n with gzip.open(fname, \"rb\") as f:\n lines = f.readlines()\n for line in lines:\n #print(\"[\"+line.split()[1]+\"]\")\n if line.split()[0] not in addresses:\n addresses.append(line.split()[0])\n return addresses", "def readVersionList(filename):\n\ttry:\n\t\tlines = []\n\t\tif os.path.isfile(filename):\n\t\t\twith open(r''+ filename, 'r') as f:\n\t\t\t\tlines = f.readlines()\n\t\treturn lines\n\texcept IOError as e:\n\t\tprint(traceback.format_exc())\n\t\tinfo = filename + 'can\\'t open'\n\t\tdoExit(0, info)", "def _sift(self, fileslist, **arguments):\n\n def sort(reverse, arg, fileslist=fileslist):\n tdict = {fileslist[i][arg] : i for i in xrange(len(fileslist))}\n keys = tdict.keys()\n keys.sort(reverse=reverse)\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n return fileslist\n\n # for time\n if arguments.get('name'):\n reverse = None\n if arguments['name'] == 'reverse':\n reverse = True\n elif arguments['name'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'server_filename')\n\n # for size\n if arguments.get('size'):\n reverse = None\n if arguments['size'] == 'reverse':\n reverse = True\n elif arguments['size'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'size')\n\n # for size\n if arguments.get('time'):\n reverse = None\n if arguments['time'] == 'reverse':\n reverse = True\n elif arguments['time'] == 'no_reverse':\n reverse = False\n fileslist = sort(reverse, 'local_mtime')\n\n # for head, tail, include, exclude\n head = args.head\n tail = args.tail\n include = args.include\n exclude = args.exclude\n if head or tail or include or exclude:\n tdict = {fileslist[i]['server_filename'] : i for i in xrange(len(fileslist))}\n keys1 = [i for i in tdict.keys() if i.lower().startswith(head.encode('utf8').lower())] \\\n if head else []\n keys2 = [i for i in tdict.keys() if i.lower().endswith(tail.decode('utf8').lower())] \\\n if tail else []\n keys3 = [i for i in tdict.keys() if re.search(include, i.encode('utf8'), flags=re.I)] \\\n if include else []\n keys4 = [i for i in tdict.keys() if not re.search(exclude, i.encode('utf8'), flags=re.I)] \\\n if exclude else []\n\n # intersection\n keys = [i for i in [keys1, keys2, keys3, keys4] if i]\n if len(keys) > 1:\n tkeys = keys[0]\n for i in keys:\n tkeys &= i\n keys = tkeys\n elif len(keys) == 1:\n keys = keys[0]\n elif len(keys) == 0:\n keys = []\n\n indexs = [tdict[i] for i in keys]\n fileslist = [fileslist[i] for i in indexs]\n\n dirs = [i for i in fileslist if i['isdir']]\n files = [i for i in fileslist if not i['isdir']]\n if arguments.get('desc') == 1:\n dirs.reverse()\n files.reverse()\n fileslist = dirs + files\n\n return fileslist", "def print_file(list):\n chr_name_list = ['SL2.40ch00','SL2.40ch01','SL2.40ch02','SL2.40ch03','SL2.40ch04','SL2.40ch05','SL2.40ch06','SL2.40ch07','SL2.40ch08','SL2.40ch09','SL2.40ch10','SL2.40ch11','SL2.40ch12']\n for index,chr_list in enumerate(list):\n if chr_list:\n chr = chr_name_list[index]\n for loci in chr_list:\n print \"%s\\t%d\\t%d\\t%s\\t%d\" % (chr,loci[0],loci[1],'\\t'.join(loci[2]),len(loci[2])-loci[2].count('0'))", "def main(\n file_path: str,\n delimiter: str = \" \",\n prefix: str = None,\n postfix: str = None,\n 
min_parts: int = 0,\n max_parts: int = None,\n) -> None:\n lines = read_lines(file_path)\n lines_with_text = (line for line in lines if len(line) > 0)\n line_combinations = combine_strings(\n lines_with_text, delimiter=delimiter, min_parts=min_parts, max_parts=max_parts\n )\n for combination in line_combinations:\n print_combination(\n combination, delimiter=delimiter, prefix=prefix, postfix=postfix\n )", "def read_list_file(path_file):\n with open(path_file,'r') as f_in:\n lines = f_in.readlines()\n lines = [x for x in lines if not (x.strip() == '' or x.strip()[0] == '#')]\n left_file_list = []\n right_file_list = []\n gt_file_list = []\n conf_file_list = []\n for l in lines:\n to_load = re.split(',|;',l.strip())\n left_file_list.append(to_load[0])\n right_file_list.append(to_load[1])\n if len(to_load)>2:\n gt_file_list.append(to_load[2])\n if len(to_load)>3:\n conf_file_list.append(to_load[3])\n return left_file_list,right_file_list,gt_file_list,conf_file_list", "def _make_input_file_list(binnedfile, num_files):\n outdir_base = os.path.abspath(os.path.dirname(binnedfile))\n outbasename = os.path.basename(binnedfile)\n filelist = \"\"\n for i in range(num_files):\n split_key = \"%06i\" % i\n output_dir = os.path.join(outdir_base, split_key)\n filepath = os.path.join(output_dir,\n outbasename.replace('.fits', '_%s.fits' % split_key))\n filelist += ' %s' % filepath\n return filelist", "def prepare_file(lines):\n return \" \".join(line.strip() for line in lines)", "def fasta_seqs(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n # sequence will be stored in x[1], and i am removing nextline '\\n' characters that comes with it.\n list.append(x[1].replace('\\n', ''))\n except:\n pass\n return list", "def read_output_file(fname):\n file = open('c:/4nec2/out/' + fname + '.out','r')\n rs=[] #required string\n f=False #have the required strings been found\n rp = 0 #string point\n \n for num, line in enumerate(file, 1):\n if 'FREQ.' 
in line:\n rp = num\n\n if num == rp + 4 and rp > 0:\n f = True\n\n if num > rp+4 and f and line==\"\\n\":\n f= False\n \n if f:\n rs.append(line)\n \n file.close()\n\n return rs", "def duplicated_line(filename):\n duplicate=0\n with open(filename,encoding=\"utf-8\",errors='ignore') as f:\n scripts=f.readlines()\n #Removes whitespace and blank line \n scripty = filter(lambda x: not re.match(r'^\\s*$', x), scripts)\n #Removes Comments\n script = filter(lambda x: not re.match(r'(?m)^ *#.*\\n?', x), scripty)\n script=list(script)\n with open(filename,encoding=\"utf8\",errors='ignore') as f:\n files=f.readlines()\n #Removes whitespace and blank line \n filey = filter(lambda x: not re.match(r'^\\s*$', x), files)\n #Removes Comments\n file = filter(lambda x: not re.match(r'(?m)^ *#.*\\n?', x), filey)\n file=list(file)\n for cnt, line in enumerate(file):\n if cnt <= len(file)-4:\n for i,item in enumerate(script):\n #Dont compare with that same line and the next 3 line, and don't compare with the last 3 lines\n if cnt != i and i!=cnt+1 and i!=cnt+2 and i!=cnt+3 and i<= len(script)-4 :\n if line == item and file[cnt+1]==script[i+1] and file[cnt+2]==script[i+2] and file[cnt+3]==script[i+3]:\n duplicate+=4\n #delete the duplicates in file and script\n del file[i:i+4]\n del script[i:i+4]\n\n return duplicate", "def _filter_seqs(fn):\n out_file = op.splitext(fn)[0] + \"_unique.fa\"\n idx = 0\n if not file_exists(out_file):\n with open(out_file, 'w') as out_handle:\n with open(fn) as in_handle:\n line = in_handle.readline()\n while line:\n if line.startswith(\"@\") or line.startswith(\">\"):\n fixed_name = _make_unique(line.strip(), idx)\n seq = in_handle.readline().strip()\n counts = _get_freq(fixed_name)\n if len(seq) < 26 and (counts > 1 or counts == 0):\n idx += 1\n print(fixed_name, file=out_handle, end=\"\\n\")\n print(seq, file=out_handle, end=\"\\n\")\n if line.startswith(\"@\"):\n in_handle.readline()\n in_handle.readline()\n line = in_handle.readline()\n return out_file", "def test_get_file_copy_list(self):\n \n so = sys.stdout\n dn = open(os.devnull,\"w\")\n \n # Create a file hierarchy to search for files\n root = tempfile.mkdtemp(prefix=\"test_casava_data_delivery_\")\n date = \"111111\"\n fcs = [\"{}_{}\".format(date,fcid) for fcid in [\"FCA\",\"FCB\"]]\n \n # Create some sample files\n exp_files = []\n samples = []\n for n in xrange(2):\n sample = tempfile.mkdtemp(dir=root)\n samples.append(os.path.basename(sample))\n for fcid in fcs:\n fcdir = os.path.join(sample,fcid)\n nophixdir = os.path.join(fcdir,\"nophix\")\n for d in [fcdir,nophixdir]:\n os.makedirs(d)\n test_names = [\"{:d}_{:s}_1_1_fastq.txt.gz\".format(random.randint(1,8),\n fcid),\n \"{}_CGATGT_L001_R1_001.fastq.gz\".format(samples[-1]),\n \"{}_CGATGT_L001_R1_001.fastq..gz\".format(samples[-1]),]\n for test_name in test_names:\n test_file = os.path.join(d,test_name)\n open(test_file,\"w\").close()\n exp_files.append([samples[-1],\n fcid,\n os.path.basename(d) == \"nophix\",\n test_file,\n os.path.join(samples[-1],fcid),\n create_final_name(os.path.basename(test_name),date,fcid.split(\"_\")[-1],samples[-1])])\n \n # Get the list of files to copy under various conditions\n \n for deliver_all_fcs in [False, True]:\n for fcid in fcs:\n for deliver_nophix in [False, True]:\n for skip_sample_list in [[],[samples[0]],[samples[1]],samples]:\n sys.stdout = dn\n obs_to_copy = sorted(get_file_copy_list(root,\"\",fcid,deliver_all_fcs,deliver_nophix,skip_sample_list))\n sys.stdout = so\n exp_to_copy = sorted([ef[3:6] for ef in exp_files if 
(deliver_all_fcs or ef[1] == fcid) and \\\n deliver_nophix == ef[2] and \\\n ef[0] not in skip_sample_list])\n #import pdb; pdb.set_trace()\n self.assertListEqual(obs_to_copy,\n exp_to_copy,\n \"The files to copy result did not match the expected for \" \\\n \"{:s}\".format(\", \".join([\"{:s}: {:s}\".format(k,v) for k, v in \\\n dict(zip([\"deliver_all_fcs\",\n \"fcid\",\n \"deliver_nophix\",\n \"skip_samples\"],\n [str(deliver_all_fcs),\n fcid,\n str(deliver_nophix),\n \" \".join(skip_sample_list)])).items()])))", "def __write_dupe_file(self, filename):\n sortedList = sorted(self.dupeList, key=lambda file: file[0])\n with open(filename, mode='w') as outfile:\n for size, md5, filename, ino in sortedList:\n outfile.write(\"%s %s %s %s\\n\" % (size, md5, ino, filename))", "def merge_files_in_order(pdf_list, list_only, output_file):\n output_file = output_file + \".pdf\"\n if not list_only:\n output = PdfFileWriter()\n outputStream = file(output_file, \"wb\")\n total_pages = 0 \n for pdf_in in pdf_list:\n try:\n pdf = PdfFileReader(file(pdf_in, \"rb\"))\n num_pages = pdf.getNumPages()\n except IOError:\n print \"skipping \", pdf_in\n continue\n if list_only:\n print pdf_in, ':', num_pages\n else:\n for i in range(num_pages):\n output.addPage(pdf.getPage(i))\n output.write(outputStream)\n total_pages += num_pages\n would_be = \"would be\"\n if not list_only:\n outputStream.close()\n would_be = \"\"\n print total_pages, \"pages\", would_be, \"written to\", output_file", "def char_strip(self):\n\n if not self.file_list:\n self.print_to_log(\"No files fit parameters, exiting\")\n return None\n\n\n result = []\n\n #pass list of files, set to inplace, and byte mode\n fi = fileinput.FileInput(self.file_list,\n inplace=1,\n mode='U')\n fname = \"\"\n count = 0\n self.error = 0\n for line in fi:\n\n #create info for logging\n if fi.isfirstline():\n #skip for first file\n if fi.lineno() > 1:\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, self.char_to_strip, self.char_for_replace, str(count)))\n count = 0\n fname = fi.filename()\n ltemp = ''\n #test and replace\n for char in line:\n if char == self.char_to_strip:\n count += 1\n #if you need to handle occurrences in the batch file\n self.error = 1\n char = self.char_for_replace\n ltemp += char\n sys.stdout.write(ltemp)\n fname = fi.filename()\n #logging for last file\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, self.char_to_strip, self.char_for_replace, str(count)))\n fi.close()\n #write out to log\n for item in result:\n self.print_to_log(item)", "def extract_programs(outputf):\t\n programs = []\n with open(outputf,'r') as f:\n\t combo_lines = f.readlines()\n for combo_line in combo_lines:\n combo = combo_line.split(' ',1)[1]\n\t programs.append(combo)\n return programs", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def findDuplicateWorkingFiles(self, initialList, curInfix, newInfix):\n Duplicate_List = []\n for fname in initialList:\n infixStream = iccs_apex.whatInfixIsStream(fname)\n if (infixStream == curInfix):\n prefixStream, postfixStream = string.split(fname, infixStream)\n A_File_Name = prefixStream + newInfix + postfixStream\n if (os.path.exists(A_File_Name)):\n Duplicate_List = Duplicate_List + [A_File_Name]\n \n return Duplicate_List", 
"def compareFiles(baseFile_path, testTempFile):\n baseFile = open(baseFile_path, \"r\")\n testTempFile.seek(0) \n## only lines that have changed\n testoutput = []\n testTempFile.seek(0) \n baseFile.seek(0)\n m_base = baseFile.readlines()\n clean_base = []\n m_temp = testTempFile.readlines() \n clean_temp = []\n ignore_chars = '\\n\\t '\n for line in m_base:\n if not line == '\\n':\n clean_base += [line.strip(ignore_chars)]\n for line in m_temp: \n if not line == '\\n':\n clean_temp += [line.strip(ignore_chars)] \t\n for line in difflib.context_diff(clean_base, clean_temp):\n testoutput += [line] \n \n## all lines diff \n# diff = difflib.ndiff(baseFile.readlines(), testTempFile.readlines())\n# print ''.join(diff)\n baseFile.close() \n diffFile_name = baseFile_path.replace(\"_Base.output\",\".diff\")\n diffFile = open(diffFile_name, \"w\")\n \n if len(testoutput) > 1:\n for line in difflib.context_diff(m_base, m_temp):\n print line\n diffFile.write(line)\n diffFile.close() \n assert ( len(testoutput) == 1 )", "def test_split_fasta_diff_num_seqs_per_file_alt(self):\r\n # start with 59 seqs (b/c it's prime, so should make more\r\n # confusing splits)\r\n in_seqs = SequenceCollection.from_fasta_records(\r\n [('seq%s' % k, 'AACCTTAA') for k in range(59)], DNA)\r\n infile = in_seqs.to_fasta().split('\\n')\r\n\r\n # test seqs_per_file from 1 to 1000\r\n for i in range(1, 1000):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n\r\n actual = split_fasta(infile, i, filename_prefix)\r\n\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n # remove the files now, so if the test fails they still get\r\n # cleaned up\r\n remove_files(actual)\r\n\r\n # building seq collections from infile and the split files result in\r\n # equivalent seq collections\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))", "def files_to_map(\n files,\n despike_l1b=False,\n only_long_exposures=False,\n only_short_exposures=False,\n only_short_flare_exposures=False,\n):\n # Avoid circular imports\n from sunkit_instruments.suvi.suvi import despike_l1b_array\n\n if isinstance(files, str):\n files = [files]\n files = sorted(files)\n if any(fn in os.path.basename(files[0]) for fn in COMPOSITE_MATCHES):\n composites = True\n elif any(fn in os.path.basename(files[0]) for fn in L1B_MATCHES):\n composites = False\n else:\n raise ValueError(\n f\"First file {files[0]} does not look like a SUVI L1b file or L2 HDR composite.\"\n )\n\n datas = []\n headers = []\n for afile in files:\n logging.debug(f\"Reading {afile}\")\n if composites:\n if any(fn in os.path.basename(afile) for fn in COMPOSITE_MATCHES):\n header, data, _ = read_suvi(afile)\n datas.append(data)\n headers.append(header)\n else:\n warn_user(\n f\"File {afile} does not look like a SUVI L2 HDR composite. 
Skipping.\"\n )\n else:\n if any(fn in os.path.basename(afile) for fn in L1B_MATCHES):\n header, data, dqf_mask = read_suvi(afile)\n if despike_l1b:\n data = despike_l1b_array(data, dqf_mask)\n if only_long_exposures:\n if \"long_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_exposures:\n if \"short_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_flare_exposures:\n if \"short_flare_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n else:\n datas.append(data)\n headers.append(header)\n else:\n warn_user(f\"File {afile} does not look like a SUVI L1b file. Skipping.\")\n if len(datas) == 1:\n return sunpy.map.Map(datas[0], headers[0])\n elif len(datas) > 1:\n return sunpy.map.Map(list(zip(datas, headers)), sequence=True)\n else:\n warn_user(\"List of data/headers is empty.\")", "def head(file_name):\n #from itertools import islice\n with open('../test_files/' + file_name, 'r') as infile:\n list = infile.readlines()\n #printing the 1st 10 lines\n print('list of first 10 lines',list[:10])", "def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()", "def mergeAllSortedFiles():\n entries = os.listdir('output/Temp/input')\n for entry in entries:\n arr = []\n with open(\"output/Temp/input/\" + entry) as file:\n for line in file:\n line = int(line.strip())\n arr.append(line)\n mergeSortedToFile(arr)", "def get_lines(fhandle):\n lines = fhandle.read().split('\\n')\n n_lines = len(lines)\n lines = set(lines)\n n_duplicates = n_lines - len(lines)\n return lines, n_lines, n_duplicates", "def process_all_leading_genes(f_path):\n with open(f_path, 'r') as f:\n contents = f.read()\n parts = contents.strip().split('\\t')\n genes = parts[2:]\n return genes", "def trimmer(fileNames=[]):\n\n if not fileNames:\n return None\n pathtofile, file = os.path.split(fileNames[0])\n\n \"\"\"\n The files will be created in the folder whose name is stored in the variable 'genFolder'\n \"\"\"\n destDir = os.path.join(pathtofile, genFolder)\n if not os.path.exists(destDir):\n os.mkdir(destDir)\n\n \"\"\"\n Create the three files and 
open them in write mode\n \"\"\"\n facultyFilename = os.path.join(destDir, \"Faculty_Hash_File.csv\")\n\n facultyWriter = csv.writer(open(facultyFilename, 'w', newline=''))\n facultyWriter.writerow(outHashFileColumns)\n\n for eachfile in fileNames:\n iter_record = iter(get_filtered_record(eachfile))\n\n \"\"\"\n Read the first row. It contains only the column names.\n Column names should not contain any special characters.\n \"\"\"\n inputFileColumns = next(iter_record)\n str_inputFileColumns = ' '.join(inputFileColumns)\n\n if not set(requiredHashFileColumns).issubset(inputFileColumns):\n print(\"Unable to Find all Required Columns in file \" + eachfile +\n \"\\nColumns Required: \" + requiredHashColumns +\n \"\\nColumns Found: \" + str_inputFileColumns)\n continue\n\n \"\"\"\n 1. Get the indexes of required data.\n 2. Save them in a named tuple that can give the index based on column name.\n 3. 'recordFileColumns' contains the list of strings that are the names of the required columns for processing.\n 4. Get the Index of these columns in the current file and save them in a named tuple.\n \"\"\"\n indexes = [inputFileColumns.index(i) for i in requiredHashFileColumns]\n recIndex = RequiredHashRecord(*indexes)\n\n for row in iter_record:\n\n hashlist = [row[recIndex.category], row[recIndex.firstname], row[recIndex.lastname], row[recIndex.department]]\n hashcode = get_hash(hashlist)\n writeRowContent = [row[recIndex.category], row[recIndex.firstname], row[recIndex.lastname], row[recIndex.department], row[recIndex.uid], hashcode]\n\n facultyWriter.writerow(writeRowContent)\n\n return [facultyFilename]", "def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)", "def filter_lines(in_filename, in_filename2,out_filename):\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(\",\")\n fips = vals[0]\n if(fips not in fourteen_set):\n fourteen_set.add(fips)\n \n for line in in_f2:\n vals = line.strip().split(\",\")\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if(fips not in fourteen_set):\n new_line = str(fips)+\",\"+str(count)+\"\\n\"\n out_f.write(new_line)\n missing_convert += 1\n\n return (proper_convert, missing_convert)", "def _find_multilane_groups(files):\n\n pattern_multilane = re.compile(r\"[._]L(\\d+)[._]\")\n pattern_pair_lane_combo = re.compile(r\"([._][rR][12])?[._]L\\d+[._]([rR][12])?\")\n\n def _group_for(file_path):\n \"\"\"Create group names by removing Lx and Rx elements from the filename.\"\"\"\n return re.sub(pattern_pair_lane_combo, \"\", os.path.basename(file_path))\n\n def _create_group_map(elem_list, paired):\n \"\"\"Create multilane file groups with elements in proper order based on file list.\"\"\"\n # Create groups for the multilane files\n group_map = defaultdict(list)\n for elem in elem_list:\n search_elem = elem if not paired else elem[0]\n if pattern_multilane.search(search_elem):\n group = _group_for(search_elem)\n group_map[group].append(elem)\n\n # Only multifile groups are returned\n return 
{\n group: sorted(elems, key=lambda x: x[0] if paired else x)\n for group, elems in group_map.items()\n if len(elems) > 1\n }\n\n def _with_gaps_removed(group_map, paired):\n \"\"\"Return a new map having groups with gaps in elements removed.\"\"\"\n gapped_groups = set()\n for group, elems in group_map.items():\n # Verify we're getting 1, 2, 3, ...\n expected_sequence = list(range(1, len(elems) + 1))\n if paired:\n fwd_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [fwd for fwd, _ in elems]\n ]\n rev_nums = [\n int(pattern_multilane.search(se).group(1)) for se in [rev for _, rev in elems]\n ]\n if fwd_nums != expected_sequence or rev_nums != expected_sequence:\n gapped_groups.add(group)\n else:\n nums = [int(pattern_multilane.search(se).group(1)) for se in elems]\n if nums != expected_sequence:\n gapped_groups.add(group)\n\n return {group: elems for group, elems in group_map.items() if group not in gapped_groups}\n\n single_files = [f for f in files if isinstance(f, str)]\n paired_files = [f for f in files if isinstance(f, tuple)]\n\n multilane_pairs = _create_group_map(paired_files, paired=True)\n multilane_singles = _create_group_map(single_files, paired=False)\n\n # Search for unmatched files for paired end multilane files and remove offending groups,\n # e.g. [(Sample_R1_L001.fq, Sample_R2_L001.fq), Sample_R2_L002.fq]\n for filename in single_files:\n if pattern_multilane.search(filename):\n group = _group_for(filename)\n if group in multilane_pairs:\n del multilane_pairs[group]\n\n # Remove groups with gaps, e.g. [`Sample_R1_L001.fq`, `Sample_R1_L003.fq`]\n multilane_pairs = _with_gaps_removed(multilane_pairs, paired=True)\n multilane_singles = _with_gaps_removed(multilane_singles, paired=False)\n\n multilane_groups = list(multilane_singles.values())\n multilane_groups.extend(list(multilane_pairs.values()))\n\n return multilane_groups", "def parse_previously_printed(path):\n if path is not None:\n prev_printed = set([l.strip() for l in open(path)])\n else:\n prev_printed = set([])\n return prev_printed", "def convertFiles():\n\n #### Get file lists\n tmp = os.path.join(remarkableBackupDirectory,remContent)\n files = [x for x in os.listdir(tmp) if \".\" not in x]\n\n for i in range(0, len(files)):\n # get file reference number\n refNrPath = os.path.join(remarkableBackupDirectory, remContent,\n files[i])\n # get meta Data\n meta = json.loads(open(refNrPath + \".metadata\").read())\n fname = meta[\"visibleName\"]\n fname = fname.replace(\" \", \"_\")\n # Does this lines file have an associated pdf?\n AnnotPDF = os.path.isfile(refNrPath + \".pdf\")\n # Get list of all rm files i.e. 
all pages\n npages = len(glob.glob(refNrPath + \"/*.rm\"))\n if npages != 0:\n if AnnotPDF:\n # we have found an annotated pdf\n # now make sure it has the right ending\n if meta[\"visibleName\"][-4:] != \".pdf\":\n syncFilePath = os.path.join(syncDirectory, \"*\",\n meta[\"visibleName\"] + \".pdf\")\n else:\n syncFilePath = os.path.join(syncDirectory, \"*\",\n meta[\"visibleName\"])\n\n # does the file exist in our system?\n inSyncFolder = glob.glob(syncFilePath) != []\n\n if inSyncFolder:\n # have we exported this thing before?\n local_annotExist = \\\n glob.glob(syncFilePath[:-4] + \"_annot.pdf\") != []\n # first, assume, it needs converting\n remoteChanged = True\n if local_annotExist:\n # if it already exists check when it was last updated\n local_annotPath = \\\n glob.glob(syncFilePath[:-4]+\"_annot.pdf\")[0]\n local_annot_mod_time = os.path.getmtime(local_annotPath)\n # rm time is in ms\n remote_annot_mod_time = int(meta[\"lastModified\"])/1000\n # has this version changed since we last exported it?\n remoteChanged = \\\n remote_annot_mod_time > local_annot_mod_time\n # update if the remote version has changed\n if remoteChanged:\n origPDF = glob.glob(syncFilePath)[0]\n #####\n convertAnnotatedPDF(fname, refNrPath, origPDF)\n #####\n else:\n print(fname + \"hasn't been modified\")\n else:\n print(fname + \" does not exist in the sync directory\")\n # TODO allow y/n input whether it should be copied there\n # anyway\n else:\n # we found a note\n print(\"exporting Notebook \" + fname)\n syncFilePath = os.path.join(syncDirectory, notesDirectory,\n fname + \".pdf\")\n inSyncFolder = glob.glob(syncFilePath) != []\n remoteChanged = True\n if inSyncFolder:\n local_annot_mod_time = os.path.getmtime(syncFilePath)\n remote_annot_mod_time = int(meta['lastModified'])/1000\n remoteChanged = remote_annot_mod_time > local_annot_mod_time\n if remoteChanged:\n #####\n convertNotebook(fname, refNrPath)\n #####\n else:\n print(fname + \"has not changed\")", "def ReturnChain(currentFile):\n with open(currentFile) as fileIn:\n lines = fileIn.readlines()\n Chainlist=[]\n for line in lines:\n if line.startswith('SEQRES'):\n List = line.split()\n Chainlist.append(List[2])\n #print(Chainlist)\n Chain = set(Chainlist)\n chain = sorted(list(Chain))\n return chain", "def get_data_split(file_name, referents=False):\n with open(file_name, 'r') as file:\n data = file.readlines()\n data = [line.strip() for line in data if line != '1-----\\n' and line != '1-----']\n scene_mapping = utt_to_scene('gold_lexicon.txt')\n all_utterances = [e.split(\" \")[1:] for e in data[::2]]\n short_utterances = []\n long_utterances = []\n short_scenes = []\n long_scenes = []\n for utt in all_utterances:\n if len(utt) >= 5:\n long_utterances.append(utt)\n scene = [scene_mapping[word] for word in utt]\n if referents:\n long_scenes.append(scene)\n else:\n scene = list(itertools.chain.from_iterable(scene))\n long_scenes.append(scene)\n elif len(utt) <= 3:\n short_utterances.append(utt)\n scene = [scene_mapping[word] for word in utt]\n if referents:\n short_scenes.append(scene)\n else:\n scene = list(itertools.chain.from_iterable(scene))\n short_scenes.append(scene)\n return short_utterances, long_utterances, short_scenes, long_scenes", "def testDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 foo/../file1_1.cc\nFILE 2 bar/../file1_1.cc\nFILE 3 baz/../file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n 
EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 1\n1008 4 46 1\n100c 4 44 1\n\"\"\"\n self.assertParsed(INPUT, [], EXPECTED_OUTPUT)", "def Seperate(f_read, f_write_name):\n lines = f_read.readlines()\n line_s = [line.split() for line in lines]\n\n for i in range(6, 13):\n nbytes = pow(2,i)\n f_write = f_write_name + str(nbytes) + \"b.txt\"\n f = open(f_write, \"w+\")\n\n for line in line_s:\n if line[3] == str(nbytes):\n f.write(\" \".join(line))\n f.write(\"\\n\")\n f.close()", "def splitter(fasta_file, output, limit, large_handling=False):\n file_ = open(fasta_file, 'r')\n file_count = 1\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n nt_count = 0\n for seq in SeqIO.parse(fasta_file, 'fasta'):\n if large_handling == True and len(str(seq.seq)) >= int(limit):\n file_count += 1\n largefile = open(output.rstrip(\"/\")+\"/%s_%05d_XL.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n largefile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\")\n largefile.close()\n else:\n nt_count += len(str(seq.seq))\n outfile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\") \n if nt_count >= int(limit):\n outfile.close()\n file_count += 1\n nt_count = 0\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n outfile.close()", "def preselect(input_files: list) -> list:\n checked_files = []\n for file in input_files:\n if os.path.isfile(file):\n checked_files.append(file)\n\n summary_df = pd.DataFrame(columns=['file', 'size'])\n\n summary_df['file'] = checked_files\n summary_df['size'] = [os.path.getsize(file) for file in checked_files]\n\n summary_df = summary_df[summary_df['size'].duplicated(keep=False)]\n\n return summary_df['file'].tolist()", "def loadAllFiles(f_list):\n\n t_1 = datetime.now()\n\n # first list\n i_list = []\n id_list = []\n for f in f_list:\n f_t = loadFile(f)\n i_list.extend(f_t)\n # clean list\n ln_i_list = len(i_list)\n c_list = []\n for i, f in enumerate(i_list):\n if f[\"id\"] not in id_list:\n id_list.append(f[\"id\"])\n c_list.append(f)\n print (i * 100.) 
/ ln_i_list, datetime.now() - t_1, i\n return c_list", "def retrieve(self,version=None):\n result = []\n groups = (os.path.split(x) for x in self.list_filenames())\n groups2 = itertools.groupby(groups,operator.itemgetter(0))\n groups3 = ((k,[x[1] for x in g]) for k,g in groups2)\n for (result_version, filenames) in groups3:\n if not version or version == result_version:\n for filename in filenames:\n filename = os.path.join(self.archive_path,result_version,filename)\n result.append(RunResults.load(filename))\n return result", "def get_strs_from_input(file_name,mode):\n foo = open(file_name,mode) \n seq = foo.read().split(\">\") \n strs = []\n for s in seq:\n if(s!=\"\"):\n strings=s.split()\n s_1=strings[0]\n s_2=''.join(strings[1:])\n strs.append(s_2) \n return strs", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. 
adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n 
finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def redisperse_list(files,dw,w1,w2,key='spec'):\r\n input_list = ','.join(files)\r\n disp_files = [f.replace(key, key+'-disp') for f in files]\r\n output_disp_list = ','.join(disp_files)\r\n iraf.unlearn('dispcor')\r\n iraf.dispcor.input = input_list\r\n iraf.dispcor.output = output_disp_list\r\n # keep existing wavelength endpoints\r\n iraf.dispcor.dw = dw\r\n iraf.dispcor.w1 = w1\r\n iraf.dispcor.w2 = w2\r\n iraf.dispcor.flux = 'no'\r\n iraf.dispcor()\r\n # write text files\r\n for output in disp_files:\r\n iraf.wspectext(output, output.replace('fits', 'txt'), header=\"no\")\r\n\r\n return disp_files", "def read_files(n=15):\n list_of_differences = [[0] for i in range(n)]\n\n for i in range(n):\n with open('differences'+str(i)+'.txt', 'r') as file:\n for line in file:\n list_of_differences[i].append(int(line.rstrip('\\n')))\n\n return list_of_differences", "def remove_duplicates(file, number_of_fastas, path, output_name):\n\n path_to_pbds = path + 'Modeling/cleaned_template_pdbs/'\n path_to_fastas = path + 'Modeling/cleaned_template_fastas/'\n path_to_alignnment = path + 'Modeling/fasta_alns_and_identities/' + file\n fastas = parse_multifasta_file(path_to_alignnment, number_of_fastas)\n uniq_fastas = []\n with open(output_name, \"w\") as f:\n for i in range(number_of_fastas):\n name, seq = next(fastas)\n if seq not in uniq_fastas:\n uniq_fastas.append(seq)\n f.write('>' + name + '\\n')\n f.write(seq + '\\n')\n else:\n os.remove(path_to_pbds + name + '.pdb')\n os.remove(path_to_fastas + name + '.fasta')\n shutil.move(output_name, path + 
'Modeling/fasta_alns_and_identities/')\n return len(uniq_fastas)", "def check_duplicates(filename=None):\r\n i=0\r\n for line in open(filename):\r\n a=line.split(\" \")\r\n i=i+1\r\n b=set(a[:])\r\n for item in b:\r\n if a.count(item) > 1:\r\n print \"line\",i,\"---->\",item\r\n print \"end\"", "def remove_matching_reads(filename, cont_file):\n if not os.path.exists(cont_file + '.bwt'):\n cml = shlex.split('bwa index %s' % cont_file)\n subprocess.call(cml)\n cml = 'bwa mem -t 2 %s %s 2> /dev/null | samtools view -f 4 -h - | samtools bam2fq - ' % (cont_file, filename)\n cml += '| seqtk seq -A - > clean_reads.fasta'\n\n subprocess.call(cml, shell=True)\n return 'clean_reads.fasta'", "def make_items_distinct_in_file(path=get_run_path(), filename_in='', sort=False,\n prefix_or_postfix=0, fix_phrase=''):\n\n set_items = set()\n\n if path[len] != '/':\n path += '/'\n\n # if filename_out:\n # out_file_path = path + filename_out\n\n if filename_in:\n in_file_path = path + filename_in\n\n with open(in_file_path) as file:\n for line in file.read().splitlines():\n set_items.add(line)\n else:\n for filename in glob.glob(os.path.join(path)):\n with open(filename) as file_x:\n for line in file_x:\n set_items.add(line)\n\n def make_distinct_file(file_name):\n with open(file_name) as f:\n for f_line in f.read().splitlines():\n set_items.add(f_line)\n\n def make_merged_distinct_file(dir_path):\n for file_x in glob.glob(os.path.join(dir_path)):\n with open(filename) as file_x:\n for x_line in file_x:\n set_items.add(x_line)\n\n if set_items.__len__() > 0:\n with open(out_file_path, \"w\") as file_out:\n if sort:\n list_sorted = sorted(set(set_items))\n file_out.write(\"\\n\".join(list_sorted))\n else:\n file_out.write(\"\\n\".join(set_items))", "def load_and_print_file_list(file_list):\n for file in file_list:\n hdu_list = load_file(file)\n print(\"'{0}' has {1} hdus in it\".format(file, len(hdu_list)))\n for ii in range(len(hdu_list)):\n hdu1 = hdu_list[ii] # FITS HDU counting is from 1\n print('BITPIX type of HDU{0} = {1}'.format(ii + 1,\n hdu1.header['BITPIX']))\n # be sure to close the file handle\n hdu_list.close()" ]
[ "0.5725787", "0.5708584", "0.5698696", "0.5599208", "0.5580545", "0.5571598", "0.5569876", "0.5565192", "0.5528212", "0.55211085", "0.551443", "0.55045366", "0.54886717", "0.54601747", "0.5417886", "0.54174167", "0.5414416", "0.54129356", "0.5406018", "0.53970194", "0.53854364", "0.5381576", "0.53785247", "0.5371965", "0.53543913", "0.53441083", "0.5339531", "0.5326073", "0.5305077", "0.5301095", "0.5294771", "0.52937526", "0.52904505", "0.5282252", "0.5258898", "0.52464825", "0.5240595", "0.5231788", "0.5220451", "0.52170956", "0.5217027", "0.5215135", "0.5209727", "0.5203129", "0.5199641", "0.519336", "0.5191415", "0.51813877", "0.5178007", "0.5177203", "0.5176752", "0.51664567", "0.5166411", "0.5164888", "0.515606", "0.5141708", "0.5140377", "0.51387846", "0.51339567", "0.51323843", "0.5131379", "0.5127997", "0.5119975", "0.5117631", "0.51048964", "0.5088263", "0.5087052", "0.508704", "0.5082643", "0.50815743", "0.507359", "0.5073148", "0.507314", "0.50687087", "0.5067586", "0.50633526", "0.50619", "0.50516766", "0.5048799", "0.5047755", "0.5047263", "0.50472534", "0.50469196", "0.50432205", "0.5042563", "0.5040644", "0.5038935", "0.5035871", "0.5032546", "0.5032306", "0.5028801", "0.50247324", "0.5022419", "0.5021012", "0.501872", "0.50169355", "0.5015268", "0.50088483", "0.5008596", "0.50009936" ]
0.52034676
43
Description Handle missing values by replacing them with either the default value or the mean/min/max value (for non-text columns only). An indicator column can optionally be concatenated, if the input column type is numeric.
def transforms_missingvaluehandler( column, data, output_data=None, model=None, replace_with='Def', impute_by_slot=True, concat=True, **params): entrypoint_name = 'Transforms.MissingValueHandler' inputs = {} outputs = {} if column is not None: inputs['Column'] = try_set( obj=column, none_acceptable=False, is_of_type=list, is_column=True) if data is not None: inputs['Data'] = try_set( obj=data, none_acceptable=False, is_of_type=str) if replace_with is not None: inputs['ReplaceWith'] = try_set( obj=replace_with, none_acceptable=True, is_of_type=str, values=[ 'DefaultValue', 'Mean', 'Minimum', 'Maximum']) if impute_by_slot is not None: inputs['ImputeBySlot'] = try_set( obj=impute_by_slot, none_acceptable=True, is_of_type=bool) if concat is not None: inputs['Concat'] = try_set( obj=concat, none_acceptable=True, is_of_type=bool) if output_data is not None: outputs['OutputData'] = try_set( obj=output_data, none_acceptable=False, is_of_type=str) if model is not None: outputs['Model'] = try_set( obj=model, none_acceptable=False, is_of_type=str) input_variables = { x for x in unlist(inputs.values()) if isinstance(x, str) and x.startswith("$")} output_variables = { x for x in unlist(outputs.values()) if isinstance(x, str) and x.startswith("$")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables) return entrypoint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fix_missing(df, col, name, na_dict):\n if is_numeric_dtype(col):\n if pd.isnull(col).sum() or (name in na_dict):\n df[name+'_na'] = pd.isnull(col)\n filler = na_dict[name] if name in na_dict else col.median()\n df[name] = col.fillna(filler)\n na_dict[name] = filler\n return na_dict", "def fill_nan_in_numeric(df):\n print(\" --- Filling NaN in Numerics.\")\n thresh = get_min_filled_threshold(df)\n columns = df.columns\n numerical = [x for x in columns if x.startswith('n_')]\n # fill NaN with mean or median, based on std dev\n for col in numerical:\n filled = get_non_missing_count(df[col])\n if filled < thresh:\n df[col] = df[col].fillna(-1)\n else:\n std = df[col].std()\n if std < 1:\n mean = df[col].mean()\n df[col] = df[col].fillna(mean)\n else:\n median = df[col].median()\n df[col] = df[col].fillna(mean)\n\n print(\" --- Finished filling NaN in Numerics.\")\n return df", "def impute_missing(df):\n\n for name in df.select_dtypes(\"number\"):\n df[name] = df[name].fillna(0)\n for name in df.select_dtypes(\"category\"):\n df[name] = df[name].fillna(\"None\")\n return df", "def override_missing_value(data, headers, missing_value='', **_):\n return ([[missing_value if v is None else v for v in row] for row in data],\n headers)", "def fill_missing(df,strategy='mean', missingValue=np.nan):\n c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']\n imp = Imputer(inputCols=c_name,outputCols=c_name,strategy=strategy, missingValue=missingValue).fit(df)\n return imp,imp.transform(df)", "def fill_missing(self):\n df = self.df\n # Filling with default values\n logger.debug(\"Filling from distributions...\")\n for field in HeatStrokeDataFiller.default_map or field in HeatStrokeDataFiller.positive_default:\n if field not in df.columns:\n logger.warning(\"(%s) missing from data-frame columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to default: %s\" % (field, HeatStrokeDataFiller.default_map[field]))\n default_value = HeatStrokeDataFiller.default_map[field]\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=False)\n how_many_to_fill = np.sum(where)\n if field in HeatStrokeDataFiller.positive_default:\n # Use default positive dietributions\n distribution = HeatStrokeDataFiller.positive_default[field]\n df[field].loc[where] = distribution(how_many_to_fill)\n else:\n logger.debug(\"Using default %s for field: %s\" % (default_value, field))\n # Use default values\n df[field].loc[where] = np.array([default_value] * how_many_to_fill)\n\n # Filling with Zeros\n logger.debug(\"Fillling with zeros...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_zero:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to 0\" % field)\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = np.zeros(how_many_to_fill)\n\n # Filling in columns with the average from the rest of the column\n logger.debug(\"Filling with agerages...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_average:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from data-frame columns\" % field)\n continue\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n data = df[field][np.invert(where)]\n mean = np.mean(data)\n std = np.std(data)\n if mean == np.nan or std == np.nan:\n mean, std = (0, 0)\n 
logger.debug(\"Setting missing in \\\"%s\\\" with: %.3f +/- %.3f\" % (field, mean, std))\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = mean + std * np.random.random(how_many_to_fill)\n\n fields_not_modified = set(df.columns) - set(HeatStrokeDataFiller.default_map.keys()) - HeatStrokeDataFiller.fields_to_fill_with_zero - HeatStrokeDataFiller.fields_to_fill_with_zero\n logger.debug(\"Fields not modified: %s\" % fields_not_modified.__str__())\n return df", "def fill_missing_values(cols):\n age = cols[0]\n pclass = cols[1]\n\n if pd.isnull(age):\n if pclass == 1:\n return 37\n elif pclass == 2:\n return 29\n else:\n return 24\n else:\n return age", "def DealWithMissingValues(data_set: pd.DataFrame):\n data_set.fillna(method=\"pad\", inplace=True)", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def mean_impute(self, column_val):\n mean = np.mean(column_val)\n column_val = column_val.fillna(mean)\n return column_val", "def coerce_empty_numeric_values(self):\n if \"numeric\" in self.annot_types:\n numeric_columns = self.file.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n self.file[numeric_columns].replace(\"\", np.nan, inplace=True)", "def code_unknown_to_nan(data, attribute_values):\n attribute_values_unknown = attribute_values[attribute_values['Meaning'] == \"unknown\"]\n for i in range(len(attribute_values_unknown)):\n colname = attribute_values_unknown.iloc[i]['Attribute']\n unknown_values = eval('[' + str(attribute_values_unknown.iloc[i]['Value']) + ']')\n try:\n data[colname] = data[colname].replace(unknown_values, float('nan'))\n except:\n pass\n return data", "def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data", "def replace_na(data, replace=\"average\", remove=False, columns):\n \n return", "def missing_values(self):\n missing_values = self.data.isna().sum()\n if missing_values:\n fracture = missing_values / self.data.count()\n return f\"N={missing_values},{round(fracture, 2)}%\"\n else:\n return \"no missing values\"", "def fillna_method(request: Any) -> Any:\n return request.param", "def fillna_method(request: Any) -> Any:\n return request.param", "def __fillnan(df):\n\t\tcol_names = ['budget', 'popularity', 'runtime', 'vote_average', 'vote_count']\n\t\tfor col_name in col_names:\n\t\t\tdf[col_name] = df[col_name].fillna(df[col_name].median())\n\t\treturn df", "def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())", "def impute(df, median=False, mean=False, negative=False, zero=False, list_missing=[]):\n df_copy = df.copy()\n\n if len(list_missing) == 0:\n list_missing = df_copy.columns[df_copy.isna().any()].tolist()\n\n if median:\n #Impute missing values with median\n for i in list_missing:\n df_copy[i].fillna(\n (df_copy[i].median()), inplace=True)\n print(\"Imputation with median done\")\n\n elif mean:\n #Impute missing values with mean\n for i in list_missing:\n df_copy[i].fillna(\n (df_copy[i].mean()), inplace=True)\n print(\"Imputation with mean done\")\n\n elif negative:\n for i in list_missing:\n df_copy[i].fillna(-1, inplace=True)\n print(\"Imputation with negative value done\")\n\n elif zero:\n for i in list_missing:\n df_copy[i].fillna(0, inplace=True)\n print(\"Imputation with 
zero done\")\n\n else:\n print(\"No method choosen: Missing values at: \", list_missing)\n\n\n return df_copy", "def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value", "def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value", "def fillna(self, value=None, downcast=None):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")", "def _add_missing_param_values(self, mean, model=None):\n if self.default_param_values is not None:\n mean['value'] = mean['value'].fillna(mean['param'].map(self.default_param_values))\n elif model is not None:\n self.default_param_values = self._get_parameters_from_model(model)\n mean['value'] = mean['value'].fillna(mean['param'].map(self.default_param_values))\n\n if mean[['value']].isnull().values.any():\n raise MissingParametersErrors(\"'param_covariance' contains parameters that are absent from 'param_mean'.\"\n \" Please add these parameters to 'param_mean' or include a PySB model\")\n return mean", "def _fillna_meta_cols(self):\n for col_name, fill_value in self._fillna.items():\n if col_name in self._hybrid_meta.columns:\n self._hybrid_meta[col_name].fillna(fill_value, inplace=True)\n else:\n self.__warn_missing_col(col_name, action='fill')\n\n self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)\n self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)", "def handle_missing_values(dataset, missing_values_header, missing_label):\n\n return dataset[dataset[missing_values_header] != missing_label]", "def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ):\n return super().fillna(value, method, axis, inplace, limit, downcast)", "def missing_values(df):\n\n # penalise missing review scores\n df[\"prop_review_score\"].fillna(-1, inplace=True)\n df[\"prop_location_score1\"].fillna(-1, inplace=True)\n df[\"prop_location_score2\"].fillna(-1, inplace=True)\n 
df[\"visitor_hist_starrating\"].fillna(-1, inplace=True)\n df[\"visitor_hist_adr_usd\"].fillna(-1, inplace=True)\n\n # replace price by mean of hotels with same starrating\n mean_price_starrating = df.groupby(\"prop_starrating\")[\"prop_log_historical_price\"].transform(\"mean\")\n df[\"prop_log_historical_price\"].fillna(mean_price_starrating, inplace=True)\n\n # fill by worst possible value in dataset\n aff_min = df[\"srch_query_affinity_score\"].min()\n df[\"srch_query_affinity_score\"].fillna(aff_min, inplace=True)\n\n # TODO: is dit worst???? hoezo is verder weg slechter?\n orig_max = df[\"orig_destination_distance\"].max()\n df[\"orig_destination_distance\"].fillna(orig_max, inplace=True)\n\n # remaining mv's are replaced by mean of column\n # df = df.fillna(df.mean())\n print(\"er zijn nog zoveel nans: \", df.isnull().sum().sum())\n\n return df", "def test_fill_values(parallel, read_basic):\n text = \"\"\"\nA, B, C\n, 2, nan\na, -999, -3.4\nnan, 5, -9999\n8, nan, 7.6e12\n\"\"\"\n table = read_basic(text, delimiter=\",\", parallel=parallel)\n # The empty value in row A should become a masked '0'\n assert isinstance(table[\"A\"], MaskedColumn)\n assert table[\"A\"][0] is ma.masked\n # '0' rather than 0 because there is a string in the column\n assert_equal(table[\"A\"].data.data[0], \"0\")\n assert table[\"A\"][1] is not ma.masked\n\n table = read_basic(\n text, delimiter=\",\", fill_values=(\"-999\", \"0\"), parallel=parallel\n )\n assert isinstance(table[\"B\"], MaskedColumn)\n assert table[\"A\"][0] is not ma.masked # empty value unaffected\n assert table[\"C\"][2] is not ma.masked # -9999 is not an exact match\n assert table[\"B\"][1] is ma.masked\n # Numeric because the rest of the column contains numeric data\n assert_equal(table[\"B\"].data.data[1], 0.0)\n assert table[\"B\"][0] is not ma.masked\n\n table = read_basic(text, delimiter=\",\", fill_values=[], parallel=parallel)\n # None of the columns should be masked\n for name in \"ABC\":\n assert not isinstance(table[name], MaskedColumn)\n\n table = read_basic(\n text,\n delimiter=\",\",\n fill_values=[(\"\", \"0\", \"A\"), (\"nan\", \"999\", \"A\", \"C\")],\n parallel=parallel,\n )\n assert np.isnan(table[\"B\"][3]) # nan filling skips column B\n # should skip masking as well as replacing nan\n assert table[\"B\"][3] is not ma.masked\n assert table[\"A\"][0] is ma.masked\n assert table[\"A\"][2] is ma.masked\n assert_equal(table[\"A\"].data.data[0], \"0\")\n assert_equal(table[\"A\"].data.data[2], \"999\")\n assert table[\"C\"][0] is ma.masked\n assert_almost_equal(table[\"C\"].data.data[0], 999.0)\n assert_almost_equal(table[\"C\"][1], -3.4) # column is still of type float", "def fill_nans(data):\n for col in data.columns:\n data[col].fillna(-999, inplace=True)", "def ImputeMissingValues(self, **kwargs):\n if self._frozen:\n raise Exception(\"Dataset already frozen, no modifications allowed\")\n \n # we should check it there are any duplicated columns in the arguments\n if \"columns\" in kwargs.keys():\n all_columns = [c[\"colname\"] for c in self._columns]\n tmp = kwargs[\"columns\"]\n # check if columns have been validated\n flag = 0\n for c in tmp:\n if c not in all_columns:\n flag = 1\n print(\"Unknown or unvalidated column: '{}'\".format(c))\n if flag>0:\n raise Exception(\"Unknown or unvalidated columns, aborting\")\n if np.unique(tmp).shape[0] != len(tmp):\n raise Exception(\"Duplicated columns, aborting\")\n # check that if a column has been passed as a parameter, all columns for the same group are passed too\n for 
column in tmp:\n group = [c[\"group\"] for c in self._columns if c[\"colname\"]==column][0]\n columns_in_group = [c[\"colname\"] for c in self._columns if c[\"group\"]==group]\n if all([c in tmp for c in columns_in_group]) != True:\n raise Exception(\"All one-hot encoded mono-label variables of the same group must be included (column: '{}'\".format(column))\n \n columns = tmp\n else:\n # by default : take all columns which are in the X dataset\n columns = [c[\"colname\"] for c in self._columns if c[\"dataset\"]==\"X\"]\n \n df = self._proc_data.copy()\n \n # Remove columns with only missing values\n cols_to_remove = []\n for coln in columns:\n if np.sum(pd.isna(df[coln]))==df.shape[0]:\n print(\" !!!! Column {} (qty) only has missing values, will be removed\".format(coln))\n cols_to_remove.append(coln)\n # Check if 0 variance => would lead to NA values\n for coln in columns:\n if np.nanvar(df[coln])==0:\n print(\" !!!! Column {} (qty) has 0 variance, will be removed\".format(coln))\n cols_to_remove.append(coln)\n if len(cols_to_remove)>0:\n columns = [c for c in columns if c not in cols_to_remove] \n df = df.loc[:,columns]\n mean = np.nanmean(df,axis=0)\n std = np.nanstd(df,axis=0)\n df = (df-mean)/std\n \n print(\" >>>> Total missing values:\\n{}\".format(np.sum(pd.isna(df))))\n \n ar = np.array(df)\n \n # impute\n imputer = KNNImputer(n_neighbors=5, weights=\"distance\")\n imputed_np = imputer.fit_transform(ar)\n \n # unnormalize\n unnorm_imputed_np = imputed_np*std+mean\n unnorm_imputed_df = df.copy()\n # send back to dataframe, with same structure: index, columns...\n unnorm_imputed_df.loc[:,:] = unnorm_imputed_np\n \n # for binary variables : convert to 0/1 and ensure that variables in the same group have only one 1\n # convert by group to make it easier\n # one entry per group\n bin_columns_by_group = {}\n for c in self._columns:\n if c[\"mode\"]==1 and c[\"colname\"] in columns:\n if c[\"group\"] in bin_columns_by_group.keys():\n bin_columns_by_group[c[\"group\"]].append(c[\"colname\"])\n else:\n bin_columns_by_group[c[\"group\"]]=[c[\"colname\"]]\n \n for cols in bin_columns_by_group.values():\n if len(cols)==1: # binary\n unnorm_imputed_df[cols[0]] = np.minimum(1, np.maximum(0, np.round(unnorm_imputed_df[cols[0]])))\n else:\n a = np.argmax(np.array(unnorm_imputed_df[cols]), axis=1)\n b = np.zeros((a.size, a.max()+1))\n b[np.arange(a.size),a] = 1\n unnorm_imputed_df[cols] = b\n \n self._proc_data.loc[:,columns] = unnorm_imputed_df", "def get_missing(self):\n missing_values = self.df[self.col_name].isnull().sum()\n return missing_values", "def na_value():\n return pd.NA", "def treat_null_values(df, method):\n \n if method == 'drop':\n df.dropna(inplace = True)\n elif method == 'mean':\n df.fillna(round(df.mean(),1), inplace = True)\n elif method == 'median':\n df.fillna(df.median(), inplace = True)\n elif method == 'mode':\n df.fillna(df.mode()[0], inplace = True)\n else:\n df.fillna('NA', inplace = True)", "def _fill_nan(\n df: pd.DataFrame,\n categorical_fill: object = \"Missing\",\n numerical_fill: object = -1\n) -> pd.DataFrame:\n print(\"Filling nan observations...\")\n df = df.copy()\n for col in df.columns:\n if col in CATEGORICAL_TRANS:\n # Categorical columns.\n df[col].fillna(categorical_fill, inplace=True)\n else:\n # Numerical columns.\n # df[col].fillna(numerical_fill(df[col]), inplace=True)\n df[col].fillna(-1, inplace=True)\n assert not np.any(df.isna())\n return df", "def _use_inf_as_null(key):\n from pandas.core.config import get_option\n flag = get_option(key)\n if 
flag:\n globals()['_isnull'] = _isnull_old\n else:\n globals()['_isnull'] = _isnull_new", "def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)", "def compute_column_means_with_incomplete_data(df):\n X = np.array(df)\n return np.nanmean(X, axis = 0)", "def fillna(\n self, value: Any = None, method: str = None, dtype: Dtype = None\n ):\n result = libcudf.replace.replace_nulls(\n input_col=self, replacement=value, method=method, dtype=dtype\n )\n return self._copy_type_metadata(result)", "def correct_miss_fill(ds):\n for d in ds.data_vars:\n try:\n ds[d].attrs.update({'missing_value': ds[d]._FillValue})\n except:\n pass\n return xr.decode_cf(ds)", "def NA():\n return float('nan')", "def ISBLANK(value):\n raise NotImplementedError()", "def default_missing_values(cur, conn):\n for query in default_values_queries:\n cur.execute(query)\n conn.commit()", "def encode_nans(table, column_name):\n def replace(entry):\n if entry == 'nan' or pd.isnull(entry):\n return None\n else:\n return entry\n assert (isinstance(table, Table)), \"Input not a supported type.\"\n column = table.apply(replace, column_name)\n return table.append_column(column_name, column)", "def mean_default_zero(inputs):\n # pylint disable necessary for numpy and pandas\n if len(inputs) == 0: # pylint: disable=g-explicit-length-test\n return 0\n else:\n return np.mean(inputs)", "def data_missing(data):\n return type(data)._from_sequence([None, data[0]])", "def value_nan_handle(self, method: str = 'fill'):\n assert method in ['fill', 'drop'], 'Method must be either fill or drop passed as string.'\n if method == 'fill':\n self.tsdf.fillna(method='pad', inplace=True)\n else:\n self.tsdf.dropna(inplace=True)\n return self", "def parse_missing_fields(data):\n def _get_nan_synonims(k):\n if k == \"extra_security_descriptors\":\n return [pd.NaT, np.nan]\n else:\n return [pd.NaT, '', np.nan]\n\n data = {id_no: {k: (v if v not in _get_nan_synonims(k) and not pd.isna(v) \n else None) for k,v in datapoint.items()} for id_no, datapoint in data.items()}\n return data", "def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return", "def _validate_fillna_cols_prefixed(self):\n for col in self._fillna:\n self.__validate_col_prefix(\n col, (SOLAR_PREFIX, WIND_PREFIX), input_name='fillna'\n )", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def handle_missing_data(self, dataframe):\n return dataframe", "def impute(self, columns, method='median', all_null='raise'):\n # Ensure all_null is one of the valid choices.\n allowed = {'drop', 'raise', 'ignore'}\n if all_null not in allowed:\n raise ValueError(\n 'all_null must be one of: %s' % ', '.join(allowed))\n\n self.verify_columns_in_dataset(columns)\n\n # If all_null='raise', check all columns first to avoid side effects.\n if all_null == 'raise':\n for col in columns:\n if self.column_is_all_null(col):\n raise ValueError(\"all null column '%s'\" % col)\n\n for col in columns:\n if self.column_is_all_null(col):\n if all_null == 'drop':\n self.remove_feature(col)\n logging.info(\"all null column '%s' was dropped\" % col)\n continue\n # Already checked all_null 
== 'raise'\n else:\n logging.info(\"all null column '%s' ignored\" % col)\n\n # Compute fill value and fill all NaN values.\n column = self.dataset[col]\n fill_value = getattr(column, method)()\n self.dataset[col] = column.fillna(fill_value)\n\n # Store fill_value imputed.\n self.imputations[col] = fill_value", "def handel_missing_values(dataset, missing_values_header, missing_label):\n \n return dataset[dataset[missing_values_header] != missing_label]", "def handle_na(self, data: pd.DataFrame) -> pd.DataFrame:\n return remove_missing(\n data,\n self.params[\"na_rm\"],\n list(self.REQUIRED_AES | self.NON_MISSING_AES),\n self.__class__.__name__,\n )", "def missing(self, value):\n self.MISSING = value", "def test_empty_input():\n assert _currency_column_to_numeric(\"\") == \"ORIGINAL_NA\"", "def _fill_na_by_ratio_checker(func):\n\n @wraps(func)\n def wrapper_checker(database, column_name):\n _CheckInput._check_database_input(database)\n _CheckInput._check_column_name(column_name)\n _CheckInput._check_column_in_database(column_name, database)\n func(database, column_name)\n return wrapper_checker", "def test_fill_missing_value(self):\n try:\n # test_size need to be a positive float number\n fill_missing_value(DATA, NEEDED, INPUTS, OUTPUT, test_size=-1.0)\n raise Exception(\"Illegal input test failed!\")\n except AssertionError:\n pass\n\n try:\n # n_estimators need to be a positive integer\n fill_missing_value(DATA, NEEDED, INPUTS, OUTPUT, n_estimators=-1)\n except AssertionError:\n pass\n\n try:\n # max_depth need to be a positive integer\n fill_missing_value(DATA, NEEDED, INPUTS, OUTPUT, max_depth=1.2)\n except AssertionError:\n pass\n\n filled_data = fill_missing_value(DATA, NEEDED, INPUTS, OUTPUT)\n\n assert isinstance(filled_data, pd.DataFrame),\\\n \"Type error. pd.DataFrame expected\"", "def fix_null_vals(dataset):\n\tprint(\"\\tFixing null values\")\n\n\tif not dataset.isnull().any().any():\n\t\treturn dataset\n\telse:\n\t\treturn dataset.fillna(method=\"ffill\")", "def __warn_missing_col(col_name, action):\n msg = (\"Skipping {} values for {!r}: Unable to find column \"\n \"in hybrid meta. Did you forget to prefix with \"\n \"{!r} or {!r}? 
\")\n w = msg.format(action, col_name, SOLAR_PREFIX, WIND_PREFIX)\n logger.warning(w)\n warn(w, InputWarning)", "def missing_values(self, layout={}, **kwargs):\n df = self._data.isna().astype(int)\n kwargs.update(\n {'zmin': 0, 'zmax': 1,\n 'colors': 'reds', 'ncolors': 9,\n 'xgap': 3, 'ygap': 3,\n 'showscale': False, }\n )\n\n layout = recursive_update(\n layout, updater={\n 'xaxis': {'showgrid': False, 'zeroline': False},\n 'yaxis': {'showgrid': False, 'zeroline': False},\n })\n return df.iplot.heatmap(layout=layout, **kwargs)", "def impute(self, columns, method='median', all_null='raise'):\n # Ensure all_null is one of the valid choices.\n allowed = {'drop', 'raise', 'ignore'}\n if all_null not in allowed:\n raise ValueError(\n 'all_null must be one of: %s' % ', '.join(allowed))\n\n self.verify_columns_in_dataset(columns)\n\n # If all_null='raise', check all columns first to avoid side effects.\n if all_null == 'raise':\n for col in columns:\n if self.train_column_is_all_null(col):\n raise ValueError(\"all null column '%s'\" % col)\n\n for col in columns:\n if self.train_column_is_all_null(col):\n if all_null == 'drop':\n self.remove_feature(col)\n logging.info(\"all null column '%s' was dropped\" % col)\n continue\n # Already checked all_null == 'raise'\n else:\n logging.info(\"all null column '%s' ignored\" % col)\n\n # Compute fill value and fill all NaN values.\n train_column = self.train[col]\n fill_value = getattr(train_column, method)()\n self.train.loc[:, col] = train_column.fillna(fill_value)\n self.test.loc[:, col] = self.test[col].fillna(fill_value)\n\n # Store fill_value imputed.\n self.imputations[col] = fill_value", "def default_na_value(self):\n dkind = self.dtype.kind\n if dkind == \"M\":\n return np.datetime64(\"nat\", self.time_unit)\n else:\n raise TypeError(\n \"datetime column of {} has no NaN value\".format(self.dtype)\n )", "def non_null_validation(x):\n return not pd.isnull(x), {}", "def fix_data(self, df):\n return df.dropna(axis='columns', how='all').fillna(0.0)", "def missing_values_col(df):\n null_count = df.isnull().sum()\n null_percentage = (null_count / df.shape[0]) * 100\n empty_count = pd.Series(((df == ' ') | (df == '')).sum())\n empty_percentage = (empty_count / df.shape[0]) * 100\n nan_count = pd.Series(((df == 'nan') | (df == 'NaN')).sum())\n nan_percentage = (nan_count / df.shape[0]) * 100\n return pd.DataFrame({'num_missing': null_count, 'missing_percentage': null_percentage,\n 'num_empty': empty_count, 'empty_percentage': empty_percentage,\n 'nan_count': nan_count, 'nan_percentage': nan_percentage})", "def fill_mean(df):\n df = df.fillna(df.mean().fillna(0).to_dict())\n return df", "def impute(data):\n for column in data:\n column_data = data[column]\n if column_data.dtype in (np.int, np.float):\n fill_value = column_data.mean()\n else:\n fill_value = column_data.mode()[0]\n\n data[column] = data[column].fillna(fill_value)\n\n return data", "def fill_mising(self, dict):\t\n\t\tfor name, df in dict.items():\n\t\t\tdf = df.fillna(method='pad')\n\t\t\tdict[name] = df\n\t\treturn dict", "def process_generic(x, lb, ub):\n x = x.abs()\n if x.dtype == 'float64':\n #print('float')\n x.loc[x.apply(lambda x: not x.is_integer())] = np.nan\n x.loc[(x <= lb ) | (x > ub)] = np.nan\n\n return x", "def fill_missing_data_points(data):\n return data.interpolate()", "def __init__(self, *args, **kwargs):\n kwargs[\"allow_nan\"] = False\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n kwargs[\"allow_nan\"] = False\n super().__init__(*args, 
**kwargs)", "def _clean(self, dataset):\n # Replace missing values with numpy's NaN. The missing value is\n # usually 1e+20, but values can be like 1.0000002e+20, which is\n # different. Ergo the inequality.\n for var in dataset.data_vars.itervalues():\n if 'missing_value' in var.attrs:\n missing_data_value = var.missing_value\n try:\n var.values[var.values >= missing_data_value] = np.NaN\n except ValueError:\n print \"Encountered ValueError in {0}. Ignoring\".format(var.name)", "def return_nan_handle(self, method: str = 'fill'):\n assert method in ['fill', 'drop'], 'Method must be either fill or drop passed as string.'\n if method == 'fill':\n self.tsdf.fillna(value=0.0, inplace=True)\n else:\n self.tsdf.dropna(inplace=True)\n return self", "def missing_data(self, by='marker'):\n d = np.copy(self.geno).astype(float)\n d[d == -9] = np.nan\n if by == 'marker' or by == 0:\n return np.isnan(d[:,:,0]).mean(0)\n elif by == 'individual' or by == 1:\n return np.isnan(d[:,:,0]).mean(1)\n else:\n raise ValueError(\"`by` should be either 'marker' or 'individual'.\")", "def na_value() -> pandas.NA:\n return pandas.NA", "def na_value() -> pandas.NA:\n return pandas.NA", "def fillna(df, method=\"zeros\"):\n\n if method == \"mean\":\n return df.fillna(df.mean())\n elif method == \"median\":\n return df.fillna(df.median())\n elif method == \"mode\":\n return df.fillna(df.mode().iloc[0])\n else:\n return df.fillna(0)", "def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n################################################################################################################ \n missing_data_stats=pd.DataFrame(df.isnull().sum(),columns=['Naturally_Missing_Values'])\n \n # Loop over all the rows/attributes in feat_info\n for i in range(len(feat_info)):\n att=feat_info['attribute'][i] # Get name of attribute\n codes=feat_info['missing_or_unknown'][i].replace('[',\"\").replace(']',\"\").split(',') # Get values from string\n\n # Loop over all the individual values extratcted from '\tmissing_or_unknown' column\n for j in range(len(codes)):\n if codes[j]!='': # skip empty values as nothing needs to be replaced\n try:\n df.loc[df[att]==int(codes[j]),att] = np.nan # replaces numeric values with NaNs\n except:\n df.loc[df[att]==(codes[j]),att] = np.nan # replaces string values with NaNs\n \n # Merge and plot the naturally missing and overall missing values\n missing_data_stats=pd.merge(missing_data_stats,pd.DataFrame(df.isnull().sum(),\n columns=['Overall_Missing_Values']),left_index=True,right_index=True)\n \n################################################################################################################ \n # Perform an assessment of how much missing data there is in each column of the dataset.\n missing_data_stats['Overall_Missing_Percentage']=(missing_data_stats['Overall_Missing_Values']/len(df))*100\n sorted_missing_data_stats=missing_data_stats.sort_values(by='Overall_Missing_Percentage',ascending=False)\n \n # Investigate patterns in the amount of missing data in each column.\n plt.figure(figsize=(15,30))\n sns.barplot(x='Overall_Missing_Percentage', y=sorted_missing_data_stats.index, \n data=sorted_missing_data_stats);\n plt.title('Percentage of Missing Data by Attribute',size=20)\n \n################################################################################################################\n # remove selected columns and rows, ...\n # Define criteria of outliers removal\n outlier=30 # Outlier if % missing 
more than\n\n #Get outlier columns and drop them from data\n outlier_columns=sorted_missing_data_stats[sorted_missing_data_stats['Overall_Missing_Percentage']>outlier].index\n for i in outlier_columns:\n df.drop(i,axis=1,inplace=True)\n\n################################################################################################################\n # Sum all the missing values by rows\n nan_count_rows=df.isnull().sum(axis=1).sort_values(ascending=False)\n\n # Plot and visualize the distribution of missing rows\n plt.figure(figsize=(20,8))\n sns.countplot(nan_count_rows)\n\n plt.xlabel('Missing Values in each Row')\n plt.ylabel('Count of Rows')\n plt.title('Count of Rows vs Missing Values in each Row')\n \n \n # Define threshold value\n row_nan_threshold=0\n\n # Rows above threshold\n at=df.loc[nan_count_rows>row_nan_threshold]\n\n # Rows below threshold\n bt=df.loc[nan_count_rows<=row_nan_threshold]\n \n################################################################################################################ \n # select, re-encode, and engineer column values.\n\n # Assess categorical variables: which are binary, which are multi-level, and\n # which one needs to be re-encoded?\n\n # Get all categorical features\n cat_att=feat_info[feat_info['type']=='categorical']['attribute']\n\n # Loop through all the categorical features to identify binary numeric, binary non-numeric and multi-level \n #categories\n bin_num=[] #binary and numeric (type1)\n bin_non_num=[] #binary and non_numeric (type2)\n ml_cat=[] #multi-level categories (type3)\n\n for i in cat_att:\n try:\n # get number of unique values\n uv=bt[i].nunique()\n dt=bt[i].dtype\n\n # For binary variable with numeric value\n if uv==2 and dt!='object':\n bin_num.append(i) #append\n\n # For binary variable with non-numeric value\n elif uv==2 and dt=='object':\n bin_non_num.append(i) #append\n\n # For multi-level categories \n elif uv>2:\n ml_cat.append(i) #append\n\n\n # Exception for columns which are already dropped from data\n except Exception as e:\n print('Note=> Column does not exist: ', e)\n\n print('\\nBinary_Numeric:\\n', bin_num , '\\n\\nBinary_Non_Numeric:\\n' , bin_non_num , '\\n\\nMulti-Level:\\n' , ml_cat)\n \n################################################################################################################\n # Import Libraries\n from sklearn.preprocessing import LabelEncoder\n\n # Initialize encoder\n le=LabelEncoder()\n\n # columns list to encode\n col_list=bin_non_num+ml_cat\n\n # Create a dataframe of encoded features\n bt_encoded=bt.copy(deep=True)\n\n # Encode all columns in column list\n for cols in col_list:\n bt_encoded[cols]=le.fit_transform(bt[cols])\n\n################################################################################################################\n # Create a list of all mixed features\n mixed_att=feat_info[feat_info['type']=='mixed']['attribute']\n\n for i in mixed_att:\n # Investigate \"PRAEGENDE_JUGENDJAHRE\" and engineer two new variables.\n if i=='PRAEGENDE_JUGENDJAHRE':\n bt_encoded.loc[(bt_encoded[i]<=2) & (bt_encoded[i]>=1) ,i+'_Decade']=1\n bt_encoded.loc[(bt_encoded[i]<=4) & (bt_encoded[i]>=3),i+'_Decade']=2\n bt_encoded.loc[(bt_encoded[i]<=7) & (bt_encoded[i]>=5),i+'_Decade']=3\n bt_encoded.loc[(bt_encoded[i]<=9) & (bt_encoded[i]>=8),i+'_Decade']=4\n bt_encoded.loc[(bt_encoded[i]<=13) & (bt_encoded[i]>=10),i+'_Decade']=5 \n bt_encoded.loc[(bt_encoded[i]<=15) & (bt_encoded[i]>=14),i+'_Decade']=6\n\n bt_encoded[i+'_Movement']=1\n bt_encoded.loc[(bt_encoded[i]==1) | 
(bt_encoded[i]==3) |\n (bt_encoded[i]==5) | (bt_encoded[i]==8) |\n (bt_encoded[i]==10) | (bt_encoded[i]==12) |\n (bt_encoded[i]==14) ,i+'_Movement']=2\n\n # Investigate \"CAMEO_INTL_2015\" and engineer two new variables.\n elif i=='CAMEO_INTL_2015':\n bt_encoded[i+'_Wealth']=bt_encoded[i].apply(lambda x:int(x[0]))\n bt_encoded[i+'_Life_Stage']=bt_encoded[i].apply(lambda x:int(x[1]))\n\n # Investigate \"LP_LEBENSPHASE_GROB\" and engineer a new variables (for low,avg,high income groups) \n elif i=='LP_LEBENSPHASE_GROB':\n #low income\n bt_encoded[i+'_income']=1 \n\n #avg income\n bt_encoded.loc[(bt_encoded[i]==3) | (bt_encoded[i]==5) |\n (bt_encoded[i]==8) | (bt_encoded[i]==11) |\n (bt_encoded[i]==12) ,i+'_income']=2 \n #high income\n bt_encoded.loc[(bt_encoded[i]==9) ,i+'_income']=3 \n\n\n # Investigate \"WOHNLAGE\" and engineer a new variables (for Rural and Non Rural Flag) \n elif i=='WOHNLAGE':\n bt_encoded[i+'_Rural']=1\n bt_encoded.loc[(bt_encoded[i]==7) | (bt_encoded[i]==8),i+'_Rural']=2\n\n\n # Investigate \"PLZ8_BAUMAX\" and engineer a new variables (for family and business buildings) \n elif i=='PLZ8_BAUMAX':\n bt_encoded[i+'_Bldg_Type']=1\n bt_encoded.loc[bt_encoded[i]==5,i+'_Bldg_Type']=2\n \n################################################################################################################\n # Return the cleaned dataframe.\n #Remove original mixed columns from final dataset if new columns have been engineered\n for i in mixed_att:\n try:\n bt_encoded.drop(i,axis=1,inplace=True) \n except Exception as e:\n print('Note=> Column does not exist: ', e)\n return bt_encoded", "def handle_nan():\n df = pd.read_excel(\"final_dataset.xlsx\")\n # print(df.isnull().sum())\n\n df = df[pd.notnull(df['food_names'])]\n df2 = df.copy()\n genders = df2.pop('gender')\n genders = genders.unique()\n for gender in genders:\n for column in df:\n if column == \"food_names\" or column == \"BMI\":\n continue\n m1 = (df['gender'] == gender)\n # median = df.loc[m1, column].median()\n df.loc[m1, column] = df.loc[m1, column].fillna(df.loc[m1, column].median())\n\n #\n # for column in df:\n # if column == \"food_names\" or column == \"BMI\":\n # continue\n # df[column] = df[column].fillna(median_df[column])\n\n\n indices = list(np.where(df['BMI'].isna()))[0]\n for index in indices:\n df.loc[index, 'BMI'] = df.loc[index, 'weight'] / math.pow(df.loc[index, 'height'], 2)\n\n median_df = df.median(skipna=True, numeric_only=True)\n df['BMI'] = df['BMI'].fillna(median_df['BMI'])\n\n\n print(df.isnull().sum())\n\n writer = pd.ExcelWriter('final_dataset_with_median.xlsx', engine='xlsxwriter')\n df.to_excel(writer, sheet_name='Sheet1')\n writer.save()", "def replace_nan_num(filename, columns, value_dic):\n\th = pyfits.open(filename, mode='update')\n\tfor col in columns:\n\t\tif value_dic.has_key(col):\n\t\t\tval = value_dic[col]\n\t\telse:\n\t\t\tval = 0\n\t\tdata = h[1].data.field(col)\n\t\th[1].data.field(col)[:] = where(isnan(data), val, data)\n\th.flush()\n\th.close()", "def nonull(val):\n return val if not pd.isnull(val) else None", "def fix_missing_data(contour_data_list):\n contour_data = np.array(contour_data_list)\n if contour_data.any() == \"\":\n logger.warning(\" Missing values detected.\")\n missing_values = np.where(contour_data == \"\")[0]\n if missing_values.shape[0] > 1:\n logger.warning(\" More than one value missing, fixing this isn't implemented yet...\")\n else:\n logger.warning(\" Only one value missing.\")\n missing_index = missing_values[0]\n missing_axis = missing_index % 3\n if 
missing_axis == 0:\n logger.warning(\" Missing value in x axis: interpolating.\")\n if missing_index > len(contour_data) - 3:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[0]\n elif missing_index == 0:\n lower_val = contour_data[-3]\n upper_val = contour_data[3]\n else:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[missing_index + 3]\n contour_data[missing_index] = 0.5 * (lower_val + upper_val)\n elif missing_axis == 1:\n logger.warning(\" Missing value in y axis: interpolating.\")\n if missing_index > len(contour_data) - 2:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[1]\n elif missing_index == 0:\n lower_val = contour_data[-2]\n upper_val = contour_data[4]\n else:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[missing_index + 3]\n contour_data[missing_index] = 0.5 * (lower_val + upper_val)\n else:\n logger.warning(\" Missing value in z axis: taking slice value\")\n temp = contour_data[2::3].tolist()\n temp.remove(\"\")\n contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))\n return contour_data", "def format_optional_vector(x):\n\n # If vector is None or all elements are NaN, then return none\n # Otherwise format the vector as normal\n if x is None or np.all(np.isnan(x)):\n return 'none'\n else:\n return format_vector(x)", "def correct_for_missing_labels(df, annotation_values):\n columns = list(df.columns)\n missing_labels = [x for x in annotation_values if x not in columns]\n\n if not len(missing_labels) > 0:\n return(df)\n else:\n for msslbl in missing_labels:\n df[msslbl] = 0\n return(df)", "def test_nodata_value(self):\n\n # Read files with -9999 as nominated nodata value\n for filename in [os.path.join(TESTDATA, 'Population_2010_clip.tif'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n R = read_layer(filename)\n A = R.get_data(nan=False)\n\n # Verify nodata value\n Amin = min(A.flat[:])\n msg = ('Raster must have -9999 as its minimum for this test. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -9999, msg\n\n # Verify that GDAL knows about this\n nodata = R.get_nodata_value()\n msg = ('File %s should have registered nodata '\n 'value %i but it was %s' % (filename, Amin, nodata))\n assert nodata == Amin, msg\n\n # Then try using numpy.nan\n A = R.get_data(nan=True)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('True raster minimum must exceed -9999. '\n 'We got %f for file %s' % (Amin, filename))\n assert Amin > -9999, msg\n\n # Then try with a number\n A = R.get_data(nan=-100000)\n\n # Verify nodata value\n Amin = numpy.nanmin(A.flat[:])\n msg = ('Raster must have -100000 as its minimum for this test. 
'\n 'We got %f for file %s' % (Amin, filename))\n assert Amin == -100000, msg\n\n # Try with illegal nan values\n for illegal in [{}, (), [], None, 'a', 'oeuu']:\n try:\n R.get_data(nan=illegal)\n except InaSAFEError:\n pass\n else:\n msg = ('Illegal nan value %s should have raised '\n 'exception' % illegal)\n raise RuntimeError(msg)", "def replace_nan_value(value):\n return str(value)\n # return value", "def replace_invalid_values(row):\n invalid_values = [math.inf, -math.inf, math.nan]\n return [x if x not in invalid_values else None for x in row]", "def test_default_cleaning_style_with_fill(currency_df):\n result = currency_df.currency_column_to_numeric(\n \"d_col\",\n fill_all_non_numeric=995,\n )\n expected = pd.DataFrame(\n {\n \"a_col\": [\" 24.56\", \"-\", \"(12.12)\", \"1,000,000\"],\n \"d_col\": [np.nan, 995, 1.23, -1_000],\n }\n )\n assert_frame_equal(result, expected)", "def fill_nan_in_category(df):\n print(\" --- Filling NaN in Categories.\")\n columns = df.columns\n categorical = [x for x in columns if x.startswith('c_')]\n df[categorical] = df[categorical].fillna('missing')\n print(\" --- Finished filling NaN in Categories.\")\n return df", "def replace_missingvalues_bandmean(X):\n if X.ndim != 4:\n raise ValueError('Input not valid, no [pic, row, column, band] data format')\n\n zeros = np.where(X[:,:,:] == 0)\n\n bandmean = {}\n\n for i in sorted(np.unique(zeros[3])):\n bandmean.update({i:np.mean(X[:,:,:,i])})\n\n for i in range(0,len(zeros[0])):\n pic, row, column, band = zeros[0][i],zeros[1][i],zeros[2][i],zeros[3][i]\n mean = bandmean.get(band)\n X[pic,row,column,band] = int(mean)\n\n return X", "def data_missing() -> ExtensionArray:\n data_matrix = np.arange(\n 2 * 10 * 10 * 3,\n dtype=np.float_,\n ).reshape(2, 10, 10, 3)\n data_matrix[0, ...] = np.NaN\n grid_points = [\n np.arange(10),\n np.arange(10) / 10,\n ]\n\n return skfda.FDataGrid(data_matrix, grid_points=grid_points)", "def notna(self):\n return super().notna()", "def set_nan_as_string(data, replace_str='0'):\n for i, x in enumerate(data):\n for key, value in x.items():\n if value == '':\n x[key] = replace_str\n data[i] = x", "def untruncatedMean(self):\n self.raiseAnError(NotImplementedError,'untruncatedMean not yet implemented for ' + self.type)", "def untruncatedMean(self):\n self.raiseAnError(NotImplementedError,'untruncatedMean not yet implemented for ' + self.type)", "def test_missing_to_nan(self, input_data, value, expected):\n actual = data._missing_to_nan(input_data, value)\n pd.testing.assert_series_equal(actual, expected)\n self.assertIsNot(actual, input_data)", "def __init__(self, fill_value=np.nan):\n self.fill_value = fill_value" ]
[ "0.58770204", "0.57689667", "0.5714844", "0.5643517", "0.56218046", "0.5612133", "0.5571508", "0.552125", "0.55067784", "0.5474215", "0.5456898", "0.54213846", "0.53971267", "0.53654104", "0.5360555", "0.53490347", "0.53490347", "0.5342924", "0.53326184", "0.53209394", "0.53080004", "0.53080004", "0.5306598", "0.530552", "0.5282971", "0.52787405", "0.52656466", "0.52335703", "0.52252823", "0.5216208", "0.52144223", "0.5200549", "0.518789", "0.51821876", "0.51391125", "0.5136172", "0.51347375", "0.51347375", "0.51152146", "0.51115614", "0.51056194", "0.50912577", "0.5061533", "0.5061462", "0.5041785", "0.5039132", "0.50196487", "0.5006931", "0.5004018", "0.50011104", "0.49994272", "0.4995407", "0.49889076", "0.4982324", "0.49815798", "0.4980937", "0.49742997", "0.49689984", "0.49660656", "0.49632385", "0.49591932", "0.4957638", "0.49299648", "0.49214712", "0.49152806", "0.48982117", "0.48916042", "0.487852", "0.4873681", "0.48698935", "0.48552933", "0.4854386", "0.48431882", "0.48431882", "0.48373088", "0.48296806", "0.48272896", "0.48124978", "0.48124978", "0.48120722", "0.48019862", "0.4797291", "0.47815308", "0.4767471", "0.47581968", "0.47536546", "0.47502783", "0.4749335", "0.47459388", "0.47444797", "0.47432488", "0.47369167", "0.47362465", "0.47332764", "0.47327718", "0.4728701", "0.47207978", "0.47207978", "0.47198114", "0.47190982" ]
0.54455906
11
Append shore abbreviation to the base reference.
def reference(self): licence = self.context if IEnvironmentBase.providedBy(licence): return licence.reference to_shore = queryAdapter(licence, IShore) ref = '{} {}'.format(licence.reference, to_shore.display()) return ref
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_abbr(over_abbreviated, street_name):\n m = over_abbr_re.search(street_name)\n if m:\n abbr = m.group()\n over_abbreviated[abbr].add(street_name)", "def expand_abbreviation(abbr, doc_type = 'html', profile_name = 'plain'):\n\ttree = parse_into_tree(abbr, doc_type)\n\tif tree:\n\t\treturn replace_variables(re.sub('\\|', insertion_point, tree.to_string(profile_name) or ''))\n\t\t\n\treturn ''", "def addAlias(self, name):\r\n self._otherNames.append(name.strip().lower())", "def add_journal_abbrev(b):\n if 'journal' in b:\n if 'journal_abbrev' not in b:\n # Create one abbrev\n journal, abbrev = identify_some_journals(b)\n b['journal_abbrev'] = abbrev\n b['journal'] = journal", "def addDefName( self, name ):\n nm= self.fullNameFor( name )\n if nm is None: return None\n if nm[-3:] == '...':\n self.logger.debug( \"Abbreviated reference {!r}\".format(name) )\n return None # first occurance is a forward reference using an abbreviation\n if nm not in self.named:\n self.named[nm]= []\n self.logger.debug( \"Adding empty chunk {!r}\".format(name) )\n return nm", "def abbreviation(self, abbreviation):\n self._abbreviation = abbreviation", "def addReferenceGlyph(self, *args):\n return _libsbml.GeneralGlyph_addReferenceGlyph(self, *args)", "def sub(abbreviation: str, alias: str) -> str:\n return f'<sub alias=\"{alias}\">{abbreviation}</sub>'", "def update_short_name(name):\n # First verify that the common errors have been fixed\n name = update_street_name(name)\n\n # Find the abbreviation to replace\n m = over_abbr_re.search(name)\n if m:\n if m.group() in abbreviations:\n name = over_abbr_re.sub(abbreviations[m.group()], name)\n\n return name", "def AutoRef(base, resource, *args):\n return Ref(AutoName(base, resource, *args))", "def setSBaseRef(self, *args):\n return _libsbml.SBaseRef_setSBaseRef(self, *args)", "def set_abbreviation(self, abbreviation_name_prefix):\n abbreviation_name = abbreviation_name_prefix + self.vendor_unique_id\n self.set_value_into_input_field(self.abbreviation_textbox_locator, abbreviation_name)\n self.wait_for_ajax_spinner_load()\n return abbreviation_name", "def addSpeciesReferenceGlyph(self, *args):\n return _libsbml.ReactionGlyph_addSpeciesReferenceGlyph(self, *args)", "def get_ig_name ( base_name ) :\n return base_name + '-GW'", "def add_agr_prefix_by_species_taxon(identifier, taxon_id):\n species_dict = {\n 7955: 'ZFIN:',\n 6239: 'WB:',\n 10090: '', # No MGI prefix\n 10116: '', # No RGD prefix\n 559292: 'SGD:',\n 4932: 'SGD:',\n 7227: 'FB:',\n 9606: '', # No HGNC prefix\n 2697049: '' # No SARS-CoV-2 prefix\n }\n\n new_identifier = species_dict[taxon_id] + identifier\n\n return new_identifier", "def add_strand(self, strand: int, reference_sequence: Seq = None):\n self.location = FeatureLocation(self.location.start, self.location.end, strand)\n self._set_sequence(reference_sequence=reference_sequence)", "def set_fullname(self, value):\n raise NotImplementedError('set_fullname')", "def make_reference(self):\n self.make_reference2()", "def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)", "def weaveReferenceTo( self, aWeb, aWeaver ):\n self.fullName= aWeb.fullNameFor( self.name )\n txt= aWeaver.referenceTo( self.fullName, self.seq )\n aWeaver.codeBlock( txt )", "def reference(self, name):\n pass", "def handle_entityref(self, name):\r\n self.fed.append('&%s;' % name)", "def add_alias(self, alias):\n if alias != self.name:\n self.alias = alias", "def 
setUseAbbreviations(self, value):\n return self._set(useAbbreviations=value)", "def set_fullname(self, value):\n self.fullname = value", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def wrap_with_abbreviation(abbr, text, doc_type='html', profile='plain'):\n\ttree = parse_into_tree(abbr, doc_type)\n\tif tree:\n\t\trepeat_elem = tree.multiply_elem or tree.last\n\t\trepeat_elem.set_content(text)\n\t\trepeat_elem.repeat_by_lines = bool(tree.multiply_elem)\n\t\treturn replace_variables(re.sub('\\|', insertion_point, tree.to_string(profile) or ''))\n\telse:\n\t\treturn None", "def expand_abbrevs(name):\n key = name.upper()\n for abbrev, word in ABBREVS.iteritems():\n key = re.sub(abbrev, word, key)\n \n #Remove (.*) from the street name\n key = re.sub(r'\\(.*?(:?\\)|$)', '', key)\n \n #Unify names\n key = NUMBER_IN_NAMES_REGEX.sub(lambda i: i.group(1) + \" \", key)\n key = re.sub(u\"Ё\", u\"Е\", key)\n key = re.sub(u\"[\\\"'«»№]\", u\" \", key)\n\n # remove \"им\" prefix\n key = re.sub(ur'[^\\s]ИМ[\\.\\s]+', u' ', key)\n\n #Change name parts order\n words = key.split(r\" \")\n words.sort()\n key = \" \".join(words)\n\n key = re.sub(u\"\\s+\", u\" \", key).strip()\n\n logging.debug(\"Street name %s was converted to %s\" % (name, key))\n \n return key", "def anchor():\n return 'concat'", "def store_name(author):\n with open(\"ref.txt\",\"a\") as fp:\n fp.write(str(reference)+\"\"+'\\n')", "def abbrev(self):\n return self.ABBREV", "def __extend_uri(prefixes, short):\n for prefix in prefixes:\n if short.startswith(prefix):\n return short.replace(prefix + ':', prefixes[prefix])\n return short", "def set_abbr(self, value):\n self._set_one_attribute(self.AttributeNames.ABBR, value)\n return self", "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "def add_strand(self, strand):\n assert isinstance(strand, Strand)\n assert strand not in self.strand_list\n self.strand_list.append(strand)\n strand.beta_sheet = self", "def createSBaseRef(self):\n return _libsbml.SBaseRef_createSBaseRef(self)", "def named_back_reference(name:str) -> str:\n # TODO error handling \n return f\"\\\\k<{name}>\"", "def replace(self, name, *args):\n\n self._add(True, self.authority, name, *args)", "def to_crate_str(self):\n return (self.base + self.suffix).replace(\"~\", \"-\")", "def _add_branch(wit_path, name, head):\n\n with open(os.path.join(wit_path, '.wit', 'references.txt'), 'a') as data:\n data.write(''.join(f'\\n{name}={head}'))", "def addAlias(self, alias, node):", "def add_alias(self, alias):\n self.alias.append(alias)", "def setReferencePrimary(self, reference: ghidra.program.model.symbol.Reference) -> None:\n ...", "def get_alias(self):", "def add_name(self, node):\n if 'name' in self.options:\n name = nodes.fully_normalize_name(self.options.pop('name'))\n if 'name' in node:\n del(node['name'])\n node['names'].append(name)\n self.state.document.note_explicit_target(node, node)", "def __add__(self, new_name: Tuple[str, str]) -> None:\n self.formal_names.update({new_name[0]: new_name[1]})", "def get_abbreviation(res_type, abbr):\n\treturn get_settings_resource(res_type, abbr, 'abbreviations')", "def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' 
+ create_dns_name( base_name, name )", "def shorten_name(region_name, atlas):\n sub_list = dict(ctx=harvard_oxford_ctx_subs,\n sub=harvard_oxford_sub_subs)\n for pat, rep in sub_list[atlas]:\n region_name = re.sub(pat, rep, region_name).strip()\n return region_name", "def ref( self, aWeb ):\n self.resolve( aWeb )\n return self.fullName", "def updatePreview(self, baseName, *args):\n\n prefix = str(self.prefix.text())\n suffix = str(self.suffix.text())\n\n string = \"\"\n if len(prefix) > 0:\n string += prefix + \"_\"\n\n string += baseName\n\n if len(suffix) > 0:\n string += \"_\" + suffix\n\n self.previewName.setText(string)", "def expand_shortened_reference_name(short_name, full_reference_names):\n if short_name in full_reference_names:\n return short_name\n\n candidates = [\n full_name for full_name in full_reference_names if full_name.startswith(short_name)\n ]\n if len(candidates) == 1:\n return candidates[0]\n elif len(candidates) > 1:\n raise ValueError(\n \"Multiple candidates found trying to expand '{0}'. Found '{1}'. \"\n \"Searched '{2}'\".format(\n short_name,\n ','.join(candidates),\n ','.join(full_reference_names),\n )\n )\n else:\n raise ValueError(\n \"Unable to expand '{0}'. \"\n \"Searched '{1}'\".format(\n short_name,\n ','.join(full_reference_names),\n )\n )", "def get_add_on():\n #List of all the add ons made with a list comprehension\n add_on_list = [[a, b] for a in list(string.ascii_lowercase) for b in list(string.ascii_lowercase)]\n global a_base\n #reset the a_base if it gets to high\n if a_base + a_key > len(add_on_list) - 1:\n a_base = -1\n #sets value of add_on\n add_on = add_on_list[a_base + a_key]\n add_on = \"\".join(add_on)\n a_base += a_key\n return add_on", "def getSBaseRef(self, *args):\n return _libsbml.SBaseRef_getSBaseRef(self, *args)", "def appendAnnotation(self, *args):\n return _libsbml.SpeciesReference_appendAnnotation(self, *args)", "def _complete_name(self):\n for record in self:\n if record.parent_id:\n record.complete_name = record.parent_id.complete_name + ' / ' + record.name\n else:\n record.complete_name = record.name", "def build_abbreviation(agency_name):\n abbreviation = ''\n for ch in agency_name:\n if ch in string.ascii_uppercase:\n abbreviation += ch\n return abbreviation", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def AutoName(base, resource, *args):\n auto_name = '%s-%s' % (base, '-'.join(list(args) + [default.AKA[resource]]))\n if not RFC1035_RE.match(auto_name):\n raise Error('\"%s\" name for type %s does not match RFC1035 regex (%s)' %\n (auto_name, resource, RFC1035_RE.pattern))\n return auto_name", "def create_location_sublink(x: str) -> str:\n tmpname = strip_location_subtext(x)\n if tmpname in point_locations:\n loc = point_locations[tmpname]\n tmpstr = create_location_link(loc, tmpname, do_print, path=\"../locations/\")\n if tmpname != x:\n tmpstr += x[len(tmpname):]\n tmpstr = tmpstr.replace(\"<!>\", fetch_fa_glyph(\"bad location\"))\n # tmpstr = tmpstr.replace(\"<?>\", fetch_fa_glyph(\"questionable id\"))\n tmpstr = tmpstr.replace(\"<?>\", \"\").strip()\n else:\n tmpstr = x\n return tmpstr", "def test_rebind_prefix_replace(tmp_path: Path, store_name: str, override: bool) -> None:\n graph = make_graph(tmp_path, store_name)\n graph.bind(\"egsub\", EGNSSUB_V0)\n if override:\n graph.bind(\"egsub\", EGNSSUB_V1, override=override, replace=True)\n check_ns(graph, {\"egsub\": EGNSSUB_V1})\n else:\n graph.bind(\"egsub\", EGNSSUB_V1, override=override, replace=True)\n 
check_ns(graph, {\"egsub\": EGNSSUB_V0})", "def library_registry_short_name(self, value):\n if value:\n value = value.upper()\n if '|' in value:\n raise ValueError(\n \"Library registry short name cannot contain the pipe character.\"\n )\n value = str(value)\n self._library_registry_short_name = value", "def full_name(self, full_name):\n\n self._full_name = full_name", "def full_name(self, full_name):\n\n self._full_name = full_name", "def weaveShortReferenceTo( self, aWeb, aWeaver ):\n txt= aWeaver.referenceTo( None, self.seq )\n aWeaver.codeBlock( txt )", "def shn_abbreviate(word, size=48):\n\n if word:\n if (len(word) > size):\n word = \"%s...\" % word[:size - 4]\n else:\n return word\n else:\n return word", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def _merge_ensembl_aliases_with_ucsc():\n result = {}\n for ucsc_name, ensembl_name in ucsc_to_ensembl_reference_names.items():\n result[ensembl_name] = [ucsc_name] + \\\n ensembl_reference_aliases.get(ensembl_name, [])\n return result", "def add_prefix(self, name, uri):\n\n self.prefixes.append('%s: %s' % (name, uri))", "def append_amp(self, amp_props):\n self._amp_names.append(amp_props.name)", "def weaveShortReferenceTo( self, aWeb, aWeaver ):\n self.weaveReferenceTo( aWeb, aWeaver )", "def createReferenceGlyph(self):\n return _libsbml.GeneralGlyph_createReferenceGlyph(self)", "def chain_full_name(alignment, chain):\n return '%s_%s' % (alignment, chain)", "def name(self):\n return f\"BlueAir {self._ba_name}\"", "def pronoun_fix(self, section):\n return re.sub(reg_abbrev, repl, section)", "def addressToName(self, address):\n pass", "def add(variable, value):\n prefixes[variable] = value", "def stat_name_abbr(self, stat_name_abbr):\n\n self._stat_name_abbr = stat_name_abbr", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def create_field_href(reference: NamedRef,\n env: BuildEnvironment) -> pending_xref:\n\n text = reference.name\n options = {'refdoc': env.docname,\n 'refdomain': \"py\",\n 'reftype': \"obj\",\n 'reftarget': reference.ref}\n\n refnode = pending_xref(reference.name, **options)\n classes = ['xref', \"py\", '%s-%s' % (\"py\", \"obj\")]\n refnode += emphasis(text, text, classes=classes)\n return refnode", "def abbreviation(self):\n return self._abbreviation", "def changeAlias(self, alias, node):", "def appendString(self, org, app):\n if org == \"-\":\n return app\n else:\n return org + \" + \" + app", "def deabbreviate(self, st):\n\t\tabbrs = {'gws': 'greater western sydney giants',\n\t\t\t\t 'gwsg': 'greater western sydney giants',\n\t\t\t\t 'afl': 'australian football league',\n\t\t\t\t 'nrc': 'national rugby championship',\n\t\t\t\t 'nrl': 'national rugby league',\n\t\t\t\t 'syd': 'sydney',\n\t\t\t\t 'mel': 'melbourne',\n\t\t\t\t 'melb': 'melbourne',\n\t\t\t\t 'bris': 'brisbane',\n\t\t\t\t 'brisb': 'brisbane',\n\t\t\t\t 'gc': 'gold coast',\n\t\t\t\t 'adel': 'adelaide',\n\t\t\t\t 'canb': 'canberra',\n\t\t\t\t 'mt': 'mount',\n\t\t\t\t 'utd': 'united',\n\t\t\t\t 'cty': 'city',\n\t\t\t\t 'football club': 'fc',\n\t\t\t\t 'snr': 'senior',\n\t\t\t\t 'jr': 'junion',\n\t\t\t\t 'nsw': 'new south wales' ,\n\t\t\t\t 'vic': 'victoria',\n\t\t\t\t 'tas' : 'tasmania',\n\t\t\t\t 'sa': 'south australia',\n\t\t\t\t 'wa': 'western australia',\n\t\t\t\t 'act': 'australian 
capital territory',\n\t\t\t\t 'nt': 'northern territory',\n\t\t\t\t 'qld': 'queensland',\n\t\t\t\t 'champs': 'championships', \n\t\t\t\t 'champ': 'championship', \n\t\t\t\t 'soc': 'society',\n\t\t\t\t 'ent': 'entertainment',\n\t\t\t\t 'intl': 'international', \n\t\t\t\t 'int': 'international', \n\t\t\t\t 'aust': 'australian'}\n\n\t\t# first replace full state names by abbreviations;\n\t\tfor ab in abbrs:\n\t\t\tst = re.sub(r'\\b' + ab + r'\\b', abbrs[ab], st)\n\n\t\treturn st", "def __str__(self):\n if self.reference is None:\n return '%s - %s - %s' % (self.name, self.qualifier, self.location)\n else:\n return '%s - %s - %s - %s' % (self.name, self.qualifier, self.location, self.reference)", "def createSpeciesReferenceGlyph(self):\n return _libsbml.ReactionGlyph_createSpeciesReferenceGlyph(self)", "def AddAliases(self, aliases):\n self._legacy = False\n if aliases:\n self._aliases.update(aliases)", "def _abbreviate_name(self, row: Series)->str:\n return row['first_name'][0]+'.'+row['last_name']", "def seal(self, ref):\n self.instructions.append(Seal(ref))", "def setRef(self,reference):\n (iMod,iObj) = reference\n self.rnam.setData(struct.pack('i',iObj)[:3] + struct.pack('B',iMod))\n self.setChanged()", "def _set_base_namelists(self):\n\n # Create namelists\n hydro_namelist = self.model.hydro_namelists\n hrldas_namelist = self.model.hrldas_namelists\n\n self.base_hydro_namelist = hydro_namelist.patch(self.domain.hydro_namelist_patches)\n self.base_hrldas_namelist = hrldas_namelist.patch(self.domain.hrldas_namelist_patches)", "def add_hasa(self, child, n=1):\n self.refers[child] = self.refers.get(child, 0) + n", "def __str__(self):\r\n return \"{} plus flagfall of ${}\".format(super().__str__(), self.flagfall)", "def _add_state(self, prefix):\n for i in range(len(self.states)):\n self.states[i] = prefix + self.states[i]\n\n self.q_0 = prefix + self.q_0\n\n for i in range(len(self.final)):\n self.final[i] = prefix + self.final[i]\n\n keys = list(self.transition.keys())\n for key in keys:\n new_key = prefix + key\n self.transition[new_key] = []\n for i in range(len(self.transition[key])):\n self.transition[new_key].append(prefix + self.transition[key][i])\n del self.transition[key]", "def normalize_reference_name(name):\n return name.strip().lower().replace(\"-\", \"_\").replace(\" \", \"_\")", "def add(self, bento_name, bento_version):", "def _add_to_ref(self, rec_curr, line):\n # Examples of record lines containing ':' include:\n # id: GO:0000002\n # name: mitochondrial genome maintenance\n # namespace: biological_process\n # def: \"The maintenance of ...\n # is_a: GO:0007005 ! mitochondrion organization\n if line[:4] == \"id: \":\n assert not rec_curr.id\n rec_curr.id = line[4:]\n elif line[:8] == \"alt_id: \":\n rec_curr.alt_ids.add(line[8:])\n elif line[:6] == \"name: \":\n assert not rec_curr.name\n rec_curr.name = line[6:]\n elif line[:11] == \"namespace: \":\n assert not rec_curr.namespace\n rec_curr.namespace = line[11:]\n elif line[:6] == \"is_a: \":\n rec_curr._parents.add(line[6:].split()[0])\n elif line[:13] == \"is_obsolete: \" and line[13:] == \"true\":\n rec_curr.is_obsolete = True\n elif self.optobj and ':' in line:\n self.optobj.update_rec(rec_curr, line)", "def combine_state_names_and_abbreviations():\n return sorted(us_state_abbrev.values())[:10] + sorted(states)[-10:]", "def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word" ]
[ "0.59830004", "0.5703842", "0.5681109", "0.5655727", "0.5474441", "0.54697645", "0.5435609", "0.5420919", "0.53656024", "0.53300464", "0.5218761", "0.5197725", "0.51889247", "0.51828516", "0.51511014", "0.5144388", "0.5134165", "0.5127529", "0.51188904", "0.51158106", "0.5102266", "0.50946903", "0.5089451", "0.5054653", "0.50494635", "0.50408655", "0.50339615", "0.5014095", "0.50026125", "0.49925503", "0.49851775", "0.49586692", "0.4956796", "0.49554384", "0.494003", "0.4936394", "0.49250534", "0.49233103", "0.49144566", "0.49126184", "0.49097103", "0.49013636", "0.48937538", "0.48886138", "0.48618615", "0.48328087", "0.48227498", "0.4820708", "0.481936", "0.4814114", "0.48090512", "0.4805896", "0.47873193", "0.47817704", "0.47598046", "0.47579873", "0.4750099", "0.47449756", "0.4732708", "0.472693", "0.47237146", "0.4716313", "0.47131395", "0.47131395", "0.47072968", "0.47052148", "0.46974322", "0.46974322", "0.4696747", "0.4691401", "0.46908936", "0.46891052", "0.46884102", "0.4684519", "0.4675925", "0.46735454", "0.46563765", "0.46531963", "0.46485344", "0.46317077", "0.46303356", "0.4627003", "0.46235782", "0.462184", "0.4620653", "0.46183148", "0.46137065", "0.4613294", "0.46127346", "0.46084806", "0.4607497", "0.46007904", "0.4600479", "0.45943955", "0.45924577", "0.45890078", "0.4586776", "0.4586236", "0.45839104", "0.4577852" ]
0.4860687
45
If the intent name exists, then return without action; otherwise create a blank intent
def create_intent(intent_name):
    try:
        response=client.get_intent(
            name=intent_name,
            version="$LATEST"
        )
        print "There is a %s intent in your account, please consider delete it or using another name" %(intent_name)
        return
    except:
        pass
    response=client.put_intent(
        name=intent_name,
        description='the demo intent',
        sampleUtterances=[
            'Can I book a hotel',
        ],
        confirmationPrompt={
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Your hotel booking is ready, do you want to place an order?'
                },
            ],
            'maxAttempts': 2,
        },
        rejectionStatement={
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Ok. I will discard the hotel booking information'
                },
            ],
        },
        conclusionStatement={
            'messages': [
                {
                    'contentType': 'PlainText',
                    'content': 'Your hotel booking has been confirmed'
                },
            ],
        },
        fulfillmentActivity={
            'type': 'ReturnIntent'
        }
    )
    print "Intent %s created successfully" %(intent_name)
    return
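A minimal usage sketch for the snippet above, under stated assumptions: the module-level `client` is the boto3 Amazon Lex Model Building Service client (`boto3.client("lex-models")`, whose `get_intent`/`put_intent` calls the snippet relies on), AWS credentials are configured, and the intent name `BookHotel` is purely illustrative.

```python
# Assumptions: boto3 installed, AWS credentials configured, and the
# create_intent() function above defined in the same module.
import boto3

# "lex-models" is the boto3 service name for the Amazon Lex Model Building Service.
client = boto3.client("lex-models")

if __name__ == "__main__":
    # Creates the intent only if no intent with this name already exists;
    # "BookHotel" is a hypothetical intent name.
    create_intent("BookHotel")
```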
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intent(self, intent_name):\n for name, intent in self:\n if name == intent_name:\n return intent\n else:\n return None", "def exists_intent_action(self, intent_keyword):\n pass", "def get_intent_action(self, intent_keyword):\n pass", "def get_alexa_intent(data: dict) -> Union[str, None]:\n if \"request\" in data and \"intent\" in data[\"request\"] and \"name\" in data[\"request\"][\"intent\"]:\n return data[\"request\"][\"intent\"][\"name\"]\n else:\n return None", "def process_intent(self, intent: Intent, game: Game):\n return intent", "def add_intent_action(self, intent_keyword, intent_function):\n pass", "def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def intent(req, session):\n intent = req['intent']\n if session.setdefault('attributes', {}) is None:\n # Ensure that there's always a dictionary under \"attributes\".\n session['attributes'] = {}\n\n # If the user has already opened a dialog, handle incorrect\n # Intents from Alexa due to misunderstandings or user error.\n if session['attributes'].get('add_address') and \\\n not intent['name'] in ADD_ADDRESS_INTENTS:\n # Try to recover if Alexa misunderstood\n # an address as a station name.\n if intent['name'] == 'CheckStatusIntent' and \\\n intent['slots'].get('station_name', {}).get('value'):\n intent['name'] = 'AddAddressIntent'\n intent['slots'].setdefault('address_street', {})['value'] = \\\n intent['slots']['station_name']['value']\n else:\n return reply.build(\"I didn't understand that as an address. 
\"\n \"Please provide an address, such as \"\n \"\\\"123 north State Street\\\".\",\n reprompt=\"What's the street number and name?\",\n persist=session['attributes'],\n is_end=False)\n elif session['attributes'].get('remove_address') and \\\n not intent['name'] in REMOVE_ADDRESS_INTENTS:\n # If the user wanted to remove an address, but didn't\n # give an intelligible response when we requested\n # confirmation, then assume the answer is no.\n intent['name'] = 'AMAZON.NoIntent'\n\n # Dispatch each Intent to the correct handler.\n if intent['name'] == 'CheckBikeIntent':\n if not intent['slots']['bikes_or_docks'].get('value'):\n # If something went wrong understanding the bike/dock\n # value, fall back on the status check.\n return check_status(intent, session)\n else:\n return check_bikes(intent, session)\n elif intent['name'] == 'CheckStatusIntent':\n return check_status(intent, session)\n elif intent['name'] == 'ListStationIntent':\n return list_stations(intent, session)\n elif intent['name'] == 'CheckCommuteIntent':\n return check_commute(intent, session)\n elif intent['name'] == 'AddAddressIntent':\n return add_address(intent, session)\n elif intent['name'] == 'CheckAddressIntent':\n return check_address(intent, session)\n elif intent['name'] == 'RemoveAddressIntent':\n return remove_address(intent, session)\n elif intent['name'] == 'AMAZON.NextIntent':\n return next_intent(intent, session)\n elif intent['name'] == 'AMAZON.YesIntent':\n return yes_intent(intent, session)\n elif intent['name'] == 'AMAZON.NoIntent':\n return no_intent(intent, session)\n elif intent['name'] in ['AMAZON.StopIntent', 'AMAZON.CancelIntent']:\n return reply.build(\"Okay, exiting.\", is_end=True)\n elif intent['name'] == 'AMAZON.HelpIntent':\n return reply.build(\"You can ask me how many bikes or docks are \"\n \"at a specific station, or else just ask the \"\n \"status of a station. Use the %s station \"\n \"name, such as \\\"%s\\\". \"\n \"If you only remember one cross-street, you \"\n \"can ask me to list all stations on a particular \"\n \"street. If you've told me to \\\"add an address\\\", \"\n \"I can remember that and use it when you \"\n \"ask me to \\\"check my commute\\\". \"\n \"What should I do?\" %\n (config.network_name, config.sample_station),\n persist=session['attributes'],\n is_end=False)\n else:\n return reply.build(\"I didn't understand that. 
Try again?\",\n persist=session['attributes'],\n is_end=False)", "def _get_action_from_name(self, name):\n\n container = self._action\n if name is None:\n return None\n\n for action in container:\n if \"/\".join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)", "def get_action_by_name(self, name):\n for action in self.all_actions:\n if action.name == name:\n return action\n return None", "def handle_intent(intent_name):\n if intent_name in name_to_handler:\n return name_to_handler[intent_name]()\n else:\n return question_answer(intent_name)", "def check_intent_attr(self, node, arg):\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n is_ptr = declarator.is_indirect()\n intent = attrs[\"intent\"]\n if intent is None:\n if node is None:\n # do not default intent for function pointers\n pass\n elif declarator.is_function_pointer():\n intent = \"in\"\n elif not is_ptr:\n intent = \"in\"\n elif arg.const:\n intent = \"in\"\n elif arg.typemap.sgroup == \"void\":\n # void *\n intent = \"in\" # XXX must coordinate with VALUE\n else:\n intent = \"inout\"\n # XXX - Do hidden arguments need intent?\n else:\n intent = intent.lower()\n if intent in [\"in\", \"out\", \"inout\"]:\n meta[\"intent\"] = intent\n else:\n raise RuntimeError(\"Bad value for intent: \" + attrs[\"intent\"])\n if not is_ptr and intent != \"in\":\n # Nonpointers can only be intent(in).\n raise RuntimeError(\"{}: Only pointer arguments may have intent attribute\".format(node.linenumber))\n meta[\"intent\"] = intent\n return intent", "def delete_intent(intent_name):\n try:\n client.get_intent(\n name=intent_name,\n versionOrAlias='$LATEST'\n )\n answer=raw_input(\"Do you want to delete %s from your account(Y/y for YES, other NO):\" %(intent_name))\n if answer in ['Y', 'y']:\n client.delete_intent(\n name=intent_name\n )\n print \"You chose to delete the intent %s, deleted...\" %(intent_name)\n else:\n print \"You chose not to delete the inten t%s, exiting...\" %(intent_name)\n except:\n print \"There is no intent called %s, exiting...\" %(intent_name)\n return", "def _intent(self) -> MessageIntent:\r\n pass", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n\n # process the intents\n if intent_name == \"comenzar\":\n return get_fact_response()\n elif intent_name == \"otravez\":\n return get_fact_response()\n elif intent_name == \"AMAZON.YesIntent\":\n return get_fact_response()\n elif intent_name == \"AMAZON.NoIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name 
== \"AMAZON.StopIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n else:\n print(\"invalid Intent reply with help\")\n return get_help_response()", "def create_action(instance, verb, user):\n return instance.activities.create(action=verb, owner=user)", "def dispatch(intent_request):\n\n #logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n print(\"asdd\")\n print(intent_name)\n \n # Dispatch to your bot's intent handlers\n if intent_name == 'GreetingIntent':\n return greeting_intent(intent_request)\n elif intent_name == 'DiningSuggestionsIntent':\n return dining_suggestion_intent(intent_request)\n elif intent_name == 'ThankYouIntent':\n return thank_you_intent(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def find_action_by_name(self, name): # because I dont want possible actions to be a dictionary- it fucks stuff up\n return next(x for x in self.possible_actions if name.lower() == x.name.lower())", "def get_dialogflow_intent(self, data: dict) -> Union[Any, None]:\n if \"result\" in data and \"action\" in data[\"result\"]:\n self.dialogflow_v = 1\n return data[\"result\"][\"action\"]\n elif \"queryResult\" in data and \"action\" in data[\"queryResult\"]:\n self.dialogflow_v = 2\n return data[\"queryResult\"][\"action\"]\n else:\n return None", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'GreetingIntent':\n return GreetingIntent(intent_request)\n elif intent_name == 'DiningSuggestionsIntent':\n return DiningSuggestionsIntent(intent_request)\n elif intent_name == 'ThankYouIntent' :\n return ThankYouIntent(intent_request)\n \n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def no_intent(intent, session):\n if session.get('attributes', {}).get('add_address') and \\\n session['attributes']['next_step'] == 'store_address':\n session['attributes']['next_step'] = 'num_and_name'\n return reply.build(\"Okay, what street number and name do you want?\",\n reprompt=\"What's the street number and name?\",\n persist=session['attributes'],\n is_end=False)\n elif session.get('attributes', {}).get('remove_address'):\n return remove_address(intent, session)\n else:\n return reply.build(\"Sorry, I don't know what you mean. 
Try again?\",\n persist=session.get('attributes', {}),\n is_end=False)", "def getActionByName(name):\n for action in getActions():\n if action.name == name:\n return action\n\n raise errors.NoSuchAction(name)", "def filter_intent(self, intent: Intent):\n return True", "def _action(self, service_name: str, action_name: str) -> Optional[UpnpAction]:\n service = self._service(service_name)\n if not service:\n return None\n\n if not service.has_action(action_name):\n return None\n\n return service.action(action_name)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n\n if intent_name not in skillmap:\n intent_name = \"NullSkill\"\n\n if intent_name in skillmap:\n try:\n return skillmap[intent_name].execute(intent, session)\n except Exception as e:\n traceback.print_exc()\n return SkillBase().respond(\"Sorry I missed that\", \"Error\", str(e))\n else:\n raise ValueError(\"Invalid intent\")", "def get_intent(a):\n intent_dict = {\n 1: lookup_pwd,\n 2: store_pwd,\n 3: update_pwd,\n 4: update_master,\n 5: save_and_exit\n }\n return intent_dict[int(a)]()", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # intents_object = get_custom_intents()\n print (\"************\")\n print (intent_request)\n # fall_back = True\n # final_function = ''\n # for temp_intent in intents_object:\n # if temp_intent == intent_name:\n # fall_back = False\n # final_function = temp_intent[1]\n # break\n # if(fall_back):\n # return custom_handlers.get_fallback_msg()\n # else:\n # return final_function(intent, session)\n \n # Dispatch to your skill's intent handlers\n if intent_name == \"welcome_intent\":\n return custom_handlers.get_welcome_msg(intent, session)\n elif intent_name == \"search_intent\":\n return custom_handlers.get_search_msg(intent, session)\n elif intent_name == \"architecture\":\n return custom_handlers.get_architecture_msg(intent, session)\n elif intent_name == \"saybye\":\n return custom_handlers.get_saybye_response(intent, session)\n elif intent_name == \"myname\":\n return custom_handlers.get_myname_response(intent, session)\n elif intent_name == \"ask\":\n return custom_handlers.get_ask_response(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return custom_handlers.get_welcome_response(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return custom_handlers.handle_session_end_request(intent, session)\n else:\n return custom_handlers.get_fallback_msg(intent, session)", "def on_intent(request, session):\n\n intent_name = request['intent']['name']\n \n # process the intents\n if intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n \n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n \n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n \n elif intent_name == \"recognizeDates\":\n slots = request['intent']['slots']\n date_start_slot = slots.get('dateStart',{'value':'NA'}).get('value','NA')\n date_end_slot = slots.get('dateEnd',{'value':'NA'}).get('value','NA')\n\n return get_intent_response(date_start_slot,date_end_slot)\n \n elif intent_name == \"PollHprofs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return 
response(speech_response(speechOutput, True))\n\n elif intent_name == \"SpinVMs\":\n slots = request['intent'].get('slots','')\n print(slots)\n speechOutput = \"Under development\"\n return response(speech_response(speechOutput, True))\n\n else:\n print(\"For invalid Intents reply with help\")\n return get_help_response()", "def respond_to_intent(self, intent):\n if type(intent) is BARTQueryIntent:\n return self.respond_to_bart_intent(intent)\n elif type(intent) is BusQueryIntent: \n return self.respond_to_bus_intent(intent)\n else:\n return HelpIntent()", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n #print (intent)\r\n try :\r\n intent_name_value = intent['slots']['friend_name']['value']\r\n except :\r\n print(\"**** Can't find name\")\r\n\r\n try: \r\n intent_gender_value = intent['slots']['gender']['value']\r\n # print(\"****intent_gender_value: \" + intent_name_value)\r\n except :\r\n print(\"**** Can't find gender\")\r\n\r\n #friend_name = intent_value\r\n print(\"****session: \" + str(session))\r\n print(\"****Intent found is: \" + str(intent))\r\n print(\"****Intent Name found is: \" + str(intent_name))\r\n #print(\"****intent_gender_value found is: \" + str(intent_gender_value))\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"welcomeIntent\" and (intent_gender_value == \"her\" or intent_gender_value == \"she\"):\r\n return say_hello_to_girl(intent_name_value)\r\n elif intent_name == \"welcomeIntent\" and (intent_gender_value == \"his\" or intent_gender_value == \"he\"):\r\n return say_hello_to_boy(intent_name_value) \r\n elif intent_name == \"jokeIntent\" :\r\n return joke_story(session)\r\n elif intent_name == \"foodIntent\" :\r\n return favorite_food(session)\r\n elif intent_name == \"secretIntent\" :\r\n return secret_story(session)\r\n elif intent_name == \"songIntent\" :\r\n return favorite_song(session)\r\n elif intent_name == \"quoteIntent\" :\r\n return favorite_quote(session)\r\n elif intent_name == \"gameIntent\" :\r\n return favorite_game(session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request(session)\r\n elif intent_name == \"AMAZON.FallbackIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def handle(self, message: discord.Message, intent: Intent) -> Optional[str]:\n pass", "def launch_intent():\n welcome_message = \"On which cloud would you like to launch Galaxy?\"\n return question(welcome_message).reprompt(help_text)", "async def is_not_shortcut_name(argument, context, platform_type, verbose):\n data_object = getattr(context, platform_type + \"_data\")\n if argument not in data_object.shortcuts:\n return argument\n\n if verbose:\n custom_msg = await context.language.get_text(\n \"existing_\" + platform_type + \"_shortcut_name\")\n\n await embed_messages.invalid_argument(context, argument, custom_msg)\n\n return None", "def dispatch(intent_request):\n\n intent_name = intent_request[\"currentIntent\"][\"name\"]\n\n # Dispatch to bot's intent handlers\n if intent_name == \"recommendPortfolio\":\n return recommend_portfolio(intent_request)\n\n raise Exception(\"Intent with name \" + intent_name + \" not supported\")", "def 
filter_action(action):\n return True\n if not action.get('app_id'):\n return None\n if not action.get('user_id'):\n return None\n return action", "def get_action(self, name: str) -> Action:\n return self.get_session.query(self.action_model).filter_by(name=name).one_or_none()", "def __init__(self, intent=None):\n super(IntentRequest, self).__init__()\n default_attr = dict(intent=Intent())\n self.intent = intent\n self._set_default_attr(default_attr)", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n print(intent)\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhoIs\":\n intent_search = intent['slots']['ASN']['value']\n return whois(intent_search)\n elif intent_name == \"WherePeer\":\n intent_search = intent['slots']['company']['value']\n return wherePeer(intent_search)\n elif intent_name == \"WhoPeers\":\n intent_search = intent['slots']['IX']['value']\n return whoPeers(intent_search)\n elif intent_name == \"WhosAt\":\n intent_search = intent['slots']['facility']['value']\n return whosAt(intent_search)\n elif intent_name == \"RouteServers\":\n return routeServers()\n else:\n raise ValueError(\"Invalid intent\")", "def create_action(self, name):\n action = self.get_action(name)\n if action is None:\n try:\n action = self.action_model()\n action.name = name\n self.get_session.add(action)\n self.get_session.commit()\n return action\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_PERMISSION.format(e))\n self.get_session.rollback()\n return action", "async def activity(self, ctx:utils.Context, activity_type:str, *, name:str=None):\n\n if name:\n activity = discord.Activity(name=name, type=getattr(discord.ActivityType, activity_type.lower()))\n else:\n await self.bot.set_default_presence()\n return\n await self.bot.change_presence(activity=activity, status=self.bot.guilds[0].me.status)", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "def default_intent(self, f):\n self._default_intent_view_func = f\n\n return f", "def on_intent(intent_request, session):\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"test\":\r\n return get_test_response()\r\n elif intent_name==\"inputoutputIntent\":\r\n return get_inputOutputIntent_response(intent)\r\n elif intent_name==\"lightsIntent\":\r\n return get_lightsIntent_response(intent)\r\n elif 
intent_name==\"shadesIntent\":\r\n return get_shadesIntent_response(intent)\r\n elif intent_name==\"volumeIntent\":\r\n return get_volumeIntent_response(intent)\r\n elif intent_name==\"InputPresetIntent\":\r\n return get_InputPresetIntent_response(intent)\r\n elif intent_name==\"monitorsIntent\":\r\n return get_monitorsIntent_response(intent)\r\n elif intent_name==\"bossIntent\":\r\n return get_bossIntent_response()\r\n elif intent_name==\"AudioCall\":\r\n return get_AudioCall_response(intent)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def detach_intent(self, intent_name):\n new_parsers = [\n p for p in self.engine.intent_parsers if p.name != intent_name\n ]\n self.engine.intent_parsers = new_parsers", "def generate_base_intent_and_message(self,opponent:Player) -> str:\r\n self.action_base = self.base.strategy(opponent)\r\n self.list_base_action.append(self.action_base)\r\n self.intent_sent_prev = self.intent_sent\r\n self.intent_sent = self.generate_message()\r\n return self.intent_sent", "def action_name(self) -> typing.Optional[str]:\n return self._values.get(\"action_name\")", "def action_name(self) -> typing.Optional[str]:\n return self._values.get(\"action_name\")", "def action_name(self) -> typing.Optional[str]:\n return self._values.get(\"action_name\")", "def action_name(self) -> typing.Optional[str]:\n return self._values.get(\"action_name\")", "def action_name(self) -> typing.Optional[str]:\n return self._values.get(\"action_name\")", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n return diningsuggestions_intent(intent_request)", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n #temp = intent_request['intent']['name']['slots']['name']\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"MoveRight\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveLeft\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveForward\":\r\n return set_session(intent, session)\r\n elif intent_name == \"MoveBackward\":\r\n return set_session(intent, session)\r\n elif intent_name == \"Help\":\r\n return AskNow(intent, session)\r\n elif intent_name == \"Hint\":\r\n return bfs(intent, session)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n print(\"---INTENT: \" + intent_name)\n\n # Dispatch to your skill's intent handlers\n try:\n if intent_name == \"GetSynonymIntent\":\n return get_synonym(intent, session)\n elif intent_name == \"GetRandomSynonymIntent\":\n return get_random_synonym(intent, session)\n elif intent_name == \"GetAllSynonymsIntent\":\n return 
get_all_synonyms(intent, session)\n elif intent_name == \"GetAntonymIntent\":\n return get_antonym(intent, session)\n elif intent_name == \"GetRandomAntonymIntent\":\n return get_random_antonym(intent, session)\n elif intent_name == \"GetAllAntonymsIntent\":\n return get_all_antonyms(intent, session)\n elif intent_name == \"GetPOSIntent\":\n return get_pos(intent, session)\n elif intent_name == \"GetRhymeIntent\":\n return get_rhyme(intent, session)\n elif intent_name == \"GetRandomRhymeIntent\":\n return get_random_rhyme(intent, session)\n elif intent_name == \"GetDefinitionIntent\":\n return get_definition(intent, session)\n elif intent_name == \"GetRandomDefinitionIntent\":\n return get_random_definition(intent, session)\n elif intent_name == \"GetAllDefinitionsIntent\":\n return get_all_definitions(intent, session)\n elif intent_name == \"GetSyllablesIntent\":\n return get_syllables(intent, session)\n elif intent_name == \"GetFrequencyIntent\":\n return get_frequency(intent, session)\n elif intent_name == \"GetPronunciationIntent\":\n return get_pronunciation(intent, session)\n elif intent_name == \"GetAllCommandsIntent\":\n return get_all_commands()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that command. I can find definitions, synonyms, antonyms, and more if you say something like 'a synonym for happy'.\", None, True)\n return build_response({}, response)\n\n except:\n response = build_speechlet_response(\"Error\", \"Sorry, I don't know that word!\", None, True)\n return build_response({}, response)", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"MakeCoffee\":\r\n return make_coffee(intent, session)\r\n elif intent_name == \"TurnCoffeeMachine\":\r\n return turn_coffee_machine(intent, session)\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return turn_off_coffee_machine()\r\n else:\r\n return invalid_intent()\r\n #raise ValueError(\"Invalid intent\")\r", "def onAudioIntent(self, *args, intentName):\n\n print(intentName, *args)\n if intentName == 'name' and len(args) > 0:\n self.name = args[0]\n self.nameLock.release()\n elif intentName == 'origin' and len(args) > 0:\n self.origin = args[0]\n self.originLock.release()\n elif intentName == 'age' and len(args) > 0:\n for arg in args:\n if arg.isdigit():\n self.age = arg\n self.ageLock.release()\n elif intentName == 'exclusion' and len(args) > 0:\n self.exclusion = args[0]\n self.exclusionLock.release()\n elif intentName == 'conflict' and len(args) > 0:\n self.conflict = args[0]\n self.conflictLock.release()\n elif intentName == 'inhumanity' and len(args) > 0:\n self.inhumanity = args[0]\n self.inhumanityLock.release()\n elif intentName == 'family' and len(args) > 0:\n self.family = args[0]\n self.familyLock.release()\n elif intentName == 'reason' and len(args) > 0:\n self.reason = args[0]\n self.reasonLock.release()\n elif intentName == 'route' and len(args) > 0:\n self.route = args[0]\n self.routeLock.release()\n elif intentName == 'entrance' and len(args) > 0:\n self.entrance = 
args[0]\n self.entranceLock.release()\n elif intentName == 'yesno' and len(args) > 0:\n self.documentation = args[0]\n self.documentationLock.release()\n elif intentName == 'company' and len(args) > 0:\n self.company = args[0]\n self.companyLock.release()", "def on_intent(intent_request, session):\r\n\r\n print(\"on_intent requestId=\" + intent_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n \r\n if intent_name == \"unsafe\":\r\n send_message_alerts()\r\n session_attributes = {}\r\n card_title = \"Welcome, this is Emma\"\r\n speech_output = \"Calling police, Connected with police , Police on the way. Police will be in 1 min . Your relatives and frieds are all informed. Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me\"\r\n \r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = \"Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me ,Help Me, Help Me, Help Me, Help Me, Help Me \"\r\n \r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n \r\n \r\n \r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")", "def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. 
'\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent_name = \"\"\n if 'intent' in intent_request:\n intent = intent_request['intent']\n if 'name' in intent:\n intent_name = intent['name']\n\n # Dispatch to your skill's intent handlers\n if not intent_name:\n return get_help_response()\n elif intent_name == \"Hello\":\n return say_hello()\n elif intent_name == \"Brandon\":\n return say_brandon()\n elif intent_name == \"Warning\":\n return say_warning()\n elif intent_name == \"Dance\":\n return say_dance_lights()\n elif intent_name == \"Spot\":\n return say_spot_light()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n return say_hello()\n return get_help_response()", "def map_activity_name(self, name):\n newname = self.amappings.get(name)\n if newname is None:\n return 'Other'\n else:\n return newname", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NumberFact\":\n return num_fact(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n # Dispatch to your skill's intent handlers\n if intent_name == \"test\":\n return get_test_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"forecast\":\n return get_forecast_response()\n elif intent_name == \"detailedforecast\":\n return get_detailed_forecast_response()\n elif intent_name == \"uscanadaforecast\":\n return get_uscanada_forecast_response()\n elif intent_name == \"detaileduscanadaforecast\":\n return get_detailed_uscanada_forecast_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(event_request, session):\n print(\"=====on_intent requestId: \" + event_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = event_request['intent']\n intent_name = event_request['intent']['name']\n print(\"=====intent is: \" + intent_name)\n\n if intent_name == \"AnswerIntent\":\n print(\"=====AnswerIntent fired...\")\n if 'attributes' in session:\n if 'questions' in session['attributes']:\n return handle_answer_request(intent, session)\n\n # we probably got here because user said something other than\n # yes or no after asking if they wanted to play the game again\n print(\"=====no attributes ending game\")\n return play_end_message()\n if intent_name == \"GameIntent\":\n print(\"=====GameIntent fired...\")\n # if there's a session and we're in a game treat this as an answer\n # unfortunately it will be wrong but it's better than starting over\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return 
handle_answer_request(intent, session)\n return play_new_game(False)\n if intent_name in (\"AMAZON.StartOverIntent\", \"AMAZON.YesIntent\"):\n print(\"=====StartOverIntent or YesIntent fired...\")\n return play_new_game(True)\n if intent_name == \"AMAZON.NoIntent\":\n print(\"=====NoIntent fired...\")\n # if there's a session and we're in a game treat this as a wrong answer\n if 'attributes' in session:\n if session['attributes']['game_status'] == \"in_progress\":\n return handle_answer_request(intent, session)\n # otherwise end the game\n return play_end_message()\n if intent_name in (\"AMAZON.StopIntent\", \"AMAZON.CancelIntent\"):\n print(\"=====StopIntent or CancelIntent fired\")\n return play_end_message()\n if intent_name == 'AMAZON.HelpIntent':\n print(\"=====HelpIntent...\")\n tts = \"During the game I'll give you 6 random brain teasers and only 8 \"\\\n \"seconds to anser each one... To make your mind muscles stronger, I \"\\\n \"won't repeat any of the questions, so try to remember all the \"\\\n \"details... You can say 'Start Over' if you'd like a new game, \"\\\n \"or make your guess for the last question...\"\n return speech(tts, session['attributes'], False, None)", "def get_intent(msg):\n if re.search(MapController.MAP_REGEX, msg.content) and client.user.id in msg.raw_mentions:\n return Intent.MAP\n elif re.match(Controller.KEY_REGEX, msg.content):\n return Intent.DIRECT\n else:\n return Intent.NONE", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"RandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"RangeRandNumIntent\":\n return generate_random_num(intent, session)\n elif intent_name == \"DiceIntent\":\n return generate_random_num(intent, session, num1=1, num2=6)\n elif intent_name == \"HundredDiceIntent\":\n return generate_random_num(intent, session, num1=1, num2=100)\n elif intent_name == \"RouletteIntent\":\n return generate_random_num(intent, session, num1=1, num2=10)\n elif intent_name == \"SelectIntent\":\n return generate_random_num(intent, session, num1=1)\n elif intent_name == \"RepeatIntent\":\n if 'attributes' not in session:\n return handle_error_status()\n else:\n attributes = session.get('attributes')\n return generate_random_num(intent, session, **attributes)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def decide_action(self):\t\t\t\t\t#defining the function to decide the action\n recognizer, audio = self.speech.listen_for_audio()\t\t#listening for the audio\n\n # received audio data, now we'll recognize it using Google Speech Recognition\n speech = self.speech.google_speech_recognition(recognizer, audio)\t#storing the speech into variable as a text\n\n if speech is not None:\t\t#if speech is not recognized\n try:\n req = requests.get('https://api.wit.ai/message?v=20160918&q=%s' % speech,\n headers={\"Authorization\": wit_ai_token})\t\t#getting the wit.ait token and checking it\n print req.text\t\t\t#printing the text\n json_responce = json.loads(req.text)\t\t#printing the responce\n entities = None\t\t\t#inititaling the entities\n intent = None\t\t\t#initialising the 
intent\n if 'entities' in json_responce and 'Intent' in json_responce['entities']:\t#checking the the intents and entitites\n entities = json_responce['entities']\t\t#entities \n intent = json_responce['entities']['Intent'][0][\"value\"]\t#intents \n\n print intent\t#printing the intents\n if intent == 'greeting':\t#checking the intent type\n self.__text_action(self.nlg.greet()) #getting the function of the intent\n elif intent == 'snow white':\t\t#checking the intent type\n self.__text_action(self.nlg.snow_white())\t\t#getting the function of the intent\n elif intent == 'weather':\t\t#checking the intent type\n self.__weather_action(entities)\t#getting the function of the intent\n elif intent == 'news':\t\t\t#checking the intent type\n self.__news_action()\t#getting the function of the intent\n elif intent == 'maps':\t\t\t#getting the function of the intent\n self.__maps_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'holidays':\t\t#getting the function of the intent#checking the intent type\n self.__holidays_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'appearance':\t\t#getting the function of the intent#checking the intent type\n self.__appearance_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user status':\t\t#getting the function of the intent#checking the intent type\n self.__user_status_action(entities)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'user name':\t\t\t#getting the function of the intent#checking the intent type\n self.__user_name_action()\t\t\t#getting the function of the intent#checking the intent type\n elif intent == 'personal status':\t\t#getting the function of the intent#checking the intent type\n self.__personal_status_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'joke':\t\t\t#getting the function of the intent#checking the intent type\n self.__joke_action()\t\t#getting the function of the intent#checking the intent type\n elif intent == 'insult':\t\t#getting the function of the intent#checking the intent type\n self.__insult_action()\t#getting the function of the intent#checking the intent type\n return\t\t\t\t#retuning\n elif intent == 'appreciation':\t\t\t#getting the function of the intent#checking the intent type\n self.__appreciation_action()\t\t\t#getting the function of the intent#checking the intent type\n return\n elif intent == 'music':\t\t\t#getting the function of the intent#checking the intent type\n self.__music_action(music_file)\t\t#getting the function of the intent#checking the intent type\n elif intent == 'navigation':\t\t\t#getting the function of the intent#checking the intent type\n self.__navigate_action()\n elif intent == 'tasks':\n self.__calender_events()\n\t\telif intent == 'guide':\n self.__guide()\n elif intent == 'web':\n self.__web()\n elif intent == 'video':\n self.__video()\n else: # No recognized intent\n self.__text_action(\"I'm sorry, I don't know about this yet.\")\n return\n\n except Exception as e:\n print \"Failed wit !\"\t\t\t#error message\n print(e)\t\t\t#printing the error\n traceback.print_exc()\n self.__text_action(\"I'm sorry, I couldn't understand what you mean !!\") #printing message\n return\t\t\t\t\n\n self.decide_action()", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n 
intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'ScheduleMeeting':\n return schedule_meeting(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')", "async def test_intent(self, dm):\n request = create_request(\"other\", \"intent\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"intent\"", "def test_intent_support(self):\n dispatcher = self.get_dispatcher()\n for intent in self.get_intents():\n self.assertIsNot(dispatcher(intent), None)", "def apply_action(self, cmd_name, *args):\n\n action = Action(self.tahoma_device.url)\n action.add_command(cmd_name, *args)\n self.controller.apply_actions(\"HomeAssistant\", [action])", "def on_intent(intent_request, session):\n\n\tprint(\"on_intent requestId=\" + intent_request['requestId'] +\n\t\t \", sessionId=\" + session['sessionId'])\n\n\tintent = intent_request['intent']\n\tintent_name = intent_request['intent']['name']\n\n\t# Sends the request to one of our intents\n\tif intent_name == \"sendVideoIntent\":\n\t\treturn sendVideo(intent, session)\n\telif intent_name == \"setVolumeIntent\":\n\t\treturn setVolume(intent, session)\n\telif intent_name == \"AMAZON.PauseIntent\":\n\t\treturn pauseVideo(intent, session)\n\telif intent_name == \"AMAZON.ResumeIntent\":\n\t\treturn resumeVideo(intent, session)\n\telif intent_name == \"AMAZON.HelpIntent\":\n\t\treturn get_welcome_response()\n\telif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n\t\treturn handle_session_end_request()\n\telse:\n\t\traise ValueError(\"Invalid intent\")", "def open_intent_envelope(message):\n intent_dict = message.data\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))", "def get_action(action_name):\n action = justrok.Globals.action_collection.action(action_name)\n if action is None:\n justrok.logger.error('action %r not found', action_name)\n return lambda: None\n else:\n return action.trigger", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n elif intent_name == \"Ja_Bitte\":\n return Ja_Bitte_session(intent, session)\n else:\n raise ValueError(\"Invalid intent\")", "def get_action(self, context):\n pass", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"<YOUR INTENT NAME HERE>\":\n # Update the wordsmith_data variable with your data. 
Use key, value\n # pairs where the key is the column name in Wordsmith and the value is\n # the value contained in that column\n wordsmith_data = { 'column1': 'value1', 'column2': 'value2' }\n narrative = wordsmith.generate(WORDSMITH_API_KEY, WORDSMITH_PROJECT_SLUG, WORDSMITH_TEMPLATE_SLUG, wordsmith_data)\n if 'errors' not in narrative:\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generated Response', narrative['data']['content'],\n '<REPROMPT TEXT HERE>', True))\n else:\n if not isinstance(narrative['errors'], list) :\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(narrative['errors']['detail']),\n '<REPROMPT TEXT HERE>', True))\n else:\n details = ', '.join([e['details'] for e in narrative['errors']])\n return build_response(session.get('attributes', {}), build_speechlet_response('Wordsmith Generation Error', 'Wordsmith reported the following error: {}'.format(details),\n '<REPROMPT TEXT HERE>', True))\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"CountryStatusIntent\":\n return get_country_info(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_start_end_response(False)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return get_start_end_response(True)\n else:\n return get_start_end_response(False)", "def on_intent(intent_request, session, state):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n \n \n # If new user, and intent is not setting up, prompt to set up first\n # If corrupted user, prompt to set up again\n userId = session[\"user\"][\"userId\"]\n query_user = get_info(userId)\n print(query_user)\n if (len(query_user) == 0 and intent_name != \"NewUserCollectInfoIntent\") or \\\n (len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS):\n if len(query_user) > 0 and len(query_user[0].keys()) != NUM_DB_COLS:\n delete_info(userId)\n \n return new_user_intro(session, state)\n\n handlers = {\n \"GetMainFocusIntent\": get_main_focus_intent_response,\n \"CheckinKeepMainFocusIntent\": keep_main_focus_intent,\n \"CheckinReplaceMainFocusIntent\": replace_main_focus_intent,\n \"ExecuteMorningRoutineIntent\": execute_morning_routine_intent,\n \"ExecuteEveningRoutineIntent\": execute_evening_routine_intent,\n \"AMAZON.YesIntent\": handle_yes_intent,\n \"AMAZON.NoIntent\": handle_no_intent,\n \"AMAZON.CancelIntent\": handle_session_end_request,\n \"AMAZON.StopIntent\": handle_session_end_request,\n }\n \n # Handlers that need more arguments\n if intent_name not in handlers:\n if intent_name == \"SetMorningRoutineIntent\":\n return set_routine_intent(intent, session, state, MORNING)\n elif intent_name == \"SetEveningRoutineIntent\":\n return set_routine_intent(intent, session, state, EVENING)\n elif intent_name == \"GetMorningRoutineIntent\":\n return get_routine_intent(intent, session, state, MORNING)\n elif intent_name == 
\"GetEveningRoutineIntent\":\n return get_routine_intent(intent, session, state, EVENING)\n elif intent_name == \"NewUserCollectInfoIntent\":\n return new_user_collect_info_intent(intent_request, session, state)\n elif intent_name == \"SetNameIntent\":\n return set_name_intent(intent_request, session, state)\n \n try:\n return handlers[intent_name](intent, session, state)\n except Exception as e:\n # This exception probably came from inside a handler\n print(e)\n raise ValueError(\"Invalid intent: \"+intent_name)", "def chooseAction(self):\n print \"nothing\"\n pass", "def yes_intent(intent, session):\n if session.get('attributes', {}).get('add_address') and \\\n session['attributes']['next_step'] == 'store_address':\n return store_address(intent, session)\n elif session.get('attributes', {}).get('remove_address'):\n return remove_address(intent, session)\n else:\n return reply.build(\"Sorry, I don't know what you mean. Try again?\",\n persist=session.get('attributes', {}),\n is_end=False)", "def optional_action_map(env, inner_action):\n if hasattr(env, 'wrap_action'):\n return env.wrap_action(inner_action)\n else:\n return inner_action", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n \n if intent_name == 'BillInquiry':\n return billinquiry(intent_request)\n elif intent_name == 'billpayment': \n return billpayment(intent_request)\n elif intent_name == 'RebootSystem': \n return rebootsystem(intent_request)\n else:\n return default_answer(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def help_intent_handler(handler_input):\n return help_request(handler_input, MINUS_POINTS, QUIT_MINUS_POINTS)", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'Help':\n return order_help(intent_request)\n elif intent_name == 'FastFood':\n return order_snacks(intent_request)\n elif intent_name == 'Beverages':\n return order_beverages(intent_request)\n elif intent_name == 'Admin':\n return admin(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n if intent_name == \"GUTSIntent\":\n session_attributes = {}\n return build_response(session_attributes, build_speechlet_response(\n \"GUTSCard\", \"I have the GUTS\", \"I love hackathons\", True))\n\n if intent_name == \"LoveAIntent\":\n #session_attributes = {}\n #if loveStage = 1:\n # return build_response(session_attributes, build_speechlet_response(\n # \"Love1Card\", \"I love Theo!\", \"I love Theo so much!\", False))\n return handle_love_A_intent(session)\n\n if intent_name == \"LoveBIntent\":\n return handle_love_B_intent(session)\n\n if intent_name == \"LoveCIntent\":\n return handle_love_C_intent(session)", "def dispatch(intent_request):\n logger.debug(\n 'dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = 
intent_request['currentIntent']['name']\n\n logger.debug('HERE {}'.format(intent_request['currentIntent']['name']))\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'createCuration':\n print(\"In createCuration\", intent_name)\n return find_suggestion(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AddToCart\":\n return quary(intent, session)\n else:\n print(\"invalid intent\")\n raise ValueError(\"Invalid intent\")", "def detach_intent(self, intent_name):\n self.bus.emit(Message(\"detach_intent\", {\"intent_name\": intent_name}))", "def map_activity_name2(self, name):\n newname = self.amappings2.get(name)\n if newname is None:\n return 'Other'\n else:\n return newname", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhensNextTrainIntent\":\n return get_next_train(intent, session)\n elif intent_name == \"SetFavoriteStationIntent\":\n return set_favorite_station(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response(session)\n elif intent_name == \"AMAZON.StopIntent\" or intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response(session)\n else:\n raise ValueError(\"Invalid intent\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"HelloWorldIntent\":\n return handle_session_end_request()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"action\")", "def obtain_action(self):\r\n\t\treturn", "def default_action(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_action\")", "def on_intent(intent_request, session):\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NewMessageIntent\":\n return record_new_message(intent, session)\n elif intent_name == \"SaveIntent\":\n return process_save_intent(intent, session)\n elif intent_name == \"ReadMessageIntent\":\n return read_message(intent, session)\n elif intent_name == \"AddUserIntent\":\n return add_user(intent, session)\n elif intent_name == \"RemoveMessageIntent\":\n return remove_messages(intent, session)\n \n \n elif intent_name == \"AMAZON.HelpIntent\":\n return process_help_intent(intent, session)\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")", "def intent(self, intent_name, 
mapping=None, convert=None, default=None):\n if mapping is None:\n mapping = dict()\n if convert is None:\n convert = dict()\n if default is None:\n default = dict()\n\n def decorator(f):\n self._intent_view_funcs[intent_name] = f\n self._intent_mappings[intent_name] = mapping\n self._intent_converts[intent_name] = convert\n self._intent_defaults[intent_name] = default\n\n return f\n return decorator", "def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'bitbotSetNewAlert':\n return set_currency_alert(intent_request)\n # elif intent_name == 'Temp':\n # return set_currency_alert(intent_request)\n\n raise Exception('Intent with name ' + intent_name + ' not supported')", "def choose_action(self):\r\n pass" ]
[ "0.67302954", "0.6667681", "0.6538136", "0.6383118", "0.61762303", "0.59395283", "0.58650994", "0.58650994", "0.58105075", "0.5734404", "0.5690178", "0.56890965", "0.5672978", "0.5672482", "0.5650137", "0.56120867", "0.55995744", "0.55600524", "0.5517704", "0.54916435", "0.5489112", "0.54630333", "0.543421", "0.54318935", "0.5425409", "0.54152995", "0.5372827", "0.53699315", "0.5335601", "0.5324251", "0.5323126", "0.5290983", "0.5285084", "0.52536416", "0.5246088", "0.5228275", "0.5221403", "0.52145696", "0.52132756", "0.5200591", "0.51800525", "0.51660764", "0.5153761", "0.5124121", "0.5122579", "0.51153123", "0.51136404", "0.5084445", "0.5084445", "0.5084445", "0.5084445", "0.5084445", "0.507779", "0.50723183", "0.5066313", "0.5049496", "0.50430423", "0.5039955", "0.50390124", "0.5038118", "0.50246394", "0.501615", "0.5011915", "0.5006227", "0.4979937", "0.4959374", "0.49575502", "0.4949884", "0.4933286", "0.49326062", "0.49256152", "0.4924932", "0.4911044", "0.4903493", "0.4900221", "0.48912713", "0.48607433", "0.48594457", "0.48531753", "0.48519447", "0.48421025", "0.48400775", "0.48328644", "0.48327398", "0.48242137", "0.4816047", "0.48126093", "0.48119855", "0.48111042", "0.4809243", "0.47981265", "0.4793502", "0.4787134", "0.47545624", "0.47430515", "0.4739509", "0.47350714", "0.47257832", "0.472307", "0.47114298" ]
0.62379736
4
delete the specified intent from your account.
def delete_intent(intent_name):
    try:
        client.get_intent(
            name=intent_name,
            versionOrAlias='$LATEST'
        )
        answer = raw_input("Do you want to delete %s from your account (Y/y for YES, anything else for NO): " % (intent_name))
        if answer in ['Y', 'y']:
            client.delete_intent(
                name=intent_name
            )
            print "You chose to delete the intent %s, deleted..." % (intent_name)
        else:
            print "You chose not to delete the intent %s, exiting..." % (intent_name)
    except:
        # any failure from get_intent is treated as "no such intent"
        print "There is no intent called %s, exiting..." % (intent_name)
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_account(self, account):\n \n pass", "def delete(self, args):\n try:\n db = get_db('intents')\n intents = db.delete_intent(args['intent'])\n resp = jsonify(intents=intents)\n resp.status_code = 200\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error)\n resp.status_code = 400\n return resp", "def delete_activity():\n pass", "def delete_account(self):\n Credential.account_list.remove(self)", "def delete_account(self):\n signals.before_gameaccount_deleted.send(gameaccount=self.gameaccount)\n db.delete(self.gameaccount)", "def delete(self, accountId, reason, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method delete\")", "def delete(account):\n account.stripe_account.delete()\n account.delete()", "def account_delete(request):\n fields = [\"email\", \"token\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n storage = Storage(\"biit_profiles\")\n try:\n account_db.delete(args[\"email\"])\n storage.delete(args[\"email\"] + \".jpg\")\n return http200(\"Account deleted\")\n except:\n return http400(\"Error in account deletion\")", "def delete(self, data):\n url = self.base_url + '/v2/account/delete/'\n return self._call_vendasta(url, data)", "def delete_account(self):\n print('-=' * 12 + \" Delete Account \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n delete_flag = self.auth.delete_account(mob_num, password)\n if delete_flag:\n print(\"The account is permently deleted\")\n self.logging_page()\n else:\n print(\"Mobile Number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.delete_account, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def delete(self, application_id):", "def delete_my_account():\n # Remove user ownerships\n for p in current_user.projects:\n p.user_id = None\n p.save()\n # Delete user posts\n [ a.delete() for a in current_user.activities ]\n # Delete user account\n current_user.delete()\n logout_user()\n flash('We are sorry to see you go. 
Your profile has been deleted.', 'info')\n return redirect(url_for('public.home'))", "def delete_account():\n\n username = current_user.get_id()\n app.delete_user(username)\n logger.info('Deleted account of user ' + username + '.')\n logout_user()\n logger.info('Logged ' + username + ' out after account deletion.')\n return Response('Account successfully deleted.')", "def delete_user():", "def delete(self, account_id):\n self.client.delete_account(account_id)", "def delete_account():\n print(\"\\n\")\n print(messages.delete_account)\n u_id = pyip.inputInt(\"User Id: \", greaterThan=0)\n\n credentials = {\"id\":u_id}\n result = BankOperationsBackend.delete_account(credentials)\n start_again() if result else BankOperationsUi.delete_account()", "def remove_address(intent, session):\n sess_data = session.setdefault('attributes', {})\n sess_data['remove_address'] = True\n\n # Retrieve stored data just to check if it exists or not.\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I already don't remember any addresses for you.\",\n is_end=True)\n elif sess_data.get('awaiting_confirmation'):\n # The user has requested removal and\n # we requested confirmation\n if intent['name'] == 'AMAZON.NoIntent':\n return reply.build(\"Okay, keeping your stored addresses.\",\n is_end=True)\n elif intent['name'] == 'AMAZON.YesIntent':\n succ = database.delete_user(session['user']['userId'])\n if succ:\n return reply.build(\"Okay, I've forgotten all the addresses \"\n \"you told me.\", is_end=True)\n else:\n # Only get here if the database interaction fails somehow\n return reply.build(\"Huh. Something went wrong.\", is_end=True)\n else:\n # Shouldn't ever get here.\n return reply.build(\"Sorry, I don't know what you mean. 
\"\n \"Try again?\", persist=sess_data, is_end=False)\n else:\n # Prompt the user for confirmation of data removal.\n sess_data['awaiting_confirmation'] = True\n return reply.build(\"Do you really want me to forget the addresses \"\n \"you gave me?\",\n reprompt='Say \"yes\" to delete all stored addresses '\n 'or \"no\" to not change anything.',\n persist=sess_data,\n is_end=False)", "def delete(self, args, intent):\n if 'all' in args.keys() and args['all'] == True:\n try:\n db = get_db('expressions')\n db_results = db.delete_all_intent_expressions(intent)\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions)\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp \n elif args['expressions']:\n try:\n db = get_db('expressions')\n db_results = db.delete_expressions_from_intent(intent, args['expressions'])\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions, deleted_expressions=args['expressions'])\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp", "def delete(ctx, query, force, password, remember):\n\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = session.list_credentials()\n hits = _search(creds, query, True)\n if len(hits) == 0:\n click.echo(\"No matches, nothing to be done.\")\n elif len(hits) == 1:\n cred = hits[0]\n if force or (\n click.confirm(\n f\"Delete account: {_string_id(cred)} ?\",\n default=False,\n err=True,\n )\n ):\n session.delete_credential(cred.id)\n click.echo(f\"Deleted {_string_id(cred)}.\")\n else:\n click.echo(\"Deletion aborted by user.\")\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete_activity(recipe_id, activity_id):\n if 'name' in session:\n PLAN.users[session['name']].delete_activity(recipe_id, activity_id)\n return redirect(url_for('view_activities', recipe_id=recipe_id))\n return redirect(url_for('log_in'))", "def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))", "def delete(ctx):\n click.echo('deleting')\n ctx.delete()\n click.echo('done')", "def delete_account(request):\n ubanks = request.user.userbank.all()\n for ubank in ubanks:\n ubank.delete()\n user = request.user\n log_out(request)\n user.delete()\n return HttpResponse(\"Account succesfully deleted\")", "def delete_app(AppId=None):\n pass", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete():", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass", "def delete_account(self, accountid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'accountid': accountid}\n url = SECURE_API_URL + \"raas/v1/account/delete\"\n return self._lr_object._get_json(url, payload)", "def _delete_spam_action(act, session):\n if act is None:\n return\n act.item.spam_flag_counter -= 1\n session.delete(act)", "def destroy(self):\n\t\tos.remove(self.account_file)", "def delete_credential(credentials):\n credentials.delete_credentials()", "def detach_intent(self, intent_name):\n self.bus.emit(Message(\"detach_intent\", {\"intent_name\": 
intent_name}))", "def delete_account(self) -> None:\n\n msg = QtWidgets.QMessageBox()\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/newPrefix/new.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n msg.setWindowIcon(QtGui.QIcon(icon))\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n password = self.lineEdit_9.text()\n self.lineEdit_9.clear()\n if not password:\n msg.setWindowTitle(\"Delete account\")\n msg.setText(\"Please fill all fields.\")\n msg.exec_()\n else:\n if validate_mp(self.email, password):\n msg.setWindowTitle(\"Delete account\")\n msg.setText(\"Are you sure you want delete your account?\")\n msg.setInformativeText(\"Deleting your account cannot be undone-you will no longer have access to any data you have stored in Vault Plus.\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg.setDefaultButton(QtWidgets.QMessageBox.No)\n reply = msg.exec()\n if reply == QtWidgets.QMessageBox.Yes:\n vaultplusDB.delete_user(self.email)\n adminDB.delete_user(self.email)\n path = Path(\"users\", self.uid[1:])\n if path.exists():\n shutil.rmtree(path)\n return True", "def delete(self, accounttype):\n accounttype = Accounttype.query\\\n .filter(Accounttype.name == accounttype).one()\n db.session.delete(accounttype)\n db.session.commit()\n return jsonify(accounttype)", "def delete_user():\n #TODO user delete\n pass", "def delete_account_id(account_id):\n conn = get_connect()\n conn.execute(\"DELETE from account WHERE accountId = ?\", [account_id])\n conn.commit()\n conn.close()\n return", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def delete_account_key(configuration):\n os.remove(configuration.cm_key)", "def delete(self, customerguid, jobguid=\"\", executionparams=None):", "def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)", "def __ui_remove_activity(self):\n remove_activity_id = int(input(\"The ID of the activity you want to remove: \"))\n self.__activity_service.service_remove_activity(remove_activity_id)\n print(\"Activity successfully removed from your agenda!\\n\")", "def delete(ctx, query, force):\n\n ensure_validated(ctx)\n controller = ctx.obj['controller']\n creds = controller.list()\n hits = _search(creds, query)\n if len(hits) == 0:\n click.echo('No matches, nothing to be done.')\n elif len(hits) == 1:\n cred = hits[0]\n if force or (click.confirm(\n u'Delete credential: {} ?'.format(cred.printable_key),\n default=False, err=True\n )):\n controller.delete(cred)\n click.echo(u'Deleted {}.'.format(cred.printable_key))\n else:\n click.echo('Deletion aborted by user.')\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete(name):\n # Just like adding something, we use the cursor, but instead of INSERT INTO, we write DELETE FROM.\n # WHERE determines which activity the user wants to change\n c.execute(\"DELETE FROM activities WHERE name = (?)\", [name])\n # Now we must commit the changes that happened in the database\n conn.commit()", "def delete(self, uid):\n raise NotImplementedError", "def delete_credential(self):\n Credential.credential_list.remove(self)", "def 
delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_request(self, request):\n # DELETE https://graph.facebook.com/[<REQUEST_OBJECT_ID>_<USER_ID>]?\n # access_token=[USER or APP ACCESS TOKEN]\n delete_id = '%s_%s' % (request.request_id, request.to_facebook_user_id)\n resp = self.open_facebook.delete(delete_id)\n if resp:\n request.accepted_at = timezone.now()\n request.save()\n else:\n logger.warning('Tried to delete invite request id %s, facebook returned False' % delete_id)", "def delete(self):\n url = \"https://api.imgur.com/3/account/{0}\".format(self.name)\n return self._imgur._send_request(url, needs_auth=True, method='DELETE')", "def delete(self):\n self.request().delete()", "def delete_credential(self):\n\n Credential.credential_list.remove(self)", "def delete_account(request):\n collected_values = {}\n \n if request.method != 'POST':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n \n uid = request.POST[\"user_id\"]\n token = request.POST[\"token\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n change_query = \"UPDATE linx_luser SET username = \\'{}\\' WHERE user_id = {}\".format(\"DELETE ME\", uid)\n with connection.cursor() as cursor:\n cursor.execute(change_query)\n\n collected_values[\"user_id\"] = uid\n collected_values[\"token\"] = token\n collected_values[\"executed_query\"] = change_query\n\n LOGGER.info(\"Delete account request: %s\", collected_values)\n return JsonResponse(collected_values, status=200)", "def delete_user(id):\n pass", "def delete(self, userguid, jobguid=\"\", executionparams=dict()):", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def closeaccount(request):\n get_user_model().objects.get(username=request.user.get_username()).delete()\n return Response({}, status=status.HTTP_200_OK)", "def delete(self, item):\n self._createAction(item, \"delete\")", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def do_deluser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\ttry:\n\t\t\t\tself.cl.del_contact(line)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Wrong syntax! 
Type 'help delete'\")\n\t\telse:\n\t\t\tprint(\"To delete contacts you need to open or create a book.\")", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(self, uid):\n return self.delete_instance(uid)", "def delete(self, filename):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def delete_user(self, user):\n self.delete(user)", "def delete_credential(self, context, id):\n return remove_credential(id)", "def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})", "def delete(self, _id):", "def DeleteApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def post_delete_account(self, data=None):\n return self.client.post(self.delete_account_url, data)", "def del_awcomment(request, pk):\n comment = get_object_or_404(AwardComment, pk=pk)\n comment.delete()\n award = comment.award\n url = '../../' + str(comment.award.pk)\n return redirect(url)", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def delete(self):\r\n self.domain.delete_item(self)", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)", "def delete_user(self):\n raise NotImplementedError(\"Function not yet implemented contact package creator\")", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def delete_wim_account(self, uuid):\n # Since we have foreign keys configured with ON CASCADE, we can rely\n # on the database engine to guarantee consistency, deleting the\n # dependant 
records\n return self.db.delete_row_by_id('wim_accounts', uuid)", "def delete():\n click.echo('delete was called.')", "def acc_delete_gameaccount(request, account_id):\n\n gameaccount = GameAccount.query.get(account_id)\n if gameaccount is None:\n raise NotFound()\n form = DeleteGameAccountForm(gameaccount)\n if gameaccount.user != request.user:\n raise Forbidden()\n\n if request.method == 'POST':\n if request.form.get('cancel'):\n return form.redirect('account/gameaccounts')\n elif request.form.get('confirm') and form.validate(request.form):\n accountname = str(gameaccount.account)\n form.delete_account()\n db.commit()\n account_flash(_('The game account %s was deleted successfully') % accountname, 'remove')\n return redirect_to('account/gameaccounts')\n\n return render_account_response('account/gameaccount_delete.html', 'gameaccounts',\n form=form.as_widget())", "def delete(name: str):\n profiles = prefect.settings.load_profiles()\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n current_profile = prefect.context.get_settings_context().profile\n if current_profile.name == name:\n exit_with_error(\n f\"Profile {name!r} is the active profile. You must switch profiles before\"\n \" it can be deleted.\"\n )\n\n profiles.remove_profile(name)\n\n verb = \"Removed\"\n if name == \"default\":\n verb = \"Reset\"\n\n prefect.settings.save_profiles(profiles)\n exit_with_success(f\"{verb} profile {name!r}.\")", "def delete(self, purpose, using, sender, recipient):\n Relation = self.models[using]\n self.session.query(Relation)\\\n .filter(Relation.purpose == purpose)\\\n .filter(Relation.using == using)\\\n .filter(Relation.sender == sender)\\\n .filter(Relation.recipient == recipient)\\\n .delete()\n self.session.flush()", "def deleteCredential(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete_credential(self):\n Credentials.credentials_list.remove(self)", "def delete(self, account=None, user=None, account_id=None):\n print(request.data)\n data = json.loads(request.data)\n target_user = data[\"user\"]\n\n admin_edge = UserIsAccountAdmin.filter(outv_id=target_user,\n inv_id=account.id)\n\n if not admin_edge:\n return jsonify_response({\n \"error\": \"No edge exists between the targeted user and account\"\n }, 404)\n\n try:\n admin_edge[0].delete()\n except ObjectCanNotBeDeletedException as e:\n return jsonify_response({\n \"error\": e.message\n }, 400)\n\n response = AccountDetailSchema().dumps(account).data\n return jsonify_response(json.loads(response), 200)", "def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()", "def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def DeleteCreditCard(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def remove(self, context):\n try: \n url_tournament = Tournament.last_url_tournament\n await context.send(url_tournament)\n except Exception as error:\n print(error)", "def delete(self, type=None, name=None, identity=None):\n if name and identity:\n name = None # Only specify 
one\n request = self.request(operation='DELETE', type=type, name=name,\n identity=identity)\n self.call(request, expect=error.NO_CONTENT)", "def delete_identity(self, realm=None, type=\"users\", username=None):\n if not username:\n raise ValueError(\"Please provide a username.\")\n\n type = self._type_validator(type=type)\n uri = self._uri_realm_creator(realm=realm, uri=type + '/' + username)\n data = self._delete(uri=uri, headers=self.headers)\n return data.json()", "def delete_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request has been removed.'\n )\n return redirect('profiles:my_requests')", "def deleteaccount():\n try:\n if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER:\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \\\n session['username']\n with Database() as db:\n texts = db.getOwnedTexts(session['id'])\n for text in texts:\n TextDelete(text[0])\n db.deleteUser(session['id'])\n shutil.rmtree(app.config['UPLOAD_FOLDER'])\n session.clear()\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n flash(\"Account has been deleted\")\n return redirect(url_for('index'))\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('index'))", "def delete(self, *args, **kwargs) -> Any:\n pass", "def del_user(self, username):\n pass", "def close_account(self, conn, number):\n sql = \"DELETE FROM card WHERE number=?\"\n c = conn.cursor()\n c.execute(sql, (number,))\n conn.commit()\n self.menus()", "def deleteUser(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.7136622", "0.7109074", "0.6793135", "0.6618114", "0.6454273", "0.6342631", "0.63189256", "0.62924826", "0.6259348", "0.6142261", "0.6112738", "0.60232806", "0.5955118", "0.59354204", "0.5897123", "0.58737737", "0.5854009", "0.5853782", "0.5827274", "0.5824352", "0.57835615", "0.5770079", "0.57689846", "0.57591033", "0.57539934", "0.5744825", "0.57406825", "0.5740482", "0.5739148", "0.57342786", "0.5719334", "0.5711964", "0.5711533", "0.57098496", "0.57087904", "0.57063884", "0.5690279", "0.56830555", "0.56824106", "0.567967", "0.5675962", "0.56687516", "0.5666842", "0.5650067", "0.56452507", "0.56444335", "0.5631572", "0.5628163", "0.5622746", "0.5619506", "0.56114686", "0.5601165", "0.55980533", "0.55918354", "0.55797786", "0.55543363", "0.5546211", "0.5543177", "0.554071", "0.5529021", "0.55231184", "0.5518563", "0.55172455", "0.55172455", "0.5509746", "0.5506676", "0.5505391", "0.55037373", "0.5482531", "0.54779685", "0.5477689", "0.5475153", "0.5472147", "0.5471512", "0.546667", "0.54660916", "0.5464184", "0.5461184", "0.54565084", "0.5451988", "0.54501104", "0.54498315", "0.5445787", "0.5438857", "0.54376036", "0.5435175", "0.5428776", "0.54207647", "0.5404668", "0.5401842", "0.540107", "0.5387807", "0.5387606", "0.5385483", "0.53825414", "0.53690344", "0.53676283", "0.5366592", "0.53606594", "0.53550285" ]
0.7211831
0
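A minimal sketch, assuming the undefined `client` used by `delete_intent` above is a boto3 Amazon Lex Model Building Service (V1) client; the helper name `intent_exists` is illustrative and not taken from the row.

```python
import boto3
from botocore.exceptions import ClientError

# assumption: delete_intent's global `client` is the Lex Model Building Service (V1) client
client = boto3.client("lex-models")

def intent_exists(intent_name, version="$LATEST"):
    """Return True if Lex can fetch the intent, False if it reports NotFoundException."""
    try:
        client.get_intent(name=intent_name, version=version)
        return True
    except ClientError as err:
        if err.response["Error"]["Code"] == "NotFoundException":
            return False
        raise  # surface throttling/permission errors instead of swallowing them
```

Checking the error code and re-raising anything else avoids the bare `except:` used in the row above, which would also hide throttling or permission problems.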
demo function to get the intent's latest configuration
def get_intent_configuration(intent_name, version="$LATEST"):
    response = client.get_intent(
        name=intent_name,
        version=version
    )
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config():\n return CONFIG", "def getConfig(self):\n pass", "def config(self) -> \"AutomationConfig\":", "def config(self) -> \"AutomationConfig\":", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def get_details(self):\n return self.__config_data", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n 
return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def get_config(self):\n return super().get_config()", "def get_full_config(self):\n return self._read_config()", "def get_config():\n return _config", "def get(self) -> dict:\n return Config.get()", "def config():", "def config():", "def get_next_config(self):\n\n self.reset_trial()\n self._cur_config = self.get_default()\n return self._cur_config if len(self._results) == 0 else None", "def get_configuration():\r\n if not hasattr(CURRENT_REQUEST_CONFIGURATION, 'data'):\r\n return {}\r\n\r\n return CURRENT_REQUEST_CONFIGURATION.data", "def _get_config(self, *args, **kwargs):\n # Just need to show the parameter screen...the parser for the command\n # does the update_many()\n self._go_to_root_menu()\n self._navigate(SubMenu.SHOW_PARAM)\n self._go_to_root_menu()", "def _est_config(self):\n return self._est_method.config", "def get_config():\n app = NbConvertApp()\n app.load_config_file()\n return app.config", "def get_config() -> Optional[Config]:\n return CurrentConfig.get()", "def app_config(self):\n return self._app_conf[\"aiscalator\"]", "def get_config(self):\n return self.config", "def _get_config_dict():\r\n return CONFIGS", "def getConfig(self):\n \n return self.config", "def get_config():\n return _CONFIG", "def config(self):\n return self[CONFIG_KEY]", "def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)", "def configuration():", "def config(self):\n return None", "def config(self):\n return self.namespace['config']", "def config(self) -> Dict[str, Any]:", "def get_config(client):\n func = client.get_config()\n config = run_in_loop_now('get_config', func)\n\n a = {}\n b = {}\n for i in config['activity']:\n a[i['label']] = i['id']\n b[i['id']] = i['label']\n activities_by_name = a\n activities_by_id = b\n d = {}\n for device in config['device']:\n device_cmds = []\n for grp in device['controlGroup']:\n for fnc in grp['function']:\n device_cmds.append(json.loads(fnc['action'])['command'])\n d[device['label']] = {\"id\": device['id'],\n \"cmds\": device_cmds}\n devices = d\n return config", "def get(self):\n lc = self._client.describe_launch_configurations(LaunchConfigurationNames=[self._name])\n if len(lc[\"LaunchConfigurations\"]) == 0:\n return None\n else:\n config = lc[\"LaunchConfigurations\"][0]\n config[\"UserData\"] = base64.b64decode(config[\"UserData\"])\n return config" ]
[ "0.6338923", "0.6251342", "0.61532223", "0.61532223", "0.6144978", "0.60848004", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.5976653", "0.59657294", "0.5903472", "0.5867463", "0.5826157", "0.5826157", "0.5824259", "0.5821595", "0.581498", "0.577358", "0.57663345", "0.57662135", "0.5757804", "0.5757554", "0.57492876", "0.5735587", "0.57318056", "0.5718778", "0.5704525", "0.5697589", "0.56906223", "0.56726307", "0.5649872", "0.5617661", "0.5616914" ]
0.73107255
0
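A minimal usage sketch for `get_intent_configuration`, assuming the same boto3 `lex-models` client as above; the intent name "OrderFlowers" is made up for illustration, and `name`, `version`, and `checksum` are fields the GetIntent response carries.

```python
config = get_intent_configuration("OrderFlowers")   # hypothetical intent name
print "%s (version %s) checksum=%s" % (config["name"], config["version"], config["checksum"])
```

The checksum is the value PutIntent expects when overwriting an existing intent, so fetching the latest configuration first is the usual way to update one safely.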
a helper function to print the intent information in a formatted way
def format_print_jobs(intent):
    print "\nintentName: %s" % (intent['name'])
    for k, v in intent.iteritems():
        if k != 'name':
            print "\t" + str(k) + ": " + str(v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printhelp():", "def info(self):", "def info(self):", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def info(self, *args, **kwargs):", "def cmd_info(self):\n self.cmd_author()\n self.cmd_date()\n log = self.get_log() or ''\n print(len(log))\n print(log)", "def printinfo(assign, question):\n print(\"Last Name: Bell\")\n print (\"First Name: Daniel\")\n print(\"Student ID: 282911\")\n print(\"Course: CPSC 231\")\n print(\"Tutorial Section: T02\")\n print(\"Assignment: %d\" %assign)\n print(\"Question: %s\" %question)\n print(\"\")", "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def info() -> None:", "def printDictIntents(self):\n result = \", \".join(str(value.tag) for key, value in self.dicIntents.items())\n self.ouput.exec('Las Intenciones del ChatBot \"'+self.name+'\" son:'+result)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def describe(self) -> str:", "def print_actions_help():\n print(\\\n'''\\n\nTools for handling SELAFIN files and TELEMAC binary related in python\\n\nP ossible actions:\\n\n scan will print information about the SELAFIN, such as variables,\n their vales etc.\n spec will print information about a spectral file (also SELAFIN),\n such as frequencies, periodes, etc.\n chop will chop a SELAFIN given a new set of time range and step (but\n alter is better)\n alter will alter a SELAFIN file, choping or modifying time,\n converting its coordinates, extracting variables, etc.\n merge will merge two files together, whether they are continuous\n simulations (same variables) or putting variables together\n (same time definition)\n subdivide will subdivide a mesh by one iteration (splitting all triangles\n in four others)\n ''')", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + 
self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def printInfoDoc():\n global _modinfo\n print _modinfo\n help(\"ProcUtils\")", "def info(capsys, format_str, format_args=None):\n\n if format_args is not None:\n msg = (format_str % format_args)\n else:\n msg = format_str\n\n with capsys.disabled():\n print(msg)", "def run(self):\n logging.debug('Displaying Info: ' + self.recipe.name)\n\n msg = PREFIX[1:] + PREFIX.join(self.recipe.info().split('\\n'))\n print(msg)\n return msg", "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "def process_info(process):\n\thelp(process)", "def info(self):\n import tc\n ## enumerate all options\n opts = self.to_list()\n res = \"\"\n fmt = \"%20s = %5s ## %s\\n\"\n\n for k, v in opts:\n res += fmt % (k, str(self.__getattribute__(k)),\n str(v.doc()).split('\\n')[0])\n\n return res", "def help(self):\n res = \"\"", "def print_info(c, timestamp):\r\n print(f\"\\n[{timestamp}] [{id(c)}] [Fitness: {c.fitness()}]\\n \" +\r\n f\"Age: {c.age} seconds, F.Eaten: {c.food_eaten}, P.Eaten: {c.poison_eaten}\\n\" +\r\n f\"currHP: {c.health}, Gen: {c.gen}, Childs: {c.childs}\\n\" +\r\n f\"DNA: {c.dna}\\n\" +\r\n f\"FoodAttr: {c.food_attraction}, PoisonAttr: {c.poison_attraction}\\n\" +\r\n f\"FoodDist: {c.food_dist}, PoisonDist: {c.poison_dist}\\n\" +\r\n f\"MaxHealth: {c.max_health}, MaxVel: {c.max_vel}, Size: {c.size}\\n\" +\r\n f\"MaxSteer: {c.max_steer_force}, DirAngleMult: {c.dir_angle_mult}\\n\")", "def description():", "def getInfo():", "def print_info(msg):\n print(msg)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def print_me(self):\n return \"ID: %s Title: %s\" % (self.ID, self.title)", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def info(object, spacing=10, collapse=1):\n methodList = [meth for meth in dir(object) if callable(getattr(object,meth))]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print \"\\n\".join([\"%s %s\" % (meth.ljust(spacing),\n processFunc(str(getattr(object, meth).__doc__)))\n for meth in methodList])", "def info(object, spacing=10, collapse=1):\n methodList = [method for method in dir(object) if callable(getattr(object, method))]\n argList = [method for 
method in dir(object) if not callable(getattr(object, method))]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print \"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList])\n print argList", "def summary_line_and_description():", "def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt", "async def info(self, context):\n await context.send('creador: debellisnahuel@gmail.com\\ncolabs:\\n emi: https://twitter.com/emilianosce/ o https://www.instagram.com/emilianosce/ \\n garza: https://twitter.com/Matias_Garcia00 o https://www.twitch.tv/garzangb')", "def print_help():\n\n print(\"Mailroom Usage: <name>:add a donor and donation h:help l:list\"\n \"donors r:print report q:quit\")", "def print_env_information(step_id, current_time, final_move, current_score, current_reward):\n print(\"Step: {}\".format(step_id))\n print(\"Current Time: {}\".format(current_time))\n print(\"Action: {}\".format(final_move))\n print(\"Current scenario score: {} \\nCurrent reward: {}\\n\".format(current_score, current_reward))", "def output_debug_info(self):", "def show_info(self):\n # attr[0] attr[1]\n attrs = [(self.TYP.value, 'nam'),\n ('Skill', 'skl')]\n # voeg ook alle stats en skills in deze lijst toe.\n for stat in Minimals:\n attrs.append((stat.value, stat.name))\n attrs.append(('Spell Battery', 'cur_bat'))\n for stat in StatType:\n attrs.append((stat.value, stat.name))\n for skill in SkillType:\n attrs.append((skill.value, skill.name))\n\n # nu alle mogelijkheden geladen zijn, ga dan aan de slag met diegene die van toepassing zijn\n attr_list = []\n\n import enum\n for attr in attrs:\n value_of_attr = self.get_value_of(attr[1])\n # uitzondering, 'wht' altijd gewoon weergeven\n if attr[0] == StatType.wht.value:\n # deze uitzondering geldt niet voor weapons en shields.\n if not isinstance(self.get_value_of('skl'), enum.Enum): # niet wanneer 'skl' een waarde heeft\n attr_list.append((attr[0], str(value_of_attr)))\n elif value_of_attr:\n if isinstance(value_of_attr, enum.Enum): # uitzondering alleen voor 'skl'\n value_of_attr = value_of_attr.value\n elif attr[0] == StatType.hit.value: # uitzondering alleen voor 'hit'\n value_of_attr = str(value_of_attr)+\"%\"\n attr_list.append((attr[0], str(value_of_attr)))\n\n return attr_list", "def get_info(self):\n return \"TODO !\"", "def help_help(self):\n print(\"List commands or print details about a command\")", "def info(object, spacing=10, collapse=1):\n methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print(\"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList]))", "def info(object, spacing=10, collapse=1):\n methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n print( \"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList]) )", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def individual_info(self, ctx: commands.Context, format: str) -> str:\n\t\tformat = self.__normalize(ctx, format)\n\t\ttip = self.formats[format]\n\t\theader_text = self.__header(format, 
tip)\n\t\thow_to = blockquote(tip.escaped)\n\t\tfooter_text = self.__footer(format)\n\t\treturn f\"{header_text}\\n\\n{how_to}\\n\\n{footer_text}\"", "def print_help(self):\n print self.get_help()", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def details(self):\n pass", "def do_info(self, args):\n if self.exploit is None:\n eprint(colorize('No exploit set; nothing to describe. Select an exploit with the \\'use\\' command',\n 'cyan'))\n else:\n eprint(colorize('\\n ' + self.exploit.DESCRIPTION + '\\n', 'green'))", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def extras_msg(extras):\r\n\r\n if len(extras) == 1:\r\n verb = \"was\"\r\n else:\r\n verb = \"were\"\r\n return \", \".join(repr(extra) for extra in extras), verb", "def printHelp(self,):\n print man\n return 0", "def info(msg):\n click.secho(msg, fg='blue')", "def print_result_info(self,result,filename):\n print ('File: %s' % filename)\n print ('Desc: %s' % result.description)\n print ('Version: %s' % result.version)\n print ('Arch: %s' % result.arch)\n print ('Platform: %s' % result.platform)\n print ('CPU: %s' % result.cpuarch)\n if hasattr(result,'sequence'):\n print ('Sequence: %s' % result.sequence)\n print ('Person: %s (%s)' % (result.person_name,result.person_id))\n result.print_summary()\n print('')", "def _printable(self):\n toPrint = \"Command Header. Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Instruction: \" + str(self.instr) + \" \"\n toPrint = toPrint + \"Notify: \" + str(self.notify) + \" \"\n toPrint = toPrint + \"Block: \" + str(self.block) + \" \"\n toPrint = toPrint + \"Action: \" + str(self.action)\n return toPrint", "def show_info(title, message):\n\n pass", "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "def show_man_page(self):\n print(Gstr_synopsis)", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + \"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def describe():", "def help():", "def describe(self):\n\n ret = []\n ret.append(\"Functional ID: %s\" % 
self._number)\n ret.append(\"Functional Name: %s\" % self._xc_func_name)\n ret.append(\"Attributes:\")\n ret.append(\" Name: %s\" % self._name)\n ret.append(\" Kind: %d\" % self._kind)\n ret.append(\" Family: %d\" % self._family)\n ret.append(\"Citations:\")\n for x in self._refs:\n ret.append(\" \" + x)\n\n return \"\\n\".join(ret)", "def description(self):", "def summary_string(self) -> str:", "def program_info():\n\n print(\n color.GREEN\n + color.UNDERLINE\n + color.BOLD\n + \"Program Info Center:\\n\"\n + color.END\n )\n print(\n color.UNDERLINE\n + color.BOLD\n + \"About The Program:\"\n + color.END\n + \" This program works with the Blockchain-19 protocols defined within it's respective project. Blockchain-19 is an adaptation of the cryptocurrency blockchain or the Blockchain game used for education purposes, instead relating the content on the Blockchain to COVID-19. Given patient information the program can calculate the hashes within the Blockchain, creating a solved ledger. The program offers users the option of creating a new ledger or importing a previously exported ledger.\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Necessary Patient Info:\"\n + color.END\n + \"\\n* Hospital \\n* Patient ID \\n* Current Status\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Current Patient Status Key:\"\n + color.END\n + \"\\n* A = Admitted \\n* B = Stable \\n* C = Moderate \\n* D = Severe \\n* E = Discharged \\n* F = ICU\\n\\n\"\n )", "def _print_custom(self):\n pass", "def explain(self):", "def _printable(self):\n toPrint = \"Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Outcome: \" + str(self.outcome) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Datetime: \" + str(self.datetime)\n return toPrint", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def action_to_pretty_str(action) :\n raise NotImplementedError", "def tell(self):\n print('Name {}, Age {}'. 
format(self.name, self.age), end=\" \")", "def info(self):\n print self.id, self.type, self.xyz.get_xyz", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def describe(self):\n return ''", "def get_help(intent, session):\n \n print(\"get_help: \", intent)\n\n text = HELP_MESSAGE\n if \"attributes\" in session and \"current_question\" in session[\"attributes\"]:\n attributes = session[\"attributes\"]\n frage_text = attributes[\"current_question\"]\n text += \"Ich wiederhole die letzte Frage: \" + frage_text\n else:\n frage_text = SPIELER_PROMPT_TEXT\n text += SPIELER_PROMPT_TEXT\n attributes = reset_attributes()\n \n attributes[\"current_question\"] = frage_text\n attributes[\"speech_output\"] = text\n attributes[\"reprompt_text\"] = frage_text\n\n return response(text, False, frage_text, attributes, card_text=clear_tags(HELP_MESSAGE)+\\\n \"\\n\" + build_card_content(attributes))", "def methodHelp(self, req, method):\n p = self.get_method(method)\n return '\\n'.join((p.signature, '', p.description))", "def metadata_print(metadata):\n\n print('{0:<10} {1}'.format('parameter', 'value'))\n for key in metadata:\n print('{0:<10} {1}'.format(key, metadata[key]))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def print_info(case, title='Case', kolom=25):\n def print_desc(desc, val, unit):\n print('| {:<{k:d}s} | {:> {k:d}f} | {:^{k:d}s} |'.format(\n desc, val, unit, k=kolom-2))\n \n lebar = kolom*3+4\n print('='*lebar)\n print('|{:^{l:d}s}|'.format(title, l=lebar-2))\n print('='*lebar)\n print('|{:^{k:d}s}|{:^{k:d}s}|{:^{k:d}s}|'.format('description', 'value', 'unit', k=kolom))\n print('-'*lebar)\n \n L, k, q, TA, TB, nodes, dx = get_valdict(case, 'L,k,q,TA,TB,nodes,dx')\n \n print_desc('L (length)', L, 'm')\n print_desc('k (conductivity)', k, 'W/(m.K)')\n print_desc('q (heat generation)', q, 'kW/(m^3)')\n print_desc('TA (temperature at A)', TA, 'Celcius')\n print_desc('TB (temperature at B)', TB, 'Celcius')\n print_desc('nodes', nodes, '-')\n print_desc('dx (grid space)', dx, 'm')\n\n print('='*lebar)\n print()", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or 
--no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 'help dump'\r\n\r\n \"\"\")", "def show_info(self, handle=sys.stdout):\n pt = PrettyTable(['EntryInfo', 'Value'])\n pt.align = 'r'\n pt.align['EntryInfo'] = 'l'\n pt.align['Value'] = 'l'\n pt.float_format = '8.5'\n\n # Gather all device information, do not show private\n # information that begins with an underscore\n show_info = self.post()\n public_keys = sorted([key for key in show_info.keys()\n if not key.startswith('_')])\n for key in public_keys:\n pt.add_row([key, show_info[key]])\n\n print(pt, file=handle)", "def printDetails(self):\n print str(self.number) + \": \" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def __str__(self):\n s = 'hit '+str(self.hit)+'\\n'\n s+= 'states '+str(self.states)+'\\n'\n s+= 'chi2 '+str(self.chi2)\n return s", "def __str__(self):\n reprStr = 'Help Mario build Iron Man suit!'+'\\n' +'To make the ' + self._name + ',you need:'+'\\n'\n for part in self._supplies:\n reprStr = reprStr + str(part.getCount()) + ' ' + part.getData() + '\\n'\n return reprStr", "def print_mission(self): \n #Download mission from vehicle\n missionlist = self.download_mission()\n \n #Add commands\n for cmd in missionlist:\n commandline=\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)\n print commandline", "def info(releaser):\n click.echo(\"\\n\".join(releaser.get_info()))", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def __str__(self):\n\n descr = \"You are in the \" + self.name + \"\\n\"\n for key in self.exits:\n descr += \"You can go \" + key + \" to the \" + self.exits[key].name + \"\\n\"\n for item in self.inventory:\n descr += \"There is a \" + item.name + \" here.\\n\"\n for item in self.objects:\n descr += item.name + \" is here.\"\n return descr", "def formatted_locator_information(self):\n info = 'May not follow-up.'\n if self.may_follow_up == 'Yes':\n info = (\n '{may_sms_follow_up}\\n'\n 'Cell: {subject_cell} {alt_subject_cell}\\n'\n 'Phone: {subject_phone} {alt_subject_phone}\\n'\n '').format(\n may_sms_follow_up='SMS permitted' if self.may_sms_follow_up == 'Yes' else 'NO SMS!',\n subject_cell='{} (primary)'.format(self.subject_cell) if self.subject_cell else '(none)',\n alt_subject_cell=self.subject_cell_alt,\n subject_phone=self.subject_phone or '(none)', alt_subject_phone=self.subject_phone_alt\n )\n if self.may_call_work == 'Yes':\n info = (\n '{info}\\n Work Contacts:\\n'\n '{subject_work_place}\\n'\n 'Work Phone: {subject_work_phone}\\n'\n '').format(\n info=info,\n subject_work_place=self.subject_work_place or '(work place not known)',\n subject_work_phone=self.subject_work_phone)\n if self.may_contact_someone == 'Yes':\n info = (\n '{info}\\n 
Contacts of someone else:\\n'\n '{contact_name} - {contact_rel}\\n'\n '{contact_cell} (cell), {contact_phone} (phone)\\n'\n '').format(\n info=info,\n contact_name=self.contact_name or '(name?)',\n contact_rel=self.contact_rel or '(relation?)',\n contact_cell=self.contact_cell or '(----)',\n contact_phone=self.contact_phone or '(----)'\n )\n if info:\n info = ('{info}'\n 'Physical Address:\\n{physical_address}').format(\n info=info, physical_address=self.physical_address)\n return info", "def help_dump(self):\n print(DUMP)", "def print(self):\r\n self.print_avec_separateur()", "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def info(): # noqa: E501\n return 'do some magic!'", "def _print_matrix_info(mtrx, name):\r\n pr = lambda t: print(\"ht3_solver:\\t\" + t)\r\n pr(\"MATRIX INFO:\")\r\n pr(\"Matrix:\\t\" + name)\r\n pr(\"Description:\\t\" + str(mtrx.description))\r\n pr(\"Shape:\\t\" + str(mtrx.shape))", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def _help_actions(self):\n actions_str = \"\"\n for (key, value) in self.actions_help.items():\n actions_str += \"command: %s\\n%s\\n\\n\" % (key, value)\n print(actions_str)\n sys.exit(0)" ]
[ "0.64891857", "0.64813703", "0.64813703", "0.6405768", "0.6331518", "0.6306247", "0.62967855", "0.6288361", "0.6245361", "0.6203289", "0.61881757", "0.617994", "0.6178175", "0.6174316", "0.6146131", "0.614331", "0.6139111", "0.61244285", "0.6111058", "0.60988563", "0.60816693", "0.6077465", "0.60719025", "0.60661864", "0.60510826", "0.60261244", "0.60195094", "0.60188246", "0.60168207", "0.60155016", "0.6003681", "0.5986204", "0.59788626", "0.5970048", "0.5963746", "0.59533393", "0.59527016", "0.5932722", "0.5932636", "0.59296435", "0.59165925", "0.59048235", "0.58858705", "0.5884582", "0.58826905", "0.58756554", "0.5871369", "0.5870589", "0.5868432", "0.5864934", "0.5862124", "0.58566844", "0.5850498", "0.5829244", "0.58248675", "0.58203214", "0.581874", "0.5801509", "0.57980055", "0.57979363", "0.57915777", "0.5790887", "0.5790156", "0.57883596", "0.57882816", "0.57844913", "0.57824874", "0.57821214", "0.5770765", "0.57707554", "0.5769467", "0.5768682", "0.5767395", "0.57662666", "0.57657516", "0.5761802", "0.57607466", "0.57592595", "0.5758391", "0.5753085", "0.57523376", "0.57523376", "0.57491463", "0.5733967", "0.57325655", "0.5730924", "0.57291734", "0.5728152", "0.57278514", "0.57243884", "0.5723412", "0.57222337", "0.57213783", "0.5717588", "0.57174015", "0.57111156", "0.5710051", "0.5709267", "0.57070273", "0.5703215" ]
0.6709204
0
Initializes the Crawler and decides which action to take based on the mode.
def __init__(self, restUrl: str, mode: CrawlMode = CrawlMode.NO, loginUrl: str = None, loginName: str = None, loginPW: str = None, furtherparams: str = None, workers: int = 10, mongoDB: Database = None, foldername: str = None, bugList: Union[List, str] = None) -> None: self.session = requests.session() self.workers = workers if loginUrl: #bugzilla user data user = loginName pw = loginPW #login process loginURL = loginUrl self.session.post(loginURL, {'Bugzilla_login': user, 'Bugzilla_password': pw}) #checks for the right ending of restUrl if restUrl[-1] != '/': restUrl += '/' #prepares URLs for crawling of bugs and comments self.bugURL = restUrl + 'bug?limit=500' + furtherparams self.commentURL = restUrl + 'bug/{}/comment' #database if given one self.mongoDB = mongoDB #foldername if given one self.folder = foldername if foldername: #creates directory self.createFolder(foldername) self.folderpath = foldername + '/' #checks on which crawl operation to execute self.decide_action(mode, bugList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_modes(self):\n self._verify_not_using_threaded_mpm()\n\n self._init_screenshot_mode()\n self._init_debug_mode()\n self._init_webapi_cors_header()\n self.init_theme()", "def decide_action(self, mode: CrawlMode = CrawlMode.NO, bugList: Union[List, str] = None) -> None:\n # checks on which crawl operation to execute\n if mode == CrawlMode.BUG:\n self.get_all_bugs()\n elif mode == CrawlMode.COMMENT:\n if bugList:\n self.get_all_comments(bugList)\n else:\n print('Error: No buglist to be found. Please check your params and start again.')\n return\n elif mode == CrawlMode.BOTH:\n bugIDList = self.get_all_bugs()\n self.get_all_comments(bugIDList)\n elif mode == CrawlMode.CFAST:\n self.get_all_comments_mp(bugList, self.workers)\n elif mode == CrawlMode.BFAST:\n bugsIDList = self.get_all_bugs()\n self.get_all_comments_mp(bugsIDList, self.workers)\n else:\n return", "def __init__(self):\n self.modes = {}\n self.modelist = []\n self.mode = 'main'\n self.defs = {}\n events.bind(Key=self.dispatch)", "def on_init(self, mode: BacktestingMode):\n print(\"策略初始化\")\n \n if mode == BacktestingMode.TICK:\n self.load_tick(1)\n else:\n self.load_bar(10)", "def __init__(self, thread_id, depth, spider_config, url, pattern):\n\n super(CrawlUrl, self).__init__()\n\n self.thread_id = thread_id\n self.depth = depth\n self.spider_config = spider_config\n self.url = url\n self.pattern = pattern\n self.page = ''", "def __init__(self):\n self.redis = RedisClient()\n self.crawlers = [crawler_cls() for crawler_cls in crawlers_cls]", "def initialize_process():\n\n settings = Settings({'BOT_NAME': 'warnnoticebot',\n 'LOG_LEVEL': 'INFO',\n 'ITEM_PIPELINES': {'modules.pipelines.PerStateJsonlinesExportPipeline': 300},\n 'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36', # This is my actual user agent when using a browser\n 'COOKIES_ENABLED': False,\n 'ROBOTSTXT_OBEY': True,\n 'DOWNLOAD_DELAY': 5.0,\n 'DEFAULT_REQUEST_HEADERS': {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n 'Upgrade-Insecure-Requests': 1}\n })\n \n process = CrawlerProcess(settings) \n\n return process", "def __init__(self):\n self._order_handlers = []\n self._target_handlers = {}\n\n self._robot = None\n self._lock = threading.Lock()", "def boot():\n\t\tcreate_project_url_dir(Spider.project_name)\n\t\tcreate_url_data(Spider.project_name, Spider.base_url)\n\t\tSpider.queue = file_to_set(Spider.queue_file)\n\t\tSpider.crawled = file_to_set(Spider.crawled_file)", "def initialize(self):\n self.actions = []\n \"*** YOUR CODE HERE\"\n #raise NotImplementedError()", "def setup_crawler(self, crawlers: List[BaseCrawler]) -> None:\n self.tasks.extend(crawlers)", "def on_init(self):\n self.write_log(\"策略初始化\")", "def on_init(self):\n self.write_log(\"策略初始化\")", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def __init__(self, urls_file_, file_spider_='no', target_format_='', ignored_links_file_='',\n allow_clean_url_='no', time_out_=60, work_path_='./',\n max_recursion_depth_=0, one_bite_='no', white_list_path_=\"\"):\n self.__urls = Crawler.__read_file(urls_file_)\n self.__file_spider = file_spider_\n self.__target_format = target_format_\n self.__allow_clean_url = allow_clean_url_\n self.__one_bite = one_bite_\n self.__white_list_path = white_list_path_\n self.__white_list = []\n\n # loads white list 
in beginning in case an argument was passed for it\n if self.__file_spider == 'yes' and self.__white_list_path != '':\n self.__white_list = Crawler.__read_white_list(self.__white_list_path)\n\n # link titles that should be ignored during recursions\n self.__ignored_links = Crawler.__read_file(ignored_links_file_)\n\n self.__time_out = time_out_\n self.__work_path = os.path.join(work_path_.rstrip('/')+'/', 'DATA')\n self.__recursion_max_depth = max_recursion_depth_\n self.__extensions = ['txt', 'html', 'csv', 'tsv', 'tar', 'raw']\n\n logging.info('''Crawler Has been Initialized With The Below Configurations:\n-------------------------------------------------------------------\n-urls: %s\n-file_spider: %s\n-target_format: %s\n-ignored_links_file: %s\n-allow_clean_url: %s\n-time_out: %s\n-work_path: %s\n-max_recursion_depth: %s\n-one_bite: %s\n-white_list_path: %s\n''', self.__urls, self.__file_spider, self.__target_format, self.__ignored_links,\n self.__allow_clean_url, self.__time_out, self.__work_path,\n self.__recursion_max_depth, self.__one_bite, self.__white_list_path)", "def init_downloader(self) -> None:\n raise NotImplementedError", "def kickoff(self):\n settings = Settings()\n\n # settings.set(\"USER_AGENT\", \"Test\")\n settings.set('JOBDIR', self.args.data_dir)\n self.spider = MavenDataSpider()\n\n # Wrap with crawler, configure\n crawler = Crawler(self.spider, settings)\n crawler.signals.connect(spider_closing, signal=signals.spider_closed)\n\n logger.info('Starting crawler')\n crawler.crawl(self.spider, app=self, dbsess=self.session)\n\n self.spider = crawler.spider\n self.spider.link_queue_mode = False\n if self.args.debug:\n coloredlogs.install(level=logging.DEBUG)\n\n # Keeping thread working\n reactor.run()", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def do_init(self):\n\n pass", "def __init__(self, base_url, start_urls, config, helper_outfile, verbose):\n\n # setup class variables\n self.base_url = base_url\n self.config = config\n self.helper_outfile = helper_outfile\n self.verbose = verbose\n self.found_urls = set()\n self.crawled_urls = {}\n self.crawled_paths = {}\n self.param_infos = {}\n self.helper_pid = None\n self.found_cookies = []\n self.comments = {}\n self.redirects = {}\n self.driver = None\n\n # figure out domain\n parsed_url = urllib.parse.urlparse(base_url)\n self.domain = parsed_url.hostname\n self.port = parsed_url.port\n if not self.port:\n self.port = 80 if parsed_url.scheme == \"http\" else 443\n self.protocol_prefix = \"%s://\" % parsed_url.scheme\n\n # compile exclude path regexes from config\n self.exclude_paths = []\n if self.config.get(\"exclude_paths\", \"\"):\n exclude_paths_str = util.parse_as_csv(self.config.get(\"exclude_paths\", \"\"))\n for path_str in exclude_paths_str:\n self.exclude_paths.append(re.compile(path_str))\n\n # parse cookies from config\n self.cookies = {}\n for key_val_pair in self.config[\"cookie_str\"].split(\";\"):\n if not key_val_pair:\n continue\n if \"=\" not in key_val_pair:\n self.cookies[key_val_pair.strip()] = \"\"\n else:\n key, val = key_val_pair.strip().split(\"=\")\n self.cookies[key.strip()] = val.strip()\n\n # setup start urls\n self.start_urls = set([base_url])\n for url in start_urls:\n # skip paths that are excluded from 
crawling\n if self.exclude_paths and url.count(\"/\") > 2:\n check_str = \"/\" + \"/\".join(url.split(\"/\")[3:])\n if any(re_path.match(check_str) for re_path in self.exclude_paths):\n continue\n self.start_urls.add(url)\n self.start_urls = list(self.start_urls)\n\n # create unix socket for IPC with crawler helper\n if os.path.exists(UNIX_SOCK_ADDR):\n os.remove(UNIX_SOCK_ADDR)\n self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.socket.bind(UNIX_SOCK_ADDR)\n\n # setup selenium if it is configured to be used\n if config[\"use_selenium\"].lower() == \"true\":\n import logging\n logging.getLogger(\"seleniumwire\").setLevel(logging.ERROR)\n from seleniumwire import webdriver\n from selenium.webdriver.chrome.options import Options\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--user-agent=%s\" % self.config[\"user_agent\"])\n\n # on Linux running Selenium as root requires '--no-sandbox' option\n if os.geteuid() == 0 and sys.platform.startswith(\"linux\"):\n chrome_options.add_argument(\"--no-sandbox\")\n self.driver = webdriver.Chrome(options=chrome_options)\n\n # disallow downloads via Selenium (see https://stackoverflow.com/a/47366981)\n self.driver.command_executor._commands[\"send_command\"] = (\"POST\", \"/session/$sessionId/chromium/send_command\")\n params = {\"cmd\": \"Page.setDownloadBehavior\", \"params\": {\"behavior\": \"disallow\", \"downloadPath\": \"\"}}\n command_result = self.driver.execute(\"send_command\", params)\n\n # add cookies\n self.driver.get(self.base_url) # initial request required to add cookies\n self.driver.delete_all_cookies()\n for key, val in self.cookies.items():\n self.driver.add_cookie({\"name\": key, \"value\": val, \"domain\": self.domain})", "def __init__(self, cfg):\n\t\t\n\t\tself.menu = False\n\t\t\n\t\tself.cfg = cfg\n\t\tself.run()", "def start(self):\n self.start_spider()\n self.start_ranker()\n\n concurrent.futures.wait(self.spider_thread_futures) # wait for spiders to finish\n self.logger.info(\"Done crawling\")\n self.ranker.done_crawling.set()\n\n self.ranker.print_ranks()", "def __init__(self):\n super(Handler, self).__init__()\n logging.warning('Initializing coffeeHandler....')\n\n # get an active token and get prepared for sending request\n self.coffee_session = requests.session()", "def __init__(self, action=0):\n self.action = action", "def start(self):\n\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(self._setup())\n except KeyboardInterrupt:\n Reporter.info('Crawler stopping...')\n finally:\n loop.run_until_complete(self._close())\n\n # Next 2 lines are needed for aiohttp resource cleanup\n loop.stop()\n loop.run_forever()\n\n loop.close()", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n self.path = []\n MyAgent.customFood = None\n MyAgent.foodLeft = 0\n MyAgent.specialWalls = {}\n self.followOne = False\n if self.index == 0:\n MyAgent.finding = []\n MyAgent.finding.append(False)", "def initialize(self,init_info):\n self.action_info = init_info.actions\n return True", "def start(self):\n print \"starting to crawler qsbk's page(Enter Q or q to quit)\"\n print\n self.enable = True\n self.load_page()\n # a variabel to control counts\n nowpage = 0\n while self.enable:\n if len(self.stories) > 0:\n # get a page stories\n page_stories = self.stories[0]\n nowpage += 1\n del self.stories[0]\n # print stories\n self.print_one_story(page_stories, nowpage)", "def init(self):\n self.connect_to_switches()\n self.reset_states()", "def 
__init__(self, *args, **kwargs):\n super(TurntableCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('assetName', parts[1], True)\n self.setVar('step', parts[2], True)\n self.setVar('variant', parts[3], True)\n self.setVar('pass', parts[4], True)\n self.setVar('renderName', '{}-{}-{}'.format(\n self.var('assetName'),\n self.var('variant'),\n self.var('pass')\n ),\n True\n )", "def __init__(self,url):\n self.base_url = url\n content = self._get_page_content()\n json_data = self._get_data_json(content)\n self._categories = self._get_categories(json_data)", "def initiate(self):\n\n for item in config.WEATHER_PROVIDERS[self.title]:\n self.__setattr__(item, config.WEATHER_PROVIDERS[self.title][item])\n\n # RP5 and Sinoptik have same URLs for hourly and next day weather info\n if self.title in ('RP5', 'Sinoptik'):\n self.URL_hourly = self.URL\n self.URL_next_day = self.URL\n\n self.logger = self._get_logger(self.title, self.app.args.verbosity)", "def __init__(self):\n self.SPIDER = \"spider\"", "def __init__(self):\n self.timeout = Config.conf['timeout']\n self.ctimeout = Config.conf['ctimeout']\n self.download_timeout = Config.conf['download_timeout']\n self.agent = Config.conf['http_agent']\n self.http_proxy = Config.conf['http_proxy']\n self.cache_support = False\n self.insecure = Config.conf['http_insecure']\n self._curl_exec = Config.conf['use_curl_executable']\n self._select_implementation()", "def init_modes(self):\n \n self.deleteMode = delete_Mode()\n self.commandMode = command_Mode()\n self.visualMode = visual_Mode()\n self.insertMode = insert_Mode()\n self.exMode = ex_Mode()\n self.yankMode = yank_Mode()\n self.gmodeMode = gmode_Mode()\n self.cmodeMode = cmode_Mode()\n self.rmodeMode = rmode_Mode()\n self.tmodeMode = tmode_Mode()\n self.selectionMode = selection_Mode()\n self.indentMode = indent_Mode()", "def agent_init(self):\n pass", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()", "def init(self):\n\t\tsp_addcallback(self.sp_callback)\n\t\tself.downloader.start()", "def initialize() -> fetcher.Fetcher:\n options = fetcher.Input(\n command=\"some_cmd\", config_file=\"looker.ini\", section=\"Looker\"\n )\n return fetcher.Fetcher(options)", "def __init__(self, env, action_repeat=1):\n super().__init__(env)\n if self.env.mujoco_robot.name == \"sawyer\":\n from robosuite.controllers import SawyerIKController\n\n self.controller = SawyerIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n elif self.env.mujoco_robot.name == \"baxter\":\n from robosuite.controllers import BaxterIKController\n\n self.controller = BaxterIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n self.action_repeat = action_repeat", "def __init__(self,**kwargs):\r\n self.__dict__ = dict(list(kwargs.items()) + 
list(self.__dict__.items()))\r\n self.driver = kwargs.get('driver', None)\r\n self.scraper_url = self.state_storage_get_prop('scraper_url') #kwargs.get('scraper_url', None)\r\n self.scraper_url = self.reformat_scraper_url()\r\n self.retry_count = kwargs.get('retry_count', HitParadeBot.DEFAULT_RETRY)\r\n self.command = kwargs.get('command', None)\r\n self.open_url = self.state_storage_get_prop('data_selectors').get('open_url', True) #kwargs.get('data_selectors', {}).get('open_url', True)\r\n self.cache_manager = kwargs.get('cache_manager', None)\r\n parser_kwargs = {'driver' : self.driver}\r\n self.parser_kwargs = kwargs\r\n try:\r\n self.default_parser = kwargs.get('default_parser', 'BeautifulSoupParser')\r\n if self.default_parser is None:\r\n self.default_parser = self.cache_manager.cache_output_component_func(kwargs.get('default_parser', 'BeautifulSoupParser'), **kwargs)\r\n except:\r\n print('exception making parser')\r\n traceback.print_exc()\r\n self.use_once = kwargs.get('use_once', False)\r\n self.use_until_failure = kwargs.get('use_until_failure', False)\r\n self.web_driver = kwargs.get('web_driver', None)\r\n self.force_refresh = kwargs.get('force_refresh', False)\r\n self.get_external_ip_addressesss = kwargs.get('get_external_ip_adressesss', None)", "def __init__(self):\n self.action_server = actionlib.SimpleActionServer(\"navigate_2D_action\",\n Navigate2DAction, self.navigate_cb)\n\n self.robot_point_sub = rospy.Subscriber(\"robot/point\", Point, self.update_robot_position)\n self.robot_current_point = None\n self.robot_goal_point = None\n self.distance_threshold = 0.35\n self.feedback_rate = rospy.Rate(1)", "def run(self) -> None:\n\n # These are set here, because user may\n # change settings (i.e. app.config['LOG_LEVEL] = 'DEBUG')\n # after instantiation\n self.logger = create_logger(self.config[\"LOG_LEVEL\"], self.config[\"LOG_FILE\"])\n\n self._check_valid_config()\n self.exporter = Exporter(self.config[\"OUT_FILE\"], self.config[\"OUT_FORMAT\"])\n self.crawler = Crawler(\n self.logger,\n self.exporter,\n wait=self.config[\"WAIT\"],\n timeout=self.config[\"TIMEOUT\"],\n concurrency=self.config[\"CONCURRENCY\"],\n max_retries=self.config[\"MAX_RETRIES\"],\n )\n\n self.logger.info(\"Starting crawler\")\n indent = \" \" * 4\n for key, val in self.config.items():\n self.logger.info(f\"{indent}{key}: {val}\")\n\n # Create a new event loop for each execution\n # Allows run() to be called multiple times\n try:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n rc = loop.run_until_complete(self.crawler.crawl(self._starting_requests))\n finally:\n loop.close()\n self.end()\n\n if rc == ReturnCode.SUCCESS:\n self.logger.info(\"Crawler ended successfully\")\n else:\n self.logger.critical(\"Crawler ended with error\")", "def __init__(self, config, processors):\n source = HackernewsStories()\n source.configure(config)\n\n super(HackernewsCrawlJob, self).__init__(source, processors)", "def __init__(self, main_url, year, folder):\n\n # logger setting\n logging.basicConfig(\n filename='crawler.log',\n level=logging.INFO,\n format='[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s'\n '- %(message)s',\n datefmt='%H:%M:%S'\n )\n\n # set up logging to console\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s '\n '%(message)s')\n console.setFormatter(formatter)\n # add the handler to the root logger\n 
logging.getLogger('crawler_app').addHandler(console)\n\n self.logger = logging.getLogger('crawler_app')\n\n # configuration / init\n\n self.shelve_obj = None\n self.maven_url = main_url\n self.year = year\n self.file_ext = '.txt'\n self.counter = 0\n self.url_to_parse = list()\n self.list_year_month_url = list()\n self.folder = 'mailbox/'\n self.meta_file_name = self.folder + 'meta.shelve'\n self.process_folder(folder)\n\n list_url = self.parse_main_page()\n msg_year_month = self.parse_year_month_link()\n self.parse_raw_msg()", "def __init__(self, **kwargs):\n # Variables that we give through the constructor.\n # namespace\n self.n = kwargs['n']\n self.robots = [Robot(i, kwargs['displacement_xyz']) for i in range(self.n)]\n self.controllers_list = [\n 'joint_state_controller',\n 'joint1_B_controller',\n 'joint1_F_controller',\n 'joint1_L_controller',\n 'joint1_R_controller',\n 'joint2_B_controller',\n 'joint2_F_controller',\n 'joint2_L_controller',\n 'joint2_R_controller',\n 'joint3_B_controller',\n 'joint3_F_controller',\n 'joint3_L_controller',\n 'joint3_R_controller',\n 'joint4_B_controller',\n 'joint4_F_controller',\n 'joint4_L_controller',\n 'joint4_R_controller'\n ]\n for r in self.robots:\n for n in self.controllers_list[1:]:\n r.publisher_list.append(\n rospy.Publisher(r.ns + '/' + n + '/command', Float64, queue_size=1))\n\n self.all_controllers_list = []\n for r in self.robots:\n for c in self.controllers_list:\n self.all_controllers_list.append(r.ns + '/' + c)\n reset_controls_bool = True\n super(CrawlerRobotEnv, self).__init__( n=self.n, robot_name_spaces=['crawler_'+str(i) for i in range(self.n)],\n controllers_list=self.controllers_list,\n reset_controls=reset_controls_bool)\n rospy.logdebug(\"END init CrawlerRobotEnv\")", "def __init__(self, script=None, **kwargs):\n super().__init__()\n\n # The splash lua script. 
Provide a custom lua script to fit your use case.\n if script:\n self.LUA_SOURCE = script\n else:\n self.LUA_SOURCE = get_data(\n 'transistor',\n 'scrapers/scripts/basic_splash.lua').decode('utf-8')\n\n # after calling super().__init__(), call self.start_http_session()\n\n # ------------------ kwargs ---------------- #\n # Set these as needed in your subclass with keywords or hardcoded.\n self.baseurl = kwargs.pop('baseurl', None)\n self.searchurl = kwargs.pop('searchurl', None)\n self.crawlera_user = kwargs.pop('crawlera_user', None)\n self.name = kwargs.pop('name', None)\n self.referrer = kwargs.pop('referrer', None)\n self.user_agent = kwargs.pop('user_agent',\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/73.0.3683.86 Safari/537.36\")\n self.max_retries = kwargs.pop('max_retries', 5)\n self.http_session_timeout = kwargs.pop('http_session_timeout', (3.05, 10.05))\n self.splash_args = kwargs.pop('splash_args', None)\n self.splash_wait = kwargs.pop('splash_wait', 3.0)\n self.js_source = kwargs.pop('js_source', None)\n\n # ----- kwargs only used for testing setup ----- #\n self._test_true = kwargs.get('_test_true', False)\n self._test_page_text = kwargs.get('_test_page_text', None)\n self._test_status_code = kwargs.get('_test_status_code', None)\n self._test_url = kwargs.get('_test_url', None)\n self._test_soup_config = kwargs.get('_test_soup_config', None)\n # ----- end kwargs for testing setup ----- #\n\n # ------ flags for internal use --------- #\n # For example, if a public method on your scraper returns\n # None undesirably, switch the self._result flag to False.\n # Then, you can just delete scrape results if flagged False.\n self._result = True\n # ------- /end internal use flags -------- #\n\n # Whether we already have a valid HTTP session with the remote server\n self.http_session_valid = False\n\n # ssl._create_default_https_context = ssl._create_unverified_context\n self._crawlera_ca = get_data('transistor',\n 'scrapers/certs/crawlera-ca.crt').decode('utf-8')\n\n ssl.create_default_context(cadata=self._crawlera_ca)\n\n self.browser = SplashBrowser(\n soup_config={'features': 'lxml'},\n requests_adapters={'http://': HTTPAdapter(max_retries=self.max_retries)})\n\n self.cookies = dict_from_cookiejar(self.browser.session.cookies)\n\n # set the splash basic authorization\n self.auth = basic_auth_header(\n username=os.environ.get('SPLASH_USERNAME', 'user'),\n password=os.environ.get('SPLASH_PASSWORD', 'userpass'))\n self.browser.session.headers.update({'Authorization': self.auth})", "def __init__(self, downloader=None):\n self._ready = False\n self.set_downloader(downloader)", "def __init__(self):\n self.gdc = GdocsCrawler()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup_start_agents = False", "def __init__(self, args, string_search, mode_search,\n page, key_search, torrent_page, domain):\n self.args = args\n self.back_to_menu = False\n self.content_page = None\n self.domain = domain\n self.elements = None\n self.found_torrents = False\n self.hrefs = []\n self.keep_search = True\n self.key_search = key_search\n self.magnet = \"\"\n self.mode_search = mode_search\n self.page = page\n self.picked_choice = False\n self.selected = \"\"\n self.string_search = string_search\n self.table = None\n self.torrent = \"\"\n self.torrent_page = torrent_page\n self.url = \"\"\n self.movieName = \"\"\n self.retries = 0", "def __init__(self, url):\n\n parser = argparse.ArgumentParser(description=\"A 
tool to assist in social media statistic tracking.\")\n parser.add_argument(\"username\")\n parser.add_argument(\"--searchrank\")\n self.args = parser.parse_args()\n\n self.username = self.args.username\n self.page = self.get_page(url)\n self.soup = self.get_soup()", "def prepare(self):\n self.uri = self.request.uri\n self.path = self.request.uri.split('?')[0]\n self.method = self.path.split('/')[-1]\n self.default_methods = {}\n #\n # You can use the before_handler in a local controller to\n # process your own prepare stuff.\n # a common use case is to call: self.print_debug_info().\n # which then applies only to this specific handler.\n # \n before_handler = getattr(self, \"before_handler\", None)\n print(\"calling before_handler for \" + str(self.__class__))\n if callable(before_handler):\n before_handler()", "def _start(self):\n if self._classifier is None:\n self._classifier = TFSlimClassifier(self.config)\n self._classifier.__enter__()", "def _initialise_run(self) -> None:", "def __init__(self, spider):\n\n super(SprintBehavior, self).__init__(spider)\n self.remoteContext = spider.remoteController.context", "def initialize_attributes(self):\n self.host = self.netloc\n self.url = self.geturl()\n\n self.set_scheme_if_non('https')\n \n # The file extensions we are watching for. Either load the extensions\n # from a text file, or create a seperate python file contain a list\n # supported file extensions\n self.listed_file_extensions = [ \n '.jpg', '.bmp', '.png',\n '.mp3', '.mp4', '.flv', '.avi',\n '.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',\n '.exe', '.git', '.torrent',\n ] \n # Type Boolean: True or False\n # Urls contain some useful information. Depending on the framework the \n # website is built on, a url can contain information about paths and files.\n # This is a glimpse of the sites computer system. Pretty Useful!\n self.is_file_extension = None # Does this path end as a file?\n #self.file_extension = self.check_for_file_extension()", "def __init__(self):\n rospy.init_node('approach')\n\n rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n self.vel_pub = rospy.Publisher('/cmd_vel_mux/input/navi', Twist,\n queue_size=10)\n self.scan = None", "def run(self):\n \n try:\n logging.info('Thread:{} starting'.format(self.thread_id))\n\n self.crawl_url()\n self.parse_html()\n except IOError as e:\n self.thread_post_processing()\n logging.error('CrawlUrlError url:{} msg:{}'.format(self.url, e))\n\n self.thread_post_processing()", "def __init__(self, mode: str = \"\", src: str = \"\", ds: str = \"\", **fetcher_kwargs):\n\n # Facade options:\n self._mode = OPTIONS[\"mode\"] if mode == \"\" else mode\n self._dataset_id = OPTIONS[\"dataset\"] if ds == \"\" else ds\n self._src = OPTIONS[\"src\"] if src == \"\" else src\n\n _VALIDATORS[\"mode\"](self._mode)\n _VALIDATORS[\"src\"](self._src)\n _VALIDATORS[\"dataset\"](self._dataset_id)\n\n # Load data source access points:\n if self._src not in AVAILABLE_DATA_SOURCES:\n raise InvalidFetcher(\n \"Requested data fetcher '%s' not available ! 
Please try again with any of: %s\"\n % (self._src, \"\\n\".join(AVAILABLE_DATA_SOURCES))\n )\n else:\n Fetchers = AVAILABLE_DATA_SOURCES[self._src]\n\n # Auto-discovery of access points for this fetcher:\n # rq: Access point names for the facade are not the same as the access point of fetchers\n self.Fetchers = {}\n self.valid_access_points = []\n for p in Fetchers.access_points:\n if p == \"box\": # Required for 'region'\n self.Fetchers[\"region\"] = Fetchers.Fetch_box\n self.valid_access_points.append(\"region\")\n if p == \"wmo\": # Required for 'profile' and 'float'\n self.Fetchers[\"float\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"float\")\n self.Fetchers[\"profile\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"profile\")\n\n # Init sub-methods:\n self.fetcher = None\n if self._dataset_id not in Fetchers.dataset_ids:\n raise ValueError(\n \"%s dataset is not available for this data source (%s)\"\n % (self._dataset_id, self._src)\n )\n self.fetcher_kwargs = {**fetcher_kwargs}\n self.fetcher_options = {**{\"ds\": self._dataset_id}, **fetcher_kwargs}\n self.postproccessor = self.__empty_processor\n self._AccessPoint = None\n\n # Init data structure holders:\n self._index = None\n self._data = None\n\n # Dev warnings\n # Todo Clean-up before each release\n if self._dataset_id == \"bgc\" and self._mode == \"standard\":\n warnings.warn(\n \"'BGC' dataset fetching in 'standard' user mode is not reliable. \"\n \"Try to switch to 'expert' mode if you encounter errors.\"\n )", "def __init__(self, downloader=None):\n super(YoumakerIE, self).__init__(downloader=downloader)\n self._protocol = \"https\"\n self._category_map = None\n self._cache = {}", "def crawl(self):\n\n # create helper process and setup IPC\n self.socket.listen(1)\n help_out_fd = open(self.helper_outfile, \"w\")\n with subprocess.Popen(\"./crawl_helper.py\", stdout=help_out_fd, stderr=subprocess.STDOUT) as proc:\n self.helper_pid = proc.pid\n try:\n conn, _ = self.socket.accept()\n # create initial params for crawler helper and send them\n new_urls = set()\n setup_params = {\"start_urls\": self.start_urls, \"allowed_domains\": [self.domain],\n \"cookies\": self.cookies, \"user_agent\": self.config[\"user_agent\"]}\n ipc_operations.send_object(conn, setup_params)\n\n # loop: receive a response object, then send new URLs to crawl. Catch & handle problems.\n while True:\n try:\n proc.wait(timeout=0.001)\n break\n except subprocess.TimeoutExpired:\n response = ipc_operations.receive_object(conn)\n if not response: # socket is dead / closed\n break\n new_urls = self.process_response(response)\n ipc_operations.send_object(conn, new_urls)\n except socket.timeout:\n util.printit(\"Unix socket connection to scrapy crawler unexpectedly broke. 
\" +\n \"Quitting crawling of %s\" % self.base_url, color=util.RED)\n break\n finally:\n # ensure connection is closed and helper process killed in any case\n conn.close()\n proc.kill()\n\n # after the actual crawling, extract all the gathered cookies from Selenium\n if self.config[\"use_selenium\"].lower() == \"true\":\n selenium_cookies = self.driver.get_cookies()\n for cookie in selenium_cookies:\n if not any(cookie[\"name\"] == c[\"name\"] and cookie[\"path\"] == c[\"path\"] and\n cookie[\"domain\"] == c[\"domain\"] for c in self.found_cookies):\n parsed_cookie = {}\n for key in (\"name\", \"path\", \"domain\", \"httpOnly\", \"secure\"):\n parsed_cookie[key] = cookie[key]\n self.found_cookies.append(parsed_cookie)\n\n help_out_fd.close()\n return self.create_results()", "def main():\n\n from scrapy.crawler import CrawlerProcess\n from scrapy.utils.project import get_project_settings\n\n process = CrawlerProcess(get_project_settings())\n process.crawl(NCBIGeoSpider)\n process.start()", "def __init__(self, board='Gossiping', pages=1, file='tmp.json', title_lim=[], jsonf=None, copy_data=[], simple_mode=True):\n if copy_data:\n self.extend(copy_data)\n return\n os.chdir(os.path.split(os.path.realpath(__file__))[0])\n print(os.getcwd())\n com = 'scrapy crawl ptt ' if not simple_mode else 'scrapy crawl ptt_url '\n # output json file name\n com += '-o %s ' % (file)\n # page\n com += '-a pages=%d ' % (pages)\n # board\n com += '-a board=%s ' % (board)\n\n # title limit\n if title_lim:\n com += '-a title_lim=\"'\n for lim in title_lim:\n com += \"%s,\" % (str(lim))\n com += '\" '\n # not opened by json_file\n if not jsonf:\n # start crawl\n print('Command: ' + com)\n os.system('rm -f {}'.format(file))\n os.system('{}'.format(com))\n # opened by json file\n else:\n file = jsonf\n\n # all data save in self\n self.load_json(file)\n self.com = com\n self.file = file", "def init(self):\n self.dispatcher.start()\n self.replyer.start()", "def _initialize(self):\n self.send_init_command()", "def initialize(self, context):\r\n pass", "async def initialiser_crawler(self) -> Dict[str, List[req.Response]]:\n web_pages = {}\n with ThreadPoolExecutor(max_workers=NUM_WORKERS) as exe:\n try:\n loop = asyncio.get_event_loop()\n tasks = [\n loop.run_in_executor(exe, self.collect_webpages, keyword)\n for keyword in self.keywords \n ]\n for res in await asyncio.gather(*tasks):\n web_pages.update(res)\n except KeyboardInterrupt:\n loop.close()\n raise KeyboardInterrupt\n return web_pages", "def __init__(self):\n self._urls = []", "def __init__(self):\n self.setup_called = False", "def _init_browser(self):\n # Initialize the browser\n br = mechanize.Browser()\n # Ignore the robots.txt\n br.set_handle_robots(False)\n return br", "def __init__(self, url, epRange):\n self.driver = webdriver.PhantomJS()\n self.downloads = OrderedDict() # sort episodes in asending order\n self.pbar = \"\" # Download Progressbar\n self.Main(url, epRange)", "def __init__(self):\n self.actions = []", "def on_init(self):\n self.write_log(\"策略初始化\")\n\n self.load_bar(10)", "def init(self):\n\n\t\tstatus, param = self.execute(self.mission, 'on_init', self.kingdom)\n\n\t\treturn status", "def generate_crawler(self):\n \n targets = ['https://community.upwork.com/t5/Announcements/bd-p/news', \\\n 'https://community.upwork.com/t5/Freelancers/bd-p/freelancers', \\\n 'https://community.upwork.com/t5/Clients/bd-p/clients', \\\n 'https://community.upwork.com/t5/Agencies/bd-p/Agencies']\n target = None\n for tar in targets:\n if tar in sys.argv:\n 
target = tar\n\n #Regenerate crawler object depending on params\n if target is None:\n if '-d' in sys.argv:\n crawler = Crawler(self.webdriver, self.db, debug=True)\n else:\n crawler = Crawler(self.webdriver, self.db)\n else:\n if '-d' in sys.argv:\n crawler = Crawler(self.webdriver, self.db, debug=True, link=target)\n else:\n crawler = Crawler(self.webdriver, self.db, link=target)\n\n return crawler", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def start_scan(self):\n\n # If no websites exist in the list\n if len(self.config.websites) == 0:\n QtWidgets.QMessageBox.question(self, 'Can Not Continue',\n \"You have not specified any URL's to scan. Try adding some!\",\n QtWidgets.QMessageBox.Ok)\n return\n\n # If no templates have been added to be tracked\n if len(self.comparer.get_template()) == 0:\n QtWidgets.QMessageBox.question(self, 'Can Not Continue',\n \"You have not added any template images for the scanner to search for. \"\n \"Try adding some!\",\n QtWidgets.QMessageBox.Ok)\n return\n\n\n self.crawler = Crawler(self.config.websites,\n self.config.search_depth,\n self.config.max_browsers,\n self.config.browser_timeout)\n self.crawler.setDaemon(True)\n\n # Disable the scan button\n self.scan_btn.setDisabled(True)\n self.settings_btn.setDisabled(True)\n self.template_btn.setDisabled(True)\n self.website_btn.setDisabled(True)\n\n # Start crawling in another thread\n self.crawler.start()\n\n # Start analyzing in a while so the browsers have time to open\n self.scan_timer.singleShot(1000, self.check_crawler)", "def open_spider(self, spider):\n pass", "def setMode(self, mode):\n\n # initialize download only flag \n self.downloadOnly = self.parent.downloadOnly\n\n # initialize download type flag\n if( self.parent.downloadType != \"\" ):\n self.download.type = self.parent.downloadType\n\n # initialize download username\n if( self.parent.downloadUser != \"\" ):\n self.download.username = self.parent.downloadUser\n\n # initialize download password \n if( self.parent.downloadPass != \"\" ):\n self.download.password = self.parent.downloadPass\n\n # initialize cleanInstall flag\n self.cleanInstall = self.parent.cleanInstall\n \n self.rebuild = self.parent.rebuild\n\n self.envcmake.update(self.parent.envcmake)\n self.makeTests = self.parent.makeTests\n\n if( mode == \"install\" ):\n\n if( not self.installSupport ):\n self.abort( \"Sorry, it is not possible to install \" \\\n + self.name + \" with this installation script!!\" )\n\n # software version\n self.version = self.__userInput\n\n # name of the tarball for wget downloads\n self.download.tarball = self.alias + \"_\" + self.version + \".tgz\"\n \n # install path\n self.installPath = self.parent.installPath + \"/\" + self.alias + \"/\" + self.version\n \n elif( mode == \"link\" ):\n \n # set link flag to true\n self.useLink = True\n\n # linkPath\n if( len( self.patchPath ) > 0 ):\n self.linkPath = fixPath( self.patchPath + \"/\" + self.alias + \"/\" + self.__userInput )\n else:\n self.linkPath = fixPath( self.__userInput )\n \n # check if installation where the link points to is ok \n self.checkInstall(True)\n \n # extract version from Path\n self.version = basename( self.linkPath )\n \n # now override installPath\n newPath = self.parent.installPath + \"/\" + self.alias + \"/\" + self.version\n self.installPath = fixPath( newPath )\n\n mode = \"use\"\n \n elif( mode == \"use\" ):\n if( self.__userInput != \"auto\" ):\n # 1st case: full path to installation is given\n 
self.installPath = fixPath(self.__userInput)\n # extract version from path\n self.version = basename( self.installPath )\n # 2nd case: use( Mod( \"vXX-XX\" ) is given\n if( not self.checkInstall() ):\n self.version = self.__userInput\n self.installPath = self.parent.installPath + \"/\" + self.alias + \"/\" + self.version\n # 1st and 2nd cases failed:\n if( not self.checkInstall() ):\n print 'failed to find', self.name, 'in', self.installPath\n # revert installPath back to user input\n self.installPath = fixPath(self.__userInput)\n self.version = basename( self.installPath )\n\n # check if installed version is functional, abort otherwise\n self.checkInstall(True)\n\n self.buildPath = self.installPath + '/build'\n\n self.mode = mode", "def setUp(self) -> None:\n super(ReadOnlyActionTestsMixin, self).setUp()\n\n self.request = RequestFactory().request()\n self.siteconfig = SiteConfiguration.objects.get_current()", "def __init__(self, *args, **kwargs):\n super(AlibabaCompanySpider, self).__init__(*args, **kwargs)", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(1) # 具体加载多少天的数据, 1表示1天的数据,如果是2表示过去2天的数据", "def init(self, target):\n pass", "def __init__(self, priority):\n self.robot = None\n self.priority = priority\n self.lock = Lock()\n self.action_thread = None", "def dm_setup(self):\n dispatcher.connect(\n self.dequeue_next_page_requests,\n signal=signals.spider_idle\n )\n self._was_setup_called = True", "def __init__(self, mode=\"shore\", n=2 ** 14):\n self.mode = mode\n self.lang = language.get_language()\n self.build_grid(n)\n\n if \"/\" in mode:\n self.mixed_heightmap()\n mode = mode.split(\"/\")[0]\n else:\n self.single_heightmap(mode)\n\n self.finalize()\n\n self.riverperc = riverpercs[mode] * np.mean(self.elevation > 0)\n self.place_cities(np.random.randint(*city_counts[mode]))\n self.grow_territory(np.random.randint(*terr_counts[mode]))\n self.name_places()\n self.path_cache = {}\n self.fill_path_cache(self.big_cities)", "def __init__(self, browser=settings.DEFAULT_BROWSER):\n self.browser = browser", "def __init__(self, *kwargs):\n self.session = requests.Session()\n self.config_path = os.path.join(\n os.path.dirname(__file__), 'config.json')\n self.load_config()\n if self.application_token == '':\n self.set_application_token()\n self.token = self.get_token()\n self.get_settings()", "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "async def pre_action_init(self) -> None:", "def __init__(self):\n self.session = CommandInterface()\n while not self._login():\n self.session = CommandInterface()\n self._navigate_mainmenu(self.session.main_menu())", "def on_init(self):\n self.write_log(\"策略初始化\")\n 
self.load_bar(1)", "def __init__(self):\n self.log = logging.getLogger()\n XMLRepollFiles.navigate_to()\n return", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.load_bar(10)" ]
[ "0.6156874", "0.5882419", "0.5669092", "0.5625389", "0.56192595", "0.5612265", "0.55834305", "0.5568586", "0.55475867", "0.55300885", "0.5503639", "0.54796416", "0.54796416", "0.5456761", "0.53957784", "0.5347344", "0.53471684", "0.5346393", "0.53363496", "0.53115886", "0.53047734", "0.5302404", "0.5287524", "0.52838176", "0.5275271", "0.5262054", "0.5259705", "0.5257128", "0.5251591", "0.524307", "0.5242189", "0.5239346", "0.5230947", "0.5225857", "0.52202445", "0.5219075", "0.52151275", "0.5207102", "0.5203885", "0.519923", "0.51979184", "0.5194383", "0.5193841", "0.51778376", "0.5174831", "0.5167805", "0.51677245", "0.5163708", "0.514832", "0.5145216", "0.51360697", "0.51094145", "0.5106575", "0.50987715", "0.5098393", "0.508129", "0.50810874", "0.5076771", "0.5074948", "0.50686646", "0.506371", "0.50568086", "0.50453615", "0.5040698", "0.5039222", "0.5033654", "0.5019095", "0.5018606", "0.5017454", "0.50074714", "0.50035304", "0.50004137", "0.49963778", "0.49933785", "0.4989319", "0.4985429", "0.4981603", "0.49813762", "0.49813762", "0.49813762", "0.4980961", "0.4975352", "0.4970216", "0.49700996", "0.4969652", "0.49693444", "0.49657455", "0.49647093", "0.496313", "0.4959097", "0.4954515", "0.49493366", "0.49463996", "0.49428087", "0.49422693", "0.49397036", "0.49394852", "0.49288002", "0.49161747", "0.4909683" ]
0.5355751
15
Decides which action to start depending on the mode.
def decide_action(self, mode: CrawlMode = CrawlMode.NO, bugList: Union[List, str] = None) -> None: # checks on which crawl operation to execute if mode == CrawlMode.BUG: self.get_all_bugs() elif mode == CrawlMode.COMMENT: if bugList: self.get_all_comments(bugList) else: print('Error: No buglist to be found. Please check your params and start again.') return elif mode == CrawlMode.BOTH: bugIDList = self.get_all_bugs() self.get_all_comments(bugIDList) elif mode == CrawlMode.CFAST: self.get_all_comments_mp(bugList, self.workers) elif mode == CrawlMode.BFAST: bugsIDList = self.get_all_bugs() self.get_all_comments_mp(bugsIDList, self.workers) else: return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_action(self):\r\n pass", "def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "def startMode(self):\n raise NotImplementedError('startMode() should be implemented')", "def select_action(self):\n pass", "def process_mode(self):\n self.mode = 'process'\n self.set_action_text(self.request + ' processed')\n self.set_action_bgcolor(self._colors[self.request], alpha=1.0)\n if self.err_msg:\n self.set_action_text(self.request + ' processed' + '\\nERROR: ' +\n self.err_msg)\n self.err_msg = None\n time.sleep(1.0)\n else:\n time.sleep(0.5)\n if self.request in ('stop', 'reset', 'fail'):\n self.wait_mode()\n elif self.request == 'go':\n self.run_mode()\n self.request = None", "def request_mode(self, request):\n self.mode = 'request'\n self.request = request\n self.set_action_text(self.request + ' requested')\n self.set_action_bgcolor(self._colors[self.request], alpha=0.2)", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]", "def bcp_mode_start(self, config, priority, mode, **kwargs):\n del config\n del kwargs\n self.machine.bcp.transport.send_to_all_clients('mode_start', name=mode.name, priority=priority)\n\n return self.bcp_mode_stop, mode.name", "def mode(self, mode_type: str):\r\n self._mode = mode_type.lower()\r\n self.mode_hist.append(mode_type)\r\n\r\n if self.mode_hist[-2] != mode_type and self._daq:\r\n msg = Message(\"mode\", mode_type, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)", "def mode (self, mode) :\r\n self.mode_ = mode", "def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...", "def choose_action(self, obs, **kwargs):\n pass", "def _on_mode_change(self, event_name: str, data: dict, kwargs: dict) -> None:\n mode = data[\"name\"]\n\n if data[\"state\"] == \"on\":\n self.mode_events.append(mode)\n elif mode in self.mode_events:\n self.mode_events.remove(mode)\n\n try:\n primary = max(\n (m for m in self.mode_alterations if m[\"mode\"] in self.mode_events),\n key=lambda m: m[\"priority\"],\n )\n except ValueError:\n try:\n primary = next((m for m in self.mode_alterations if m[\"mode\"] == mode))\n except StopIteration:\n return\n\n if primary[\"action\"] == \"enable\":\n primary[\"action\"] = \"disable\"\n else:\n primary[\"action\"] = \"enable\"\n\n # If the primary mode alteration prescribes an action that matches the state the\n # app is already in, return:\n if (self.enabled and 
primary[\"action\"] == \"enable\") or (\n not self.enabled and primary[\"action\"] == \"disable\"\n ):\n return\n\n if primary[\"action\"] == \"enable\":\n self.enable()\n else:\n self.disable()", "def startMode(self):\n return True, None", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def choose_action(self, state, task=0):\n pass", "def setMode(self, targetmode):\n self.resetStream()\n\n if targetmode not in self.prompts.keys():\n raise ValueError(\"Invalid Mode %s\" % targetmode)\n\n initialmode = self.getMode()\n if targetmode == initialmode:\n logger.debug(\"In %s mode\" % targetmode)\n return True\n\n logger.debug(\"Changing mode from '%s' to '%s' on %s\" % (initialmode, targetmode, self))\n\n # Provide all permutations of mode switching\n if targetmode == CLI_MODES.config and initialmode == CLI_MODES.enable:\n self._session.sendline(\"config terminal\")\n elif targetmode == CLI_MODES.config and initialmode == CLI_MODES.shell:\n self._session.sendline(\"cli -m config\")\n elif targetmode == CLI_MODES.config and initialmode == CLI_MODES.pmx:\n self._session.sendline(\"quit\")\n elif targetmode == CLI_MODES.enable and initialmode == CLI_MODES.shell:\n self._session.sendline(\"cli -m enable\")\n elif targetmode == CLI_MODES.enable and initialmode == CLI_MODES.config:\n self._session.sendline(\"exit\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.enable:\n self._session.sendline(\"_shell\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.config:\n self._session.sendline(\"_shell\")\n elif targetmode == CLI_MODES.shell and initialmode == CLI_MODES.mysql:\n self._session.sendline(\"quit\")\n elif targetmode == CLI_MODES.pmx:\n self.setMode(CLI_MODES.config)\n self._session.sendline(\"pmx\")\n elif targetmode == CLI_MODES.mysql:\n self.setMode(CLI_MODES.shell)\n self._session.sendline(\"idbmysql\")\n elif targetmode != CLI_MODES.config and initialmode == CLI_MODES.pmx:\n # Moving from pmx to other modes. Switch to config and proceed..\n self.setMode(CLI_MODES.config)\n self.setMode(targetmode)\n self._session.sendline(\"\") # Send empty line for guessMode to work\n elif targetmode != CLI_MODES.shell and initialmode == CLI_MODES.mysql:\n # Moving from mysql to other modes. Switch to shell and proceed..\n self.setMode(CLI_MODES.shell)\n self.setMode(targetmode)\n self._session.sendline(\"\") # Send empty line for guessMode to work\n else:\n raise ValueError(\"Invalid Mode combination. Targetmode: %s, Currentmode: %s\" % (targetmode, initialmode))\n\n finalmode = self.guessMode()\n logger.debug(\"Mode changed to %s mode\" % finalmode)\n if targetmode == finalmode:\n if finalmode == CLI_MODES.shell:\n self.initShell()\n return True\n else :\n # A user can be in pmx subshells. 
So we might need to get back a couple levels\n if finalmode == CLI_MODES.pmx and targetmode == CLI_MODES.config:\n return self.setMode(CLI_MODES.config)\n else:\n logger.warn(\"Unable to set '%s' mode\" % targetmode)\n return False", "def mode(self, mode):\n self.set_mode(mode)", "def chooseAction(self):\n print \"nothing\"\n pass", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def mode(self, channel, target, command=\"\"):\n time.sleep(1)\n self.s.send(\"MODE %s %s%s\\n\" % (channel, target, (command and (\" \" + command))))\n logger.log(\"MODE %s %s%s\" % (channel, target, (command and (\" \" + command)))).LogSend()", "def bcp_mode_start(self, name=None, priority=0, **kwargs):\n if not name:\n return\n #todo raise error\n\n if name in self.game_modes:\n self.game_modes[name].start(priority=priority)", "def cmd_mode(args):", "def choose_action(self, *args, **kwargs):\n return NotImplementedError", "def action_type(self):", "def change_mode(self):\n return (self.mode + 1) % 2", "def route(self):\n\n mode = self.addon_args.get(\"mode\", [\"main_page\"])[0]\n\n if not mode.startswith(\"_\"):\n getattr(self, mode)()", "def choose_action(self, board):\n raise NotImplementedError", "def select_action(self, **kwargs):\n raise NotImplementedError('This method should be overriden.')", "def mode(self, value):\r\n if value != self._mode:\r\n if str(value).lower() == 'edit':\r\n if self._mode == 'read':\r\n self.stop_reading()\r\n self._mode = 'edit'\r\n self.start_editing()\r\n elif str(value).lower() == 'read':\r\n if self._mode == \"edit\":\r\n self.stop_editing(save=False)\r\n self._mode = \"read\"\r\n elif value is None:\r\n if self._mode == 'edit':\r\n self.stop_editing(save=False)\r\n elif self._mode == 'read':\r\n self.stop_reading()", "def set_current_operation_mode(self, operation_mode):\n self._current_operation_mode = operation_mode\n \"\"\"Retrieve from textual representation\"\"\"\n if self._current_operation_mode == 'Off':\n self._api._opmode = 0;\n elif self._current_operation_mode == 'Heat only':\n self._api._opmode = 1;\n elif self._current_operation_mode == 'Cool only':\n self._api._opmode = 2;\n elif self._current_operation_mode == 'Heat & Cool':\n self._api._opmode = 3; \n self._api.set()\n self.schedule_update_ha_state()", "def choose_action(self):\n\n # Set the agent state and default action\n action=None\n if len(self.action_sequence) >=1:\n action = self.action_sequence[0] \n if len(self.action_sequence) >=2:\n self.action_sequence=self.action_sequence[1:]\n else:\n self.action_sequence=[]\n return action", "def ChangeMode(self, mode):\n if mode in MODE_DICT:\n self.ImportCover(MODE_DICT[mode], layer = MODE_LAYER)", "def onClick(self):\n self.app.setActiveMode(\"start\")", "def action(self):\n current_action = self.get_script_entry()\n if current_action[\"type\"] == \"request\":\n self._handle_request(current_action)\n elif current_action[\"type\"] == \"event\":\n self._handle_event(current_action)\n elif current_action[\"type\"] == \"response\":\n self._handle_response(current_action)\n else:\n raise AttributeError(\"Wrong action type!\" +\n \" Scenario: \" + str(self._loaded_sc[\"name\"]) +\n \" Action: \" + str(self._scenario_script_cur))", "def handleModeToggle(self):\n self.filesList.changeMode(not self.autoMode)\n if self.autoMode:\n 
self.modeToggle.setText(\"Auto Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Manual Mode)\")\n else:\n self.modeToggle.setText(\"Manual Mode\")\n self.mainWindow.setWindowTitle(\"CMAT (Auto Mode)\")\n self.autoMode = not self.autoMode", "def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)", "def select_action(self, state):", "def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. Valid options are 'train' and 'eval'.\".format(mode))", "def mode(self) -> Mode:\n ...", "def dispatch_mode_for_channel(self, target, mode):\n channel = target[1:]\n assert channel in self.server.channels\n self.server.channels[channel].mode(self, mode)", "def mode(self):\r\n pass", "def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)", "def setMode(self):\n if self.currentTarget != None and self.finishedAssault == 0:\n if self.isAssault == 1:\n if self.currentTarget != None:\n self.mode = 'assault'\n else:\n self.mode = 'escape'\n else:\n self.log.debug('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n ##self.myGalaxy.resultList.append('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n if ((len(self.activeWeapons) == 0 or (self.currentISP/self.myShipHull.maxISP) < 0.7)) and self.__module__ == 'anw.war.ship':\n self.mode = 'escape'\n else:\n range = funcs.getTargetRange(self.posX, self.posY, self.currentTarget.posX, self.currentTarget.posY)\n if range <= self.range:\n self.mode = 'engage'\n else:\n self.mode = 'close'\n else:\n self.mode == 'escape'\n if globals.serverMode == 0:\n self.shipsim.updateShipMode()", "def action_run(self):\n pass", "def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()", "def set_action_cmd(self, action):\n if self.args.snapcheck is True:\n action = \"snapcheck\"\n if self.args.check is True:\n action = \"check\"\n if self.args.snap is True:\n action = \"snap\"\n if self.args.diff is True:\n action = \"diff\"\n return action", "def get_mode(self):\n self.read(\":FUNC?\")", "def select_action(self, state):\n pass", "def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. 
The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')", "def _on_mode_change(self, event):\n mode = event.mode\n if mode == Mode.PAN_ZOOM:\n self.panzoom_button.setChecked(True)\n elif mode == Mode.PICKER:\n self.pick_button.setChecked(True)\n elif mode == Mode.PAINT:\n self.paint_button.setChecked(True)\n elif mode == Mode.FILL:\n self.fill_button.setChecked(True)\n else:\n raise ValueError(\"Mode not recognized\")", "def main(ctx):\n\n print(\"Mode:\")", "def get_mode_parameter(mode):\n if mode == 'job':\n return 'cli'\n elif mode == 'serve':\n return 'serving'\n else:\n return mode", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def tempo_mode_switch(event):\n value = gremlin.actions.Value(event.is_pressed)\n tempo_mode_switch_container(event, value)", "def start(self):\n op = self.menu()\n self.opcoes(op)\n if op != \"q\" and op != \"w\":\n self.start()", "def choose_action(self, game_state):\n util.raise_not_defined()", "def set_mode(self, mode):\n self.mode = mode\n self.btn_mode.setText(f\"{mode.title()}\\u25BE\")\n self.state_changed()", "def init_modes(self):\n \n self.deleteMode = delete_Mode()\n self.commandMode = command_Mode()\n self.visualMode = visual_Mode()\n self.insertMode = insert_Mode()\n self.exMode = ex_Mode()\n self.yankMode = yank_Mode()\n self.gmodeMode = gmode_Mode()\n self.cmodeMode = cmode_Mode()\n self.rmodeMode = rmode_Mode()\n self.tmodeMode = tmode_Mode()\n self.selectionMode = selection_Mode()\n self.indentMode = indent_Mode()", "def pickUpActionAny(self, **kwargs):\n pose_offset = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n colour = kwargs[\"fname\"]\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n colour = 'blue'\n else:\n colour = 'red'\n\n self.locator.update_pose() #get current pose of arm\n\n success = self.locator.locate(colour, pose_offset, 1)\n self.mm.loadMenu(\"actionMenu\")", "def default_action(self):\n pass", "def get_action_command(self):\n if self.action.value == \"start\":\n self.action_command = self.ServerStartSubCommand()\n else:\n self.action_command = None", "def set_mode(self, 
mode='List'):\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce1:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce1:FREQuency:MODE CW')", "def set_mode(self, mode='List'):\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce1:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce1:FREQuency:MODE CW')", "def openMenuHandler(self, action):\n\n button_text = action.text()\n\n if button_text == 'Open Command File':\n self.openFile()\n\n elif button_text == 'Open Scenario':\n self.openScenarioFile()", "def change_mode(self):\n master.destroy()\n os.system(\"add_mode_run.py\")", "def ChooseAction(self):\n self.lastAction = None\n self.lastState = None\n if(self.attention is None or self.attention == \"\"): return\n # find best action for the currently attended node\n actions = list(self.vi.Q[self.states.index(self.attention)])\n actionIndex = actions.index(max(actions))\n actionName = self.actions[actionIndex]\n # execute the best action for the currently attended node\n self.nodes[actionName].Activate()\n self.lastAction = actionName\n self.lastState = self.attention", "def startKuri(self):\n if self.option == 'c':\n self.useChat()\n elif self.option == 's':\n self.useSpeech()", "def decide_place(self, action):\n pass", "def competition_mode(self, on):\n pass", "def competition_mode(self, on):\n pass", "def setMode(cls, mode):\n global CURRENT_MODE\n assert isinstance(mode, cls), \"Invalid mode {}\".format(mode)\n CURRENT_MODE = mode", "def executeAction(self,**kwargs):\n try:\n action = kwargs[\"fname\"]\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n\n entries = {}\n pose_offset = 'empty'\n if action in self.bl.getAllSavedActions():\n pose_offset = self.bl.baxter_actions[str(action)]['joint_position']\n entries['Show action only'] = [self.moveBy, pose_offset]\n entries['Show pick up action'] = [self.pickUpActionColour, pose_offset]\n# entries['Add condition'] = self.addEmptyCondition\n# entries['Rename '+str(action)] = [self.renameAction, action]\n entries['Learn '+str(action)] = getattr(self.bl, 'demoAction')\n\n self.mm.addGenericMenu(\"learnMenu\", self.mm.cur_page,\"Action saved as: %s\" % (str(pose_offset)),entries)\n self.mm.loadMenu(\"learnMenu\")", "def mode_menu(self):\n menu = QtWidgets.QMenu()\n for mode in 'edit', 'view', 'split':\n act = QAction(mode.title(), self)\n\n def cb(checked, self=self, mode=mode):\n self.set_mode(mode)\n\n act.triggered.connect(cb)\n act.setCheckable(True)\n act.setChecked(mode == self.mode)\n menu.addAction(act)\n\n button = self.btn_mode\n point = 
button.position().toPoint() if isQt6 else button.pos() # Qt6 documentation is wrong.\n global_point = button.mapToGlobal(point)\n menu.exec_(global_point)", "def chooseGamemode(self):\n\n # Set the gamemode when user clicks a radio button\n self.GAME_MODE = self.gamemode_var.get()", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def mannequin_mode(self, event=None):\n if not self._mannequin_mode:\n self.set_action_status_message('mannequin_mode', 'requested')\n subprocess.Popen(['rosrun', 'pr2_controller_manager', \n 'pr2_controller_manager', 'stop', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mm_process = subprocess.Popen(['roslaunch',\n 'pr2_mannequin_mode', 'pr2_mannequin_mode.launch'], stdout=DEVNULL)\n self._mannequin_mode = True\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled on')\n else:\n self.set_action_status_message('mannequin_mode', 'requested')\n self._mm_process.send_signal(signal.SIGINT)\n subprocess.Popen(['rosrun', 'pr2_controller_manager',\n 'pr2_controller_manager', 'start', 'GPSPR2Plugin'], stdout=DEVNULL)\n self._mannequin_mode = False\n self.set_action_status_message('mannequin_mode', 'completed',\n message='mannequin mode toggled off')", "def mode_auto(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Bot Auto Mode Set\")\n self.patrol()", "def switch(self, context):\n return", "def choose_action(self, action, params, event):\n if REQ_AND_PARAMS.get(action) != len(params):\n print(\"invalid request\")\n self.send_message(INVALID_REQ)\n return\n if action == STREAM_ACTION:\n path = choose_song(params[0])\n self.stream_song(path, event)\n elif action == LOGIN_ACTION:\n self.login_check(params[0], params[1])\n elif action == ADD_ACTION:\n self.add_check(params[0], params[1])\n elif action == DOWNLOAD_ACTION:\n self.download_song(params[0])\n elif action == PAUSE_ACTION:\n self.pause = True\n event.clear()\n elif action == UN_PAUSE_ACTION:\n self.pause = False\n event.set()\n elif action == FORWARD_ACTION:\n self.skip_q.put(FORWARD_ACTION)\n elif action == BACKWARD_ACTION:\n self.skip_q.put(BACKWARD_ACTION)\n elif action == STOP:\n self.skip_q.put(STOP)\n elif action == CREATE_PL_ACTION:\n self.create_new_pl(params)\n elif action == GET_ALL_SONGS:\n self.get_all_songs()\n elif action == GET_ALL_PLS_OF_USER:\n self.get_all_pls_of_user(params[0])\n elif action == GET_SONGS_IN_PL:\n self.get_all_songs_in_pl(params[0])\n elif action == REMOVE_SONG_FROM_PL:\n self.remove_song_from_pl(params[0], params[1])\n elif action == ADD_SONG_TO_PL:\n self.add_song_to_pl(params[0], params[1])\n elif action == UNLINK_PLAYLIST:\n self.delete_pl(params[0], params[1])", "def execute_action(self, action):\n if self.game_over or len(self.agent_locs) == 0:\n pass\n elif action.startswith(\"MOVE \"):\n direction = ORIENTATION[action[5:]]\n flip = 2 if direction == 6 else 0\n if direction < 4:\n self.execute_actions(direction + 1)\n else:\n # Relative direction. 
Either forward (4) or backward (6)\n direction = self.orientation ^ flip\n self.execute_actions(direction + 1)\n self.orientation ^= flip\n self.game_over = self.has_exited().any()\n elif action.startswith(\"TURN \"):\n direction = ORIENTATION[action[5:]]\n self.orientation += 2 - direction\n self.orientation %= 4\n elif action.startswith(\"FACE \"):\n self.orientation = ORIENTATION[action[5:]]\n elif action.startswith(\"TOGGLE\"):\n if len(action) > 6:\n # Toggle in a particular direction\n direction = ORIENTATION[action[7:]]\n else:\n direction = self.orientation\n self.execute_actions(direction + 5)\n elif action in (\"RESTART\", \"ABORT LEVEL\", \"PREV LEVEL\", \"NEXT LEVEL\"):\n self.game_over = action\n return 0", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def mode(self):\n return self._lift(\"mode\")", "def request_action(self):\n\n # Determine the computer player's desired action\n action = \"r\" if self._current_score < min(25, (100 - (self._total_score + self._current_score))) else \"h\"\n # Return the action\n return action", "def _sketch_mode(self):\r\n self._mode_select(1)", "def choose_action(self, board, possible_actions):\r\n pass", "def _execute_action(self, action):\n if action['type'] == 'http':\n self._execute_action_http(action)\n elif action['type'] == 'mail':\n self._execute_action_mail(action)\n elif action['type'] == 'chat':\n pass\n elif action['type'] == 'printer':\n self._execute_action_printer(action)\n elif action['type'] == 'smb':\n self._execute_action_smb(action)\n\n # Wait for a randomized interval.\n time.sleep(random.randint(1, 5))" ]
[ "0.67960316", "0.66079754", "0.63345975", "0.61656135", "0.6149419", "0.61217403", "0.6095867", "0.6050132", "0.60292625", "0.60050106", "0.5970908", "0.5939976", "0.592196", "0.59155333", "0.5853063", "0.58509487", "0.58477354", "0.5835604", "0.5818035", "0.5796116", "0.57629275", "0.575259", "0.57414037", "0.5741145", "0.5724242", "0.5722696", "0.5717563", "0.57030874", "0.56965", "0.56808144", "0.5669432", "0.5666934", "0.5656632", "0.56547785", "0.56444764", "0.56406343", "0.5620561", "0.5616701", "0.55954176", "0.55912894", "0.5585276", "0.5581446", "0.55782086", "0.5576071", "0.5560484", "0.5554388", "0.55395544", "0.55365074", "0.55361927", "0.5533722", "0.5517307", "0.55143666", "0.55143666", "0.55143666", "0.5505579", "0.5497121", "0.5466255", "0.5457007", "0.5450339", "0.5431009", "0.5425237", "0.5419112", "0.54171026", "0.54164964", "0.54033756", "0.5403009", "0.5402314", "0.53969383", "0.53969383", "0.53968906", "0.53880584", "0.53876984", "0.5373759", "0.5370497", "0.5369673", "0.5369673", "0.5368105", "0.5364833", "0.5360828", "0.53478974", "0.5347534", "0.5346066", "0.5346066", "0.5333551", "0.5332969", "0.53196275", "0.5312639", "0.53071254", "0.5302534", "0.5302534", "0.5302534", "0.5302534", "0.5302534", "0.5302534", "0.5302225", "0.5301631", "0.52960676", "0.527805", "0.52767205", "0.52701133" ]
0.59804595
10
Crawls all requested bug data and bug IDs. Saves them to files (bugIDListP.pickle, bugIDList.csv, bugsData.txt) and/or MongoDB collections (BugIDs, BugsData), depending on which were provided at initialization.
def get_all_bugs(self) -> List: #starting point offset = 0 #list for all bugs resultBugList = [] #list for bug IDs bugIDList = [] #checks if there are still results returned notEmpty = True #queries in 500 bug steps until the result list is empty while notEmpty: print("entered") #interpretation of result as list plus formatting for eval errors result = ast.literal_eval(self.session.get(self.bugURL + "&offset=" + str(offset)).text. replace('true', 'True').replace('false', 'False').replace('null', 'None'))["bugs"] #checks if the query needs to be set again with a new offset if result: resultBugList += result else: notEmpty = False #gets the ID out of all comments partList = [bug["id"] for bug in result] bugIDList += partList #sets new starting point offset += 500 #inserts bug ids and bugs into db if given one if self.mongoDB: for id in bugIDList: self.mongoDB["BugIDs"].insert_one({"ID": id}) self.mongoDB["BugsData"].insert_many(resultBugList) #creates files for bug ids and bugs if given a folder if self.folder: #saves bug list as python object with open(self.folderpath + "bugIDListP.pickle", "wb") as a: pickle.dump(bugIDList, a) #saves bug list as csv with open(self.folderpath + "bugIDList.csv", "w") as b: for id in bugIDList: b.write(str(id) + "\n") with open(self.folderpath + "bugsData.txt", "w") as c: for bug in resultBugList: c.write(str(bug) + "\n") #returns List Object for further processing return(bugIDList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")", "def get_bug_data(self, current_date=None):\n start_time = time.time()\n bug_data = self.web_connection.get_async_data_using_asyncio_paginated(self.bug_url, self.web_constants, 5)\n end_time = time.time()\n # print(f\"Commit data using Parallel (asyncio)\\n {commit_data}\\n\\n\")\n print(f\"Time Taken to Fetch Bug Details {end_time - start_time}\")\n\t\t\n bugs_parser = BugsJsonParser()\n bug_list_df = bugs_parser.parse_json(bug_data)\n\n if current_date is None:\n current_date = datetime.today().strftime('%Y-%m-%d')\n directory = f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}\"\n CDPConfigValues.create_directory(directory)\n bug_list_df.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/\"\n f\"{CDPConfigValues.project_issue_list_file_name}\",\n index=False)\n else:\n bug_list_df.to_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\", index=False)\n\n return bug_list_df", "def getIssues(db, data):\n start = datetime.utcnow() # Time this and log how long refreshing took.\n try:\n cur = getRelevantIssues(db, data)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n count = 0\n dbd_data = {\n # TODO: make sets of these to make the lookups below faster\n \"SLA\": data.get(\"SLA\", []),\n \"FTS\": data.get(\"FTS\", []),\n \"REV\": [], # Just refresh these every time\n \"UNA\": data.get(\"UNA\", []),\n \"active\": data.get(\"active\", {}),\n \"waiting\": data.get(\"waiting\", {})\n }\n\n try:\n revIssues = getREVIssues(db)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n updated_data = {\n \"SLA\": [],\n \"FTS\": [],\n \"REV\": revIssues,\n \"UNA\": []\n }\n for i in cur:\n count += 1\n issue = SupportIssue().fromDoc(i)\n\n # Keep track of the totals:\n # --- Active issue count ---\n if issue.isActive():\n dbd_data['active'][issue.key] = 1\n elif issue.key in dbd_data['active']:\n del dbd_data['active'][issue.key]\n # --- Waiting For Customer issue count ---\n if issue.isWFC() and not issue.doc['deleted']:\n dbd_data['waiting'][issue.key] = 1\n elif issue.key in dbd_data['waiting']:\n del dbd_data['waiting'][issue.key]\n\n # For each category, see if the issue belongs, and if not, remove it\n # from the dashboard issues if it was there.\n if isSLA(issue):\n updated_data[\"SLA\"].append(trimmedSLAIssue(issue))\n else:\n 
removeCompressedIssueIfPresent(issue, dbd_data[\"SLA\"])\n if isFTS(issue):\n updated_data[\"FTS\"].append(trimmedFTSIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"FTS\"])\n if isUNA(issue):\n updated_data[\"UNA\"].append(trimmedUNAIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"UNA\"])\n\n mergeAndSortIssues(dbd_data, updated_data)\n\n duration = datetime.utcnow() - start\n logger.info(\"getIssues took {0}, count: {1}\".format(duration, count))\n return dbd_data", "def get_bugs(self, year):\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n for bug in helpers.load_json(path):\n yield bug", "def get_event_data(self, ):\n \n if os.path.exists(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\"):\n self.bug_data_frame = pd.read_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\")\n else:\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(f\"{self.cdp_dump_path}/github_events_cdp_dump.csv\", encoding='utf-8-sig',\n index=False)\n event_parser = EventsJsonParser()\n event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants, self.event_data_frame,\n f\"{self.cdp_dump_path}/\"\n f\"{CDPConfigValues.closed_events_list_file_name}\")", "def __init__(self, restUrl: str,\n mode: CrawlMode = CrawlMode.NO,\n loginUrl: str = None,\n loginName: str = None,\n loginPW: str = None,\n furtherparams: str = None,\n workers: int = 10,\n mongoDB: Database = None,\n foldername: str = None,\n bugList: Union[List, str] = None) -> None:\n\n self.session = requests.session()\n\n self.workers = workers\n\n if loginUrl:\n #bugzilla user data\n user = loginName\n pw = loginPW\n\n #login process\n loginURL = loginUrl\n self.session.post(loginURL, {'Bugzilla_login': 
user, 'Bugzilla_password': pw})\n\n #checks for the right ending of restUrl\n if restUrl[-1] != '/':\n restUrl += '/'\n\n #prepares URLs for crawling of bugs and comments\n self.bugURL = restUrl + 'bug?limit=500' + furtherparams\n self.commentURL = restUrl + 'bug/{}/comment'\n\n #database if given one\n self.mongoDB = mongoDB\n\n #foldername if given one\n self.folder = foldername\n if foldername:\n #creates directory\n self.createFolder(foldername)\n self.folderpath = foldername + '/'\n\n #checks on which crawl operation to execute\n self.decide_action(mode, bugList)", "def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))", "def save_bugs(self, year, chunk, bugs, errors=None):\n directory = self.get_bugs_path(year)\n return self._save(directory, chunk, bugs, errors, switch='bugs')", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def parse_log_and_populate_db(self,start_issue,stop_issue):\n \n for issue_num in range(start_issue,stop_issue+1):\n try:\n police_log=PoliceLog.objects.get(issue_number__exact=issue_num)\n except PoliceLog.DoesNotExist:\n #\n # if issue doesn't exist in db, then go to next issue\n pass\n else:\n if len(police_log.filename)>0:\n #\n # in order to parse log file, must have filename\n L=self.parse_log(police_log.filename,\n police_log.pub_date.year)\n\n else:\n L=[]\n \n #\n # add each report to db\n for report in L:\n #\n # hash string is digest of (issue_number, crime category, original text)\n # this should ensure a unique hash\n hasher = hashlib.md5()\n hasher.update(str(police_log.issue_number))\n hasher.update(report['category'])\n hasher.update(report['original_text'])\n\n crime=CrimeReport(hash=hasher.hexdigest(),\n policelog=police_log, # foreign key: specify police log object\n category=report['category'],\n original_text=report['original_text'],\n line_num=report['line_num'],\n address=report['address'],\n map_scale=report['map_scale'],\n date=datetime.date(report['date_year'],\n report['date_month'],\n report['date_day']))\n \n\n # add lat-long coordinates to crime report\n (lat,long)=self.geocoder.geocode(crime.address,crime.map_scale)\n crime.lat=lat\n crime.long=long\n\n crime.save()", "def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. 
for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))", "def main():\n\n database = MongoDbUtil('ro').database()\n\n tag = 'Px1id'\n daemons = ['daq_files_watcher', 'jobs_validator', 'submitter']\n colls = ['%s_%s'%(coll, tag) for coll in daemons]\n\n datas = []\n for daemon, coll in zip(daemons, colls):\n last_doc = database[coll].find().skip(database[coll].count()-1)[0]\n accum_stats = last_doc['accum_stats']\n\n vals = {}\n timestamps = []\n for key in accum_stats.keys():\n vals[key] = []\n\n for doc in database[coll].find():\n timestamps.append(doc['date'])\n for key in vals:\n vals[key].append(doc['accum_stats'][key])\n\n urls = []\n for key in vals:\n urls.append(draw(timestamps, vals[key], daemon, key))\n\n datas.append({'title': daemon, 'urls': urls})\n\n make_index_file(tag, datas)", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. 
Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def bugs_to_csv(promptArgs=False):\n username = os.environ['USER']\n cachedir = \"/tmp/\" + username + \"/.cache/.launchpadlib\"\n anon_or_auth = 'anon_'\n\n if promptArgs is False:\n launchpad = Launchpad.login_anonymously(\n 'anonymously', 'production', cachedir, version='devel')\n\n # Clear credentials if they already existed and check for X11 forwarding\n elif promptArgs is True:\n def no_credential():\n print(\"Can't proceed without Launchpad credential.\")\n sys.exit()\n\n if os.path.isfile(cachedir+'/auth.txt'):\n os.remove(cachedir+'/auth.txt')\n\n try:\n os.environ['DISPLAY']\n except KeyError:\n raise ValueError('X11 Disabled (or) and DISPLAY Variable unset')\n\n launchpad = Launchpad.login_with(\n 'authorize', 'production', cachedir,\n credentials_file=cachedir + '/auth.txt',\n credential_save_failed=no_credential, version='devel')\n anon_or_auth = 'authorized_'\n else:\n raise ValueError(\"Prompt argument was not a boolean\")\n\n # Try to get bugs and place to csv file, if stopped midway or finishes,\n # delete authentication credentials (if used)\n try:\n project = launchpad.projects['starlingx']\n\n bugs = project.searchTasks(status=ALL_STATUSES, omit_duplicates=False)\n currentDate = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n \"\"\"\n file_n = 'launchpad-bugs-' + anon_or_auth + currentDate + '.csv'\n print('Destination file is: ' + WORK_DIR + '/' + file_n)\n\n if os.path.isfile(DEST_DIR + file_n):\n updateFileQuestion = \"File Exists, do you want to overwrite it?\"\n overWriteFile = query_yes_no(updateFileQuestion, \"no\")\n if overWriteFile is False:\n raise ValueError(\"Overwrite existing file is False\")\n with open(DEST_DIR + file_n, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(fieldnames)\n for each_bug in bugs:\n bugInfoString = get_bug_info_tuple(each_bug)\n writer.writerow(bugInfoString)\n \"\"\"\n # create my workbook\n workbook_filename = 'stx_lp_workbook-' + anon_or_auth + currentDate + '.xlsx'\n workbook = xlsxwriter.Workbook(WORK_DIR + \"/\"+ workbook_filename)\n print \"Start writing LP data to worksheets according to the tag ......\"\n worksheet_dict = {}\n row_dict = {}\n worksheet_dict['all_open'] = workbook.add_worksheet('all_open')\n worksheet_dict['all_open'].write_row(0, 0, list(fieldnames))\n row_dict['all_open'] = 1\n for tag in targetTags:\n worksheet_dict[tag] = workbook.add_worksheet(tag)\n worksheet_dict[tag].write_row(0, 0, list(fieldnames))\n row_dict[tag] = 1\n for each_bug in bugs:\n bugInfoStringList = get_bug_info_tuple(each_bug)\n worksheet_dict['all_open'].write_row(row_dict['all_open'], 0, bugInfoStringList)\n row_dict['all_open'] += 1\n for tag in targetTags:\n if tag in each_bug.bug.tags:\n worksheet_dict[tag].write_row(row_dict[tag], 0, bugInfoStringList)\n row_dict[tag] += 
1\n bugId = str(each_bug.bug.id)\n row = row_dict[tag]\n print \"writting LP \" + bugId + \" at row #\" + str(row) + \" into sheet: \" + tag\n workbook.close()\n # old mehtod: totally loop number of tags * number of bugs\n # not efficient enough, so commented those lines out\n \"\"\"\n for tag in targetTags:\n worksheet = workbook.add_worksheet(tag)\n # with each of sheets (named under targeted tag),\n # loop all bugs to find the bug with such a tag,\n # and wite the bug one by one into this sheeet.\n row = 0;\n worksheet.write_row(row, 0, list(fieldnames))\n print worksheet\n #worksheet = stx_lp_workbook.get_worksheet_by_name(tag)\n for each_bug in bugs:\n if tag in each_bug.bug.tags:\n row += 1\n bugId = str(each_bug.bug.id)\n bugInfoString = get_bug_info_tuple(each_bug)\n worksheet.write_row(row, 0, bugInfoString)\n print \"writting LP \" + bugId + \" at row #\" + str(row+1) + \" into sheet: \" + tag\n\n print \"Complete writting worksheets and closed workbook!\"\n workbook.close()\n \"\"\"\n except BaseException as e:\n print e.message, e.args\n\n finally:\n workbook.close()\n if anon_or_auth == 'authorized_':\n os.remove(cachedir + '/auth.txt')", "def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)", "def fetch_cases():\n logger.info(\"Start fetching cases\")\n fb = fogbugz.FogBugz(\n settings.AUTH_FOGBUGZ_SERVER,\n settings.FOGBUGZ_TOKEN)\n release_query = ' OR '.join('milestone:\"{0}\"'.format(release.number) for release in Release.objects.all())\n 
resp = fb.search(\n q='({0}) AND ({ciproject}:\"*\")'.format(release_query, ciproject=settings.FOGBUGZ_CI_PROJECT_FIELD_ID),\n cols='sTitle,sOriginalTitle,sFixFor,dtFixFor,sProject,sArea,dtLastUpdated,tags,' +\n settings.FOGBUGZ_CI_PROJECT_FIELD_ID\n )\n cases = resp.findAll('case')\n logger.info('Found %s cases to fetch from fogbugz', len(cases))\n for case_xml in cases:\n update_case_from_fogbugz.apply_async(kwargs=dict(case_id=int(case_xml.attrs['ixbug'])))\n logger.info(\"Task finished\")", "def run(self):\n comment_df_list = []\n post_df_list = []\n subreddit_df_list = []\n\n reddit = sr.reddit_interface()\n subreddits = reddit.subreddits.popular(limit = SUBREDDIT_LIMIT) # Lists the top 50 subreddits\n\n for subreddit in subreddits:\n top_posts = reddit.subreddit(str(subreddit)).top()\n for post in top_posts:\n if not post.stickied:\n post_list = [post.id, str(post.subreddit), post.title, post.num_comments]\n post.comments.replace_more(limit = 0)\n for comment in post.comments.list():\n comment_list = [str(comment.parent()), comment.id, comment.body, int(comment.score)]\n comment_df_list.append(comment_list)\n post_df_list.append(post_list)\n subreddit_df_list.append([str(subreddit)])\n\n comment_df_list = pd.DataFrame(comment_df_list, columns = COMMENTS_COLUMNS)\n post_df_list = pd.DataFrame(post_df_list, columns = POSTS_COLUMNS)\n subreddit_df_list = pd.DataFrame(subreddit_df_list, columns =['Subreddit'])\n reddit_df = [subreddit_df_list, post_df_list, comment_df_list]\n sr.save_xlsx(reddit_df, self.output().path)", "def bug_map(self):\n bug_map = defaultdict(list)\n # obtain bug_id/test_id mapping\n with MongoConnection(self.host, self.port) as mongo:\n collection = mongo.connection[\"kdetector\"][\"dataset\"]\n dataset = collection.find()\n for data in dataset:\n bug_id, test_id = data[\"bug_id\"], data[\"test_id\"]\n bug_map[bug_id].append(test_id)\n return bug_map", "def init():\n\n for subreddit in SUBREDDITS:\n\n writer = csv.writer(open(\"./{}-submissions.csv\".format(subreddit),\n \"w\", newline=\"\", encoding=\"utf-8\"))\n\n # Adding the header.\n writer.writerow([\"datetime\", \"author\", \"title\", \"url\", \"domain\"])\n\n print(\"Downloading:\", subreddit)\n download_submissions(subreddit=subreddit)\n writer.writerows(SUBMISSIONS_LIST)\n\n SUBMISSIONS_LIST.clear()", "def get_defectdojo_findings(filename):\n\n acunetix_scan_report = get_acunetix_scan_report(filename)\n defectdojo_findings = []\n for report_item in acunetix_scan_report.ReportItems:\n defectdojo_finding = dict()\n\n if \"Affects\" in report_item:\n affects = (\" ({})\".format(report_item['Affects']))\n else:\n affects = \"\"\n defectdojo_finding['title'] = \"{}{}\".format(report_item['Name'], affects)\n defectdojo_finding['date'] = acunetix_scan_report.StartTime\n defectdojo_finding['cwe'] = report_item['CWEId']\n defectdojo_finding['url'] = acunetix_scan_report.StartURL\n defectdojo_finding['severity'] = report_item['Severity']\n defectdojo_finding['description'] = get_html2text(report_item['Description'])\n if \"Details\" in report_item and len(report_item['Details'].strip()):\n defectdojo_finding['description'] += \"\\n**Details:**\\n{}\\n\".format(report_item['Details'])\n if \"TechnicalDetails\" in report_item and len(report_item['TechnicalDetails'].strip()):\n defectdojo_finding['description'] += \"\\n**Technical Details:**\\n{}\\n\".format(report_item['TechnicalDetails'])\n defectdojo_finding['mitigation'] = get_html2text(report_item['Recommendation'])\n defectdojo_finding['impact'] = 
get_html2text(report_item['Impact'])\n defectdojo_finding['references'] = ''\n for ref in report_item['ReferencesURLs']:\n defectdojo_finding['references'] += \"{}\\n\".format(ref)\n defectdojo_finding['false_p'] = report_item['IsFalsePositive']\n\n finding = DefectDojoFinding(**defectdojo_finding)\n defectdojo_findings.append(finding)\n\n return defectdojo_findings", "def __load_bugs(self):\n bugs = []\n with open(self.reffile(), 'rb') as reffile:\n reader = csv.reader(reffile, delimiter=';', quotechar='\\n')\n for line in reader:\n bugs.append(tuple(map(int, line)))\n return bugs", "def scrape_data(self):\n ## OPEN EMPTY CSV FILE\n self.write_into_csv()\n \n ## READ POSTCODES\n postcodes = self.read_postcodes()\n\n for postcode in postcodes:\n\n sleeptime = round(random.uniform(0.5, 1.0), 2)\n time.sleep(sleeptime)\n \n self.get_url_response(postcode)\n\n ## WRITE DATA INTO CSV FILES\n atms = [v for k, v in self.ATMS.items( )] \n if atms:\n self.write_into_csv(atms, 'atm')\n\n branches = [v for k, v in self.BRANCHES.items()]\n if branches:\n self.write_into_csv(branches, 'brc')", "def crawlDocuments(docIds, skipIssns):\n rootLog = logging.getLogger('')\n successCount = 0\n consecErrorCount = 0\n fileLogHandler = None\n for i, docIdTuple in enumerate(docIds):\n docId, srcDir = docIdTuple\n removeLocks()\n checkCreateLock(srcDir)\n if fileLogHandler is not None:\n rootLog.handlers.remove(fileLogHandler)\n fileLogHandler = pubGeneric.logToFile(join(srcDir, 'crawler.log'))\n todoCount = len(docIds) - i\n logging.info('--- Crawling document with ID %s, dir %s (%d IDs left)' % (docId, srcDir, todoCount))\n webCache.clear()\n try:\n artMeta = getArticleMeta(docId)\n except pubGetError:\n writeDocIdStatus(srcDir, docId, 'no meta', '')\n continue\n\n logging.info('Got Metadata: %s, %s, %s' % (artMeta['journal'], artMeta['year'], artMeta['title']))\n try:\n checkIssnErrorCounts(artMeta, skipIssns, srcDir)\n paperData = crawlOneDoc(artMeta, srcDir)\n writePaperData(docId, artMeta, paperData, srcDir)\n consecErrorCount = 0\n successCount += 1\n except pubGetError as e:\n consecErrorCount += 1\n docId = artMeta['pmid']\n writeDocIdStatus(srcDir, docId, e.logMsg, e.longMsg, e.detailMsg)\n issnYear = getIssnYear(artMeta)\n issnYearErrorCounts[issnYear] += 1\n if e.logMsg not in ('noOutlinkOrDoi', 'unknownHost', 'noLicense'):\n waitSec = ERRWAIT * consecErrorCount\n logging.debug('Sleeping for %d secs after error' % waitSec)\n time.sleep(waitSec)\n if consecErrorCount > MAXCONSECERR:\n logging.error('Too many consecutive errors, stopping crawl')\n e.longMsg = 'Crawl stopped after too many consecutive errors / ' + e.longMsg\n raise\n if DO_PAUSE:\n raw_input('Press Enter to process next paper...')\n except:\n raise\n\n logging.info('Downloaded %d articles' % successCount)\n removeLocks()\n if fileLogHandler != None:\n rootLog.handlers.remove(fileLogHandler)\n return", "def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # Each issue fetched is being generated with our schema.\n for issue in jira_issues:\n try:\n jira_dict = jira_obj_to_dict(issue, now)\n defect = create_defect(jira_dict, issue)\n defects.append(defect)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(issue.key, e))\n logger.debug(\"Missing values 
{}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n\n return post_defects(project, jira_issues, defects)", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues", "def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)", "def update_issue_tracker():\n # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in\n # the last 24 hours.\n for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,\n projection=[Flake.count_day]):\n logging.info('Created processing task for %s' % flake.key)\n taskqueue.add(queue_name='issue-updates',\n url='/issues/process/%s' % flake.key.urlsafe())", "def main():\n\n conn = psycopg2.connect(**env.DATABASE)\n cursor = conn.cursor()\n\n for file, city in env.supported_cities().items():\n try:\n data = add_metadata(parse_html(city, get_html(city)))\n save_data_to_db(cursor, data, file.title())\n except Exception as e:\n print(\"Failed to scrape '%s': %s\" %(city, e))\n print(traceback.format_exc())\n\n conn.commit()\n conn.close()", "def exportToDB(self, submissions):\n for p in range(len(submissions)):\n for x in range(len(submissions[p])):\n doc_ref = self.fs_db.collection(u'reddit').document(str(submissions[p][4]))\n doc_ref.set({\n u'content': str(submissions[p][0]),\n u'upvote_ratio': str(submissions[p][1]),\n u'score': submissions[p][2],\n u'title': submissions[p][3],\n u'id': submissions[p][4],\n u'total_awards_received': submissions[p][5],\n u'created_utc': submissions[p][6]\n 
})", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n # Extract bug type\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n # Get whether or not the bug was reproduced\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n # Skip the 'Attempted to reproduce' line if exists\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n # Populate the sequence of requests that made the bug\n while line and not line.startswith(BUG_START):\n seq += self._get_request(line)\n line = file.readline()\n # Add the bug sequence to the bug list\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(\"Failed to read bug log. Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "def get_data(self):\n\n all_data = OrderedDict()\n projects = [Path(proj) for proj in glob(str(self.data_path.joinpath(\"*\"))) if Path(proj).is_dir()]\n\n for project in projects:\n files = []\n \n # Read all csv files and save them as a list in files\n for ver in glob(str(project.joinpath(\"*.csv\"))):\n files.extend(pd.read_csv(ver, usecols=['time', 'buggy']).values.tolist())\n \n # Create a pandas dataframe from the csv sorted by datetime\n df = pd.DataFrame(files, columns=['Time', 'Bugs']).sort_values(by='Time').reset_index(drop=True)\n \n # Convert time to Pandas DateTime format\n df['Time'] = pd.to_datetime(df['Time']) \n \n # Group bug counts by week starting on monday\n df = df.reset_index().set_index('Time').groupby(\n [pd.Grouper(freq='W-MON')])[\"Bugs\"].sum().astype(int).reset_index()\n \n df = df.set_index('Time')\n # Save the data to dictionary\n all_data.update(OrderedDict({project.name: df}))\n\n return all_data", "def main():\n exit_if_already_started()\n while True:\n for timeframe in ['all', 'month', 'week']:\n subreddits = load_list('subs.txt')\n while subreddits:\n # Grab all images/comments from sub, remove from list\n parse_subreddit(subreddits.pop(0), timeframe)", "def get_posts(self):\r\n\r\n sub_dict = {\r\n 'selftext': [], 'title': [], 'id': [], 'sorted_by': [],\r\n 'num_comments': [], 'score': [], 'ups': [], 'downs': []}\r\n csv = f'{self.sub}_posts.csv'\r\n\r\n # Attempt to specify a sorting method.\r\n sort, subreddit = self.set_sort()\r\n\r\n # Set csv_loaded to True if csv exists since you can't\r\n # evaluate the truth value of a DataFrame.\r\n df, csv_loaded = (pd.read_csv(csv), 1) if isfile(csv) else ('', 0)\r\n\r\n print(f'csv = {csv}')\r\n print(f'After set_sort(), sort = {sort} and sub = {self.sub}')\r\n print(f'csv_loaded = {csv_loaded}')\r\n\r\n print(f'Collecting information from r/{self.sub}.')\r\n\r\n for post in subreddit:\r\n\r\n # Check if post.id is in df and set to True if df is empty.\r\n # This way new posts are still added to dictionary when df = ''\r\n unique_id = post.id not in tuple(df.id) if csv_loaded else True\r\n\r\n # Save any unique posts to sub_dict.\r\n if unique_id:\r\n sub_dict['selftext'].append(post.selftext)\r\n sub_dict['title'].append(post.title)\r\n sub_dict['id'].append(post.id)\r\n sub_dict['sorted_by'].append(sort)\r\n sub_dict['num_comments'].append(post.num_comments)\r\n sub_dict['score'].append(post.score)\r\n sub_dict['ups'].append(post.ups)\r\n 
sub_dict['downs'].append(post.downs)\r\n sleep(0.1)\r\n\r\n new_df = pd.DataFrame(sub_dict)\r\n\r\n # Add new_df to df if df exists then save it to a csv.\r\n if 'DataFrame' in str(type(df)) and self.mode == 'w':\r\n pd.concat([df, new_df], axis=0, sort=0).to_csv(csv, index=False)\r\n print(\r\n f'{len(new_df)} new posts collected and added to {csv}')\r\n elif self.mode == 'w':\r\n new_df.to_csv(csv, index=False)\r\n print(f'{len(new_df)} posts collected and saved to {csv}')\r\n else:\r\n print(\r\n f'{len(new_df)} posts were collected but they were not '\r\n f'added to {csv} because mode was set to \"{self.mode}\"')", "def populate_bugstates(session, abbrev=False, commit_interval=None):\n\n # This code is really, really tricky. I don't know how I could have written it more clearly.\n # Maybe it should have been broken up more into subroutines.\n\n bugs = utils.open_data_file('All_bugs.csv')\n bugs.readline()\n bzids = [line.split(',')[0] for line in bugs]\n\n # XXX: There are 27 bugs in All_bugs.csv that are not in the DB for semi-obscure reasons (history not available)\n missing_f = utils.open_data_file('missing.txt')\n missing_ids = [line.strip() for line in missing_f]\n missing_f.close()\n\n if abbrev:\n bzids = bzids[:4] # This number just keeps going up as I incrementally debug\n bugs.close()\n\n # All Months\n months = MonthSet.from_session(session)\n niters = 0\n for bzid in bzids:\n if bzid in missing_ids:\n print \"Skipping missing id \" + bzid\n continue\n # I don't know how I got away for so long without casting this...\n bzid = int(bzid)\n\n # Skip any ids already in the db\n if session.query(BugState).filter_by(bugid=bzid).count():\n #print \"Skipping existing bug with id \" + str(bzid)\n continue\n\n niters += 1\n\n if commit_interval and niters % commit_interval == 0:\n session.commit()\n\n try:\n bp = bs.BugPage(bzid)\n except bs.ForbiddenBugException:\n print \"Skipping forbidden bug \" +str(bzid)\n continue\n base = BugState.from_bugpage(bp)\n\n base_bug = session.query(Bug).filter_by(bzid=base.bugid).scalar()\n\n reported = base_bug.reported\n # Is this even right? There won't always be an appropriate month to use\n base.month = months.monthafter(reported)\n history = bs.BugHistory(bzid)\n\n # Add duplicate events, which we have to construct ourself (they're not in the history)\n history.events += duplicate_deltas(bp)\n #print \"DON'T FORGET TO PUT THIS BACK IN\"\n \n history.sort(True)\n state = base.copy() # Holds the current state of the bug\n # The first month when this bug exists. (For our purposes) (This is in fact\n # the first month following the month containing the date the bug\n # was reported. BugStates describe the bug at the BEGINNING of the month.)\n first_month_index = months.after(reported)\n try:\n # XXX: I guess history is in reverse chrono order?\n last_change = history[0].date\n except IndexError:\n # If there are no changes, then the last change was the creation of the bug\n last_change = base_bug.reported\n\n # This is the index into months at which changes stop happening to the bug\n first_stagnant_month_index = months.after(last_change)\n # For each month where nothing further happens, we can just save the final bug state\n\n for month in months[first_stagnant_month_index:]:\n\n state.month = month\n session.add(state)\n # Need to do this every time, or they'll have identical ids\n state = state.copy()\n\n # OFF BY ONE ERRORS ARE THE DEVIL\n # -1 means the last element in a Python list. Do the math.\n # This actually fucks us on BOTH indices. 
Jesus.\n if first_stagnant_month_index == 0:\n continue\n terminus = first_month_index-1 if first_month_index else None\n\n # Find the index of the \"first\" (last chronologically) event that falls in the range\n # of months, and initialize our first delta correspondingly\n # Also, apply and then throw away all later deltas\n history_index = 0\n delta = None\n last_month = months[first_stagnant_month_index-1]\n first_month = months[first_month_index]\n for (index, event) in enumerate(history):\n history_index = index\n if event.date > last_month.last:\n state.apply_delta(event)\n elif event.date < first_month.first:\n # We skipped right through the relevant time period\n delta = event\n break\n else:\n # We've just hit on the first event we want to consider\n delta = event\n break\n else:\n # Ah, the rare \"else on for loop\" construct in the wild! Execute if we didn't break.\n # In this case, there are no deltas left\n history_index += 1\n assert len(history) == history_index\n\n\n\n for month in months[first_stagnant_month_index-1:terminus:-1]:\n while history_index < len(history) and month.first <= delta.date <= month.last:\n\n state.apply_delta(delta)\n history_index += 1\n if history_index == len(history):\n break\n delta = history[history_index]\n\n state.month = month\n session.add(state)\n state = state.copy()\n\n session.commit()\n\n if not abbrev:\n session.commit()", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been saved.')\n else:\n log.info('USDA data file already exists.')", "def test_save_historical_submission_comments():\n data = []\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n for item in threads:\n data.append(item.d_)\n\n save_historical_submission_comments(data, TEST_SUBREDDIT + '_TEST.csv')", "def process_all():\n\tconfilepath = check_args()\n\tif confilepath != \"\": #check arguments and sets some global variables \n\t\tconfig = read_conf(confilepath) #read config-file\n\t\tinst = get_hgf_institute(config) #check which hgf-institute\n\t\tbuild_or_remove_fielddesc(config) #create/delete fielddescriptors (fields + marctags)\n\t\tinsert_repnr_fielddesc(inst) #report number as hidden input in submit \n\t\tbuild_or_remove_doctypes(config,inst) #create/delete doctypes\n\t\tbuild_or_remove_schema(config) #create/delete collections for submit form\n\t\tgenerate_css(fieldlabels,inst) #create css_file \n\telse: pass", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n 
summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def flush(self):\n\n # save ddocs\n all_ddocs = self.all_docs(startkey=u\"_design\", endkey=u\"_design/\\u9999\", include_docs=True)\n ddocs = []\n for ddoc in all_ddocs:\n doc = ddoc['doc']\n old_atts = doc.get('_attachments', {})\n atts = {}\n for name, info in old_atts.items():\n att = {}\n att['content_type'] = info['content_type']\n att['data'] = self.fetch_attachment(ddoc['doc'], name)\n atts[name] = att\n\n # create a fresh doc\n doc.pop('_rev')\n doc['_attachments'] = resource.encode_attachments(atts)\n\n ddocs.append(doc)\n\n # delete db\n self.server.delete_db(self.dbname)\n\n # we let a chance to the system to sync\n times = 0\n while times < 10:\n if self.dbname in self.server:\n break\n time.sleep(0.2)\n times += 1\n\n # recreate db + ddocs\n self.server.create_db(self.dbname)\n self.bulk_save(ddocs)", "def test_04_add_crash_document(self):\n # get applications\n r = requests.get('%s/settings/applications' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n application_key = re.search('<td>Key</td>.*<td>(.*)</td>', r.content).group(1)\n\n # add a crash\n f = open('./tests/CrashTester-CrashReport.json', 'r')\n r = requests.post('%s/api/v1/crash/%s' % (self.url, application_key), files={'reports': f})\n f.close()\n self.assertEqual(r.status_code, 200)\n result = json.loads(r.content)\n self.assertTrue(result['success'])\n\n # get crash groups\n r = requests.get('%s/crash_groups' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n crash_detail_uri = re.search('href=\"(/crash_groups/\\d+/.+)\" ', r.content).group(1)\n\n # get the crash\n r = requests.get('%s%s' % (self.url, crash_detail_uri), cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n self.assertRegexpMatches(r.content, 'user@gmail.com')", "def main():\r\n windows_driver = '/mnt/c/Users/kurtrm/Documents/bin/chromedriver.exe'\r\n browser = Chrome(executable_path=windows_driver)\r\n\r\n url = 'https://www.pcta.org/discover-the-trail/' \\\r\n 'thru-hiking-long-distance-hiking/2600-miler-list/'\r\n\r\n browser.get(url)\r\n year_range = range(1952, 2018) # Range of years of recorded thru-hikes\r\n\r\n for year in year_range:\r\n select = Select(browser.find_element_by_id('year'))\r\n select.select_by_value(str(year))\r\n time.sleep(1.5)\r\n miler_list = browser.find_elements_by_css_selector('td')\r\n if miler_list[0].text != 'No records found for the selected year.':\r\n people = extract_names(miler_list, year)\r\n load_mongo_db('pct', 'completions', people)", "def scrape_all():\n\n # Scrape team information by season\n for team in scrape_utils.team_names():\n team_season_stats(team)\n # Each season\n print(team)\n for year in range(2019, 2020):\n # Game Logs\n season_game_logs(team, year)\n\n # Starting Lineups\n #player_scraper.get_starting_lineups(team, year)\n\n # Init mongo to get game IDS for box score scraping\n m = mongo.Mongo()\n\n # Game Information (Box Score and Play by Play)\n for year in range(2015, 2020):\n player_scraper.get_starting_lineups(year)\n for game in m.find('game_log', {'season': year}, {'_id': 1}):\n #team_scraper.play_by_play(game['_id'])\n player_scraper.player_box_score(game['_id'])\n\n print(game['_id'])\n\n\n\n # Get player information\n for player in 
scrape_utils.get_active_players():\n print(player)\n player_scraper.player_per_game(player)\n\n # Get betting lines (By Year) need from 2014\n for year in range(2015, 2020):\n team_scraper.betting_lines(2019)", "def save_to_db(self, collect_results):\n\n logger.debug(f'saving {len(collect_results)} report files to database')\n cihpc_mongo = db.CIHPCMongo.get_default()\n\n results = list()\n for item in collect_results:\n\n # save logs first\n if item.logs and item.items:\n log_ids = cihpc_mongo.files.insert_many(item.logs).inserted_ids\n logger.debug(f'inserted {len(log_ids)} files')\n item.update(log_ids)\n\n # insert rest to db\n if item.items:\n results.append(cihpc_mongo.reports.insert_many(item.items))\n logger.debug(f'inserted {len(results)} reports')\n return results", "def bugs(self,active_only=True,name_only=False):\n\n q=\"SELECT {},{},{},{},{},{},{} FROM {} \".format(\n BugDB.NAME_COLUMN,\n BugDB.STEPS_COLUMN,\n BugDB.XB_COLUMN,\n BugDB.OB_COLUMN,\n BugDB.ASS_COLUMN,\n BugDB.CREATED_DATE_COLUMN,\n BugDB.FIXED_COLUMN,\n BugDB.BUG_TABLE,\n\n )\n params=[]\n if active_only:\n q+=\"\"\"\n WHERE {} IS NOT ?\n\"\"\".format(BugDB.FIXED_COLUMN)\n \n params.append(1)\n q+=\" ORDER BY ROWID\"\n with self.cxn:\n cur=self.cxn.cursor()\n for row in cur.execute(q,params): \n name=row[\"bug_name\"]\n if name_only:\n yield name\n else:\n bug={}\n for k in row.keys():\n bug[k]=row[k]\n yield bug", "def crawl_database():\n\n LOGGING.push(\"Attempting to request featured games.\")\n participants = get_featured()\n LOGGING.push(\"Got @\" + str(len(participants)) + \"@ participants.\")\n\n # NOTE: Only 40 summoners can be requested at a time\n participants = random.sample(participants, min(40, len(participants)))\n\n ids = SESSION.get_ids(participants)\n search_players = [ids[player]['id'] for player in ids.keys()]\n\n LOGGING.push(\n \"Now attempting to crawl players with a breadth of @\" +\n str(BREADTH) + \"@ and depth of ^\" + str(DEPTH) + \"^.\"\n )\n\n # NOTE: Creates the original call stack to crawl players\n for player in search_players:\n crawl_player(player, DEPTH, BREADTH)\n\n LOGGING.push(\"Finished crawling database.\")", "def fetch_data(args):\n logger.debug(\"Running the fetch_data function\")\n\n #Loading the config\n with open(os.path.join(\"Config\",\"config.yml\"), \"r\") as f:\n config = yaml.safe_load(f)\n\n #Starting the scraping process\n tstart = datetime.datetime.now()\n err_count = 0\n\n logger.info(\"Starting web scraping now.\")\n for i in range(config[\"fetch_data\"][\"indices\"][\"start\"], config[\"fetch_data\"][\"indices\"][\"end\"]+1):\n try:\n time.sleep(1)\n req_link1 = \"http://www.gutenberg.org/cache/epub/\" + str(i) + \"/pg\" + str(i) + \".txt\"\n response1 = requests.get(req_link1)\n \n req_link2 = \"http://www.gutenberg.org/files/\" + str(i) + \"/\" + str(i) + \"-0.txt\"\n response2 = requests.get(req_link2)\n \n response1.encoding = \"UTF-8\"\n response2.encoding = \"UTF-8\"\n \n if response1.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response1.text)\n \n elif response2.status_code == 200:\n with open(config[\"fetch_data\"][\"save_location\"] + str(i) + \".txt\", \"w\", encoding=\"UTF-8\") as text_file:\n text_file.write(response2.text)\n \n else:\n err_count = err_count + 1 \n logger.error(\"Status Code {} returned for index {}\".format(response.status_code, i))\n \n if i % 500 == 0:\n time.sleep(30)\n logger.info(\"At Index {}. 
Time Elapsed: {}\".format(i, datetime.datetime.now()-tstart)) \n\n except Exception as e:\n logger.error(e)\n \n logger.info(\"Total Errorred documents: {}\".format(err_count))\n logger.info(\"Total Successful documents: {}\".format(config[\"fetch_data\"][\"indices\"][\"end\"] - config[\"fetch_data\"][\"indices\"][\"start\"] + 1 -err_count))\n logger.info(\"Total Time taken: {}\".format(datetime.datetime.now()-tstart))\n\n return", "def main():\n global collection\n #args = argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)", "def list(self, request):\n # Get all game records from the database\n bugs = Bug.objects.all()\n\n # Support filtering bugs by type\n # http://localhost:8000/bugs?type=1\n type = self.request.query_params.get('type', None) #pylint: disable=redefined-builtin\n if type is not None:\n bugs = bugs.filter(type__id=type)\n \n # Support filtering bugs by creator\n # http://localhost:8000/bugs?creator=1\n creator = self.request.query_params.get('creator', None)\n if creator is not None:\n bugs = bugs.filter(creator__id=creator)\n \n\n # Support filtering bugs by owner\n # http://localhost:8000/bugs?owner=1\n owner = self.request.query_params.get('owner', None)\n if owner is not None:\n bugs = bugs.filter(owner__id=owner)\n \n\n serializer = BugSerializer(\n bugs, many=True, context={'request': request})\n return Response(serializer.data)", "def _worker():\n try:\n logger.info('Looping...')\n temp_list = []\n for file in ['data_unfcc.csv','data_ebal.csv']:\n temp_list.append(os.path.isfile(file))\n if not all(temp_list):\n print('Starting from scratch...')\n download_source()\n create_database()\n create_index()\n\n time_mod = datetime.strptime(time.ctime(os.stat('data_ebal.csv').st_mtime),'%a %b %d %H:%M:%S %Y')\n time_now = datetime.now()\n\n if (time_now - time_mod).seconds > 3600:\n download_source()\n get_updated_records('unfcc')\n get_updated_records('ebal')\n create_index()\n except Exception as e:\n logger.warning('Main Loop error')", "def save_historical_submission_comments(list_of_dictionary_submissions, file_name):\n all_comments_list = []\n submission_count = 1\n\n for submission_dict in list_of_dictionary_submissions:\n print('saving comments from submission', submission_count, '/', len(list_of_dictionary_submissions))\n submission_count += 1\n submission = (REDDIT.submission(id=submission_dict['id']))\n\n submission.comments.replace_more(limit=None)\n for comment in submission.comments.list():\n temp_dict = {'body': comment.body, 'comment_id': comment, 'author': comment.author,\n 'created_utc': comment.created_utc, 'permalink': comment.permalink,\n 'link_id': comment.link_id, 'score': comment.score}\n all_comments_list.append(temp_dict)\n print('total comments: ', len(all_comments_list))\n\n comments_df = pd.DataFrame(all_comments_list, columns=['body', 'comment_id', 'author', 'created_utc',\n 'permalink', 'link_id', 'score'])\n\n print(comments_df)\n\n print('saving comments to file:', file_name, '...')\n comments_df.to_csv(file_name)\n print('done.')", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter 
out duplicates", "def files_to_submissions(self):\n url = \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-latest-data-source-details.csv\"\n self.parse_file(url)", "def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def push_historic_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n last_upload = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + relativedelta(weekday=SA(-1))\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n for issue in jira_issues:\n try:\n created = datetime.datetime.strptime(issue.fields.created, DATE_FORMAT)\n jira_dict = jira_obj_to_dict(issue, datetime.datetime.utcnow().strftime(DATE_FORMAT))\n\n historic_data = []\n # Last Friday of the report ran\n report_date = last_upload\n while(report_date > created):\n jira_dict = jira_for_date(jira_dict, issue.changelog, report_date)\n historic_data.insert(0, create_defect(jira_dict, issue))\n report_date -= datetime.timedelta(weeks=1)\n defects.append(historic_data)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(jira_dict[\"key\"], e))\n logger.exception(\"Exception\")\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n defects_as_list = []\n for defect in defects:\n defects_as_list.extend(defect)\n return post_defects(project, jira_issues, defects_as_list)", "def get_events_data_for_scheduler(self, current_date, previous_bug_df, previous_closed_events_df):\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.closed_bug_data_frame = self.closed_bug_data_frame[\n ~(self.closed_bug_data_frame.ISSUE_ID.isin(previous_bug_df.ISSUE_ID))]\n\n if len(self.closed_bug_data_frame) != 0:\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n 
list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/github_events_cdp_dump.csv\",\n encoding='utf-8-sig', index=False)\n event_parser = EventsJsonParser()\n event_df = event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants,\n self.event_data_frame)\n\n event_df = pd.concat([event_df, previous_closed_events_df], ignore_index=True)\n\n return event_df\n\n else:\n return None", "def build_comment_database_pipeline(subreddit, max):\n data_file_name = subreddit + '_30_months_comments'\n cleaned_file_name = data_file_name + '_cleaned'\n standardized_file_name = cleaned_file_name + '_standardized'\n vader_file_name = standardized_file_name + '_vader'\n flair_file_name = vader_file_name + '_flair'\n ibm_tone_file_name = flair_file_name + '_tones'\n\n # get historical data\n comment_data = get_historical_submissions(subreddit, max)\n\n # save to csv\n save_historical_submission_comments(comment_data, data_file_name + '.csv')\n\n # sanitize characters\n print('sanitizing characters')\n sanitize_characters(data_file_name + '.csv', cleaned_file_name + '.csv')\n\n # standardize comments\n generic_run_standardize_comments(cleaned_file_name + '.csv', standardized_file_name + '.csv')\n\n # add vader sentiment scores\n generic_run_vader_sentiment_scores(standardized_file_name + '.csv', vader_file_name + '.csv')\n\n # add flair sentiment score\n add_flair_sentiment_to_csv(vader_file_name + '.csv', flair_file_name + '.csv')\n\n # add ibm tones\n # add_tone_columns_to_csv(flair_file_name + '.csv', ibm_tone_file_name + '.csv')", "def process(self):\n for user in self.repos:\n for repo in self.repos[user]:\n self.process_issues(user, repo)", "def stage6(self):\r\n dbutils = DBUtils()\r\n fits_images_list = glob.glob('/GARUDATA/IMAGING24/CYCLE24/*/FITS_IMAGE/*PBCOR*.FITS')\r\n # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4575/17_024_04NOV09/FITS_IMAGE/A3376-W.GMRT325.SP2B.PBCOR.FITS']\r\n # fits_images_list = ['/GARUDATA/IMAGING17/CYCLE17/4572/17_024_03NOV09/FITS_IMAGE/A3376-E.GMRT325.SP2B.PBCOR.FITS']\r\n counter = 1\r\n for fits_file in fits_images_list:\r\n counter += 1\r\n # fits_file = '/GARUDATA/IMAGING19/CYCLE19/5164/19_085_27DEC10/FITS_IMAGE/1445+099.GMRT325.SP2B.PBCOR.FITS'\r\n\r\n fits_dir = os.path.dirname(fits_file)\r\n\r\n fits_table = fits.open(fits_file)\r\n fits_header = fits_table[0].header\r\n\r\n data_keys = {}\r\n\r\n object = os.path.basename(fits_file).split('.')[0]\r\n # object = \"A3376_E\"\r\n\r\n # summary_file = glob.glob(fits_dir + 
'/spam_A3376-E*.summary')\r\n summary_file = glob.glob(fits_dir + '/spam_' + object + '*.summary')\r\n rms = \"NA\"\r\n for each_summary in summary_file:\r\n if 'DONE' in open(each_summary).read():\r\n # print each_summary\r\n lines = open(each_summary).readlines()\r\n rms = lines[-1].split(' ')[-5]\r\n # print rms\r\n else:\r\n # print \"Needs to be deleted\"\r\n if rms == \"NA\":\r\n log_file = each_summary.replace('summary', 'log')\r\n lines = open(log_file).readlines()\r\n rms = lines[-2].split(' ')[0]\r\n if rms == \"NA\":\r\n rms = 2.11\r\n\r\n print(fits_file)\r\n\r\n if \"CYCLE24\" in fits_file:\r\n dir_path = os.path.dirname(os.path.dirname(fits_file))\r\n observation_no = glob.glob(dir_path+\"/*.obslog\")[0].split('/')[-1].split('.')[0]\r\n print(observation_no)\r\n else:\r\n observation_no = fits_file.split('/')[4]\r\n\r\n columnKeys = {\r\n \"project_id\"\r\n }\r\n\r\n if observation_no == 'MIXCYCLE':\r\n mix_path = fits_file.split('/')[4]+'/'+fits_file.split('/')[5]\r\n mix_sql = \"select observation_no from projectobsno where file_path like '%\"+mix_path+\"%'\"\r\n mix_cycle_data = dbutils.select_gadpu_query(mix_sql)\r\n observation_no = mix_cycle_data[0][0]\r\n\r\n whereKeys = {\r\n \"observation_no\": observation_no\r\n }\r\n print(columnKeys, whereKeys)\r\n project_id = dbutils.select_from_table(\"projectobsno\", columnKeys, whereKeys, 0)\r\n print(project_id)\r\n if project_id:\r\n columnKeys = {\r\n \"das_scangroup_id\",\r\n \"ltacomb_file\"\r\n }\r\n whereKeys = {\r\n \"project_id\": project_id,\r\n }\r\n result = dbutils.select_from_table(\"ltadetails\", columnKeys, whereKeys, 0)\r\n\r\n print(result)\r\n print(result[1])\r\n\r\n sql = \"select ant_mask, band_mask, calcode, chan_width, corr_version, g.observation_no, \" \\\r\n \"date_obs, ddec, dec_2000, dec_date, dra, lsr_vel1, lsr_vel2, lta_time, \" \\\r\n \"net_sign1, net_sign2, net_sign3, net_sign4, num_chans, num_pols, onsrc_time, \" \\\r\n \"proj_code, qual, ra_2000, ra_date, rest_freq1, rest_freq2, sky_freq1, \" \\\r\n \"sky_freq2, source, sta_time from das.scangroup g inner join \" \\\r\n \"das.scans s on s.scangroup_id = g.scangroup_id \" \\\r\n \"where s.scangroup_id = \" + str(result[1]) + \" AND source like '\" + object + \"'\"\r\n scangroup_data = dbutils.select_scangroup_query(sql)\r\n\r\n # print(scangroup_data)\r\n\r\n if scangroup_data:\r\n data_keys = {\r\n \"ANTMASK\": scangroup_data[0],\r\n \"BANDMASK\": scangroup_data[1],\r\n \"CALCODE\": scangroup_data[2],\r\n \"CHANWIDT\": scangroup_data[3],\r\n \"CORRVERS\": scangroup_data[4],\r\n \"OBSNUM\": scangroup_data[5],\r\n \"DATEOBS\": str(scangroup_data[6]),\r\n \"DDEC\": scangroup_data[7],\r\n \"DEC2000\": scangroup_data[8],\r\n \"DECDATE\": scangroup_data[9],\r\n \"DRA\": scangroup_data[10],\r\n \"LSRVEL1\": scangroup_data[11],\r\n \"LSRVEL2\": scangroup_data[12],\r\n \"LTATIME\": scangroup_data[13],\r\n \"NETSIGN1\": scangroup_data[14],\r\n \"NETSIGN2\": scangroup_data[15],\r\n \"NETSIGN3\": scangroup_data[16],\r\n \"NETSIGN4\": scangroup_data[17],\r\n \"NUMCHANS\": scangroup_data[18],\r\n \"NUMPOLS\": scangroup_data[19],\r\n \"ONSRCTIM\": scangroup_data[20],\r\n \"PROJCODE\": scangroup_data[21],\r\n \"QUAL\": scangroup_data[22],\r\n \"RA2000\": scangroup_data[23],\r\n \"RADATE\": scangroup_data[24],\r\n \"RESTFRE1\": scangroup_data[25],\r\n \"RESTFRE2\": scangroup_data[26],\r\n \"SKYFREQ1\": scangroup_data[27],\r\n \"SKYFREQ2\": scangroup_data[28],\r\n \"STATIME\": scangroup_data[30],\r\n \"RMS\": float(rms)\r\n }\r\n\r\n # print(data_keys)\r\n 
filename = fits_file\r\n hdulist = fits.open(filename, mode='update')\r\n header = hdulist[0].header\r\n\r\n try:\r\n histroy = str(fits_header[\"HISTORY\"]).strip().split(' ')\r\n nh = [x for x in histroy if x]\r\n data_keys[\"BMAJ\"] = float(nh[3])\r\n data_keys[\"BMIN\"] = float(nh[5])\r\n data_keys[\"BPA\"] = float(nh[7])\r\n print(histroy)\r\n try:\r\n del header['HISTORY']\r\n except Exception as exh:\r\n print(exh)\r\n except Exception as ex:\r\n print(ex)\r\n try:\r\n if fits_header[\"BMAJ\"]:\r\n data_keys[\"BMAJ\"] = float(fits_header[\"BMAJ\"])\r\n data_keys[\"BMIN\"] = float(fits_header[\"BMIN \"])\r\n data_keys[\"BPA\"] = float(fits_header[\"BPA\"])\r\n except Exception as ex:\r\n print(ex)\r\n\r\n pbcor_file = os.path.basename(fits_file).split('.')[0]\r\n spam_log = glob.glob(os.path.dirname(fits_file) + \"/spam_\" + pbcor_file + \"*.log\")\r\n spam_log.sort()\r\n spam_log = spam_log[0]\r\n reading_spam_log = open(spam_log).readlines()\r\n bmaj_bmin = []\r\n if len(reading_spam_log) > 0:\r\n for each_line in reading_spam_log:\r\n if \"BMAJ\" in each_line:\r\n bmaj_bmin.append(each_line)\r\n bmaj_bmin_data = bmaj_bmin[0].replace(' ',' ').replace(\" \",\" \").replace(\"= \",\"=\").split((\r\n ' '))\r\n print(bmaj_bmin_data)\r\n for each_key in bmaj_bmin_data:\r\n if \"BMAJ\" in each_key:\r\n data_keys[\"BMAJ\"] = float(each_key.split('=')[1])\r\n if \"BMIN\" in each_key:\r\n data_keys[\"BMIN\"] = float(each_key.split('=')[1])\r\n if \"BPA\" in each_key:\r\n data_keys[\"BPA\"] = float(each_key.split('/')[0].split('=')[1])\r\n print( data_keys[\"BMAJ\"], data_keys[\"BMIN\"], data_keys[\"BPA\"])\r\n try:\r\n for key, value in data_keys.iteritems():\r\n print key, value\r\n header.set(key, value)\r\n hdulist.flush()\r\n except Exception as ex:\r\n print(ex)", "def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack 
= ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return", "def create_issue_objs(self):\n \n print \"Creating IssueClass objects\"\n \n # Create IssueClass objects, add to issue_objs dictionary\n for issue in self.issues: \n# print json.dumps(issue, indent=4)\n if issue['fields']['issuetype']['name'] == \"Sub-task\" and issue['fields']['parent']['fields']['issuetype']['name'] != \"New Feature\":\n continue # Skip sub-tasks whose parents are not New features\n ic = IssueClass() # Create IssueClass object for each issue, assign data from issue to object's variables\n ic.assignee = issue['fields']['assignee']['name']\n ic.assignee_email = issue['fields']['assignee']['emailAddress']\n ic.issue_id = issue['key']\n ic.issue_type = issue['fields']['issuetype']['name']\n ic.summary = issue['fields']['summary']\n ic.status = issue['fields']['status']['name']\n self.issue_objs[issue['key']] = ic # Add object to main object dictionary\n \n if ic.issue_type == \"Sub-task\":\n ic.issue_parent = issue['fields']['parent']['key'] # Get Sub-task parent\n \n try:\n ic.sprint = issue['fields']['customfield_10264'][0]['value'] # Get current sprint\n except TypeError:\n pass # Some issues have no sprint\n \n # Brand new issues less than change_period with no changes yet are considered a \"change of status\".\n ic.icdt = dt.strptime(issue['fields']['created'].split('.')[0], \"%Y-%m-%dT%H:%M:%S\") # Item create datetime\n if (issue['fields']['issuetype']['name'] == \"New Feature\") and \\\n ic.icdt.date() > date.today()-timedelta(days=int(self.config.get('default', 'change_period'))):\n ic.last_sprint = \"\" # Only objects with a last_sprint or last_status attribute will be checked for changes within change_period\n ic.last_status = \"\" # Set last_sprint and last_status to null for issues less than change_period old\n\n # Get time in status for the issues we're interested in, also updates sprint/last_sprint, status/last_status\n self.get_time_in_status(issue, ic.status)", "def getIssuesEmail(self):\r\n base_url = \"http://beta.boost.org/development/tests/\"\r\n base_url += self.branch\r\n base_url += \"/developer/\";\r\n got_issues = False\r\n\r\n # Ping the server by looking for an HTML file\r\n print \"Pinging the server to initiate extraction...\"\r\n ping_url = base_url + \"issues.html\"\r\n os.system('curl -O ' + ping_url)\r\n os.system('rm -f issues.html')\r\n \r\n for x in range(30):\r\n # Update issues-email.txt\r\n url = base_url + \"issues-email.txt\"\r\n print 'Retrieving issues email from ' + url\r\n os.system('rm -f issues-email.txt')\r\n os.system('curl -O ' + url)\r\n\r\n if self.parseIssuesEmail():\r\n return True\r\n\r\n print 'Failed to fetch issues email. 
'\r\n time.sleep (30)\r\n\r\n return False", "def crawler():\n job_entries = []\n for job in job_info(URL):\n labels = \"\"\n if job[\"labels\"]:\n for label in job[\"labels\"]:\n labels += label[\"name\"]\n if job[\"labels\"].index(label) != len(job[\"labels\"]) - 1:\n labels += \",\"\n job_entries.append((job[\"number\"], job[\"id\"],\n job[\"title\"], job[\"html_url\"], labels))\n\n conn = sqlite3.connect('jobber/jobber.db')\n c = conn.cursor()\n c.executemany(('INSERT OR IGNORE INTO job_entries '\n 'VALUES (?,?,?,?,?)'), job_entries)\n conn.commit()\n conn.close()", "def create_dbs_for_project(self):\r\n assert self.index\r\n self.client.indices.create(self.index, ignore=400)\r\n bug_mapping = {\r\n \"bug\": {\r\n \"_id\": {\r\n \"path\": \"bugid\"\r\n },\r\n \"_timestamp\": {\r\n \"enabled\": True,\r\n \"path\": \"openedDate\",\r\n \"format\": \"dateOptionalTime\"\r\n },\r\n \"properties\": {\r\n \"openedDate\": {\r\n \"type\": \"date\",\r\n \"format\": \"dateOptionalTime\"\r\n }\r\n }\r\n }\r\n }\r\n self.client.indices.put_mapping(index=self.index,\r\n doc_type=ElasticSearch.BUGS, body=bug_mapping)\r\n generated_mapping = {\r\n \"generated\" : {\r\n \"properties\" : {\r\n \"data.tar.gz\" : {\r\n \"type\" : \"string\",\r\n \"index\" : \"no\",\r\n \"norms\" : {\r\n \"enabled\" : False\r\n }\r\n }\r\n }\r\n }\r\n }\r\n self.client.indices.put_mapping(index=self.index,\r\n doc_type=ElasticSearch.GENERATED, body=generated_mapping)", "def analysis_document(self, workers_num, stock_queues):\n\t\twhile not stock_queues.empty():\n\t\t\tfail_flag = False\n\t\t\tstock_name = stock_queues.get()\n\t\t\t#if not stock_name[0:1] in ['R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:\n\t\t\t#\tcontinue\n\t\t\t#if not self.analysis_statement(stock_name):\n\t\t\t#\tcontinue\n\n\t\t\tsav_stock_csv_path = '{}.csv'.format(os.path.join(self.stock_folder_path, stock_name))\n\t\t\tsav_option_csv_path = '{}.csv'.format(os.path.join(self.option_folder_path, stock_name))\n\t\t\tsav_option_com_order_csv_path = '{}.csv'.format(os.path.join(self.option_com_order_folder_path, stock_name))\n\t\t\tif not os.path.exists(self.option_folder_path):\n\t\t\t\tos.mkdir(self.option_folder_path)\n\t\t\tif not os.path.exists(self.option_com_order_folder_path):\n\t\t\t\tos.mkdir(self.option_com_order_folder_path)\n\t\t\tif not os.path.exists(self.techidx_folder_path):\n\t\t\t\tos.mkdir(self.techidx_folder_path)\n\t\t\tif not os.path.exists(self.pickle_folder_path):\n\t\t\t\tos.mkdir(self.pickle_folder_path)\n\t\t\t\t\n\t\t\tdf = self.crawl_price(stock_name)\n\t\t\tif len(df) < self.min_days:\n\t\t\t\tcontinue\n\n\t\t\tresult_all = self.get_supporting_point(stock_name, sav_stock_csv_path)\n\t\t\t#continue\n\t\t\t#self.output_report(stock_name, sav_option_csv_path, sav_option_com_order_csv_path, result_all)\n\t\t\t#print (sav_stock_csv_path, sav_option_csv_path, sav_option_com_order_csv_path)\n\t\t\ttech_idx_path = 'techidx/{}.csv'.format(stock_name)\n\n\t\t\toptions_contract_file_path = 'options/{}.csv'.format(stock_name)\n\t\t\tsav_stock_csv_path = '{}.csv'.format(os.path.join(self.stock_folder_path, stock_name))\n\t\t\toptions_file_path = '{}.csv'.format(os.path.join(self.option_folder_path, stock_name))\n\t\t\toptions_com_order_csv_path = '{}.csv'.format(os.path.join(self.option_com_order_folder_path, stock_name))\n\t\t\tcombin_contract_list_all, state_flag = self.output_report(stock_name, options_file_path, options_com_order_csv_path, result_all)\n\t\t\tif not state_flag:\n\t\t\t\t#print 
('continue')\n\t\t\t\tcontinue\n\t\t\tself.get_techidx_result(stock_name)\n\t\t\tbest_combin_contract_all = self.back_testing(tech_idx_path, options_contract_file_path, combin_contract_list_all)\n\t\t\tprint (best_combin_contract_all)\n\n\t\t\tprint ('worker number {}, stock_name is {}'.format(workers_num, stock_name))\n\t\t\t#stock_queues.put(stock_name)\n\n#\t\t\tfor date in best_combin_contract_all.keys():\n\n\n\t\t\tbest_combin_contract_all_json = json.dumps(best_combin_contract_all)\n\t\t\t#print (len(best_combin_contract_all) != 0, len(best_combin_contract_all))\n\t\t\tif len(best_combin_contract_all) != 0:\n\t\t\t\twith open(options_com_order_csv_path, 'w') as f_w:\n\t\t\t\t\tf_w.write(best_combin_contract_all_json)", "def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. 
Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "async def scrape_submissions(self):\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n submission_count = 0\n async for submission in subreddit_origin.new(limit=self.limit):\n if self.memory.contains(submission.id):\n continue\n\n self.memory.add(submission.id)\n\n # Parse Submission\n submission = self.parse_submission(submission)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(submission)\n\n submission_count += 1\n\n return submission_count", "def test_get_all_issues_passes(self):\n # Act: no issues\n response = self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 0)\n self.assertEqual(response_json[\"categories\"], [])\n\n # Act: add 1 issue\n self.test_issue = create_canned_mapping_issue()\n response = self.client.get(self.url)\n response_json = response.get_json()\n # test\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_json[\"categories\"]), 1)\n self.assertEqual(response_json[\"categories\"][0][\"name\"], TEST_ISSUE_NAME)", "def main():\n os.makedirs(\"../json-data\", exist_ok=True)\n # num_docs = 1005\n num_docs = int(sys.argv[1])\n for answerno in range(num_docs):\n print('Creating document', answerno, 'of', num_docs)\n basename = \"../json-data/chunck_%s\" % uuid.uuid4()\n tempname = basename + '.temp.gz'\n longtermname = basename + '.json.gz'\n\n # We compress with gzip.\n # It's relatively fast compression.\n # We could compress with bzip2 or zlib instead if we have the CPU time available.\n # We could do bits and bytes, but that's harder to debug, and only worth it if there's a LOT of data to store.\n # We could eliminate all unanswered responses, but that is a little prone to surprises.\n # We also have the option of using bson instead of json.\n with gzip.open(tempname, \"w\") as answerfile:\n row = {\"pk\": \"%d\" % answerno}\n for carvar in constants.carvars:\n row[carvar] = random.choice(constants.carbrands)\n for carvar in constants.mrcarvars:\n for carbrand in constants.carbrands:\n row[\"%s.%s\" % (carvar, carbrand)] = random.choice(constants.answers)\n for singvar in constants.singervars:\n row[singvar] = random.choice(constants.singers)\n for singvar in constants.mrsingervars:\n for singer in constants.singers:\n row[\"%s.%s\" % (singvar, singer)] = random.choice(constants.answers)\n string = json.dumps(row)\n answerfile.write(string.encode('UTF-8'))\n os.rename(tempname, longtermname)", "def save_file():\n generic = pull_list()\n result = list()\n i = 0\n while True:\n try:\n if generic[i].startswith('CVE'):\n cve_pattern = \"^CVE-\\d+-\\d+|^CVE-\\d+-[X]+\"\n header = re.findall(cve_pattern, generic[i])[0]\n i += 1\n notes = list()\n while not generic[i].startswith('CVE'):\n commit_pattern = 
\"http[s]?:\\/\\/.+commit\\/[\\S]+\"\n if re.search(commit_pattern, generic[i]):\n link = re.findall(commit_pattern, generic[i])\n notes.append(link[0])\n i += 1\n if notes != list():\n result.append(Data(header, notes))\n except IndexError:\n print('Finished')\n break\n return result", "def main():\n try:\n # Set up logging for this module\n logsetup = setuplogging(logfilestandardname='pullpahostats', \n logginglevel='logging.INFO', stdoutenabled=True)\n if logsetup == 0:\n logging.info(\"Logging set up successfully.\")\n # construct the full path to the folders containing the pdf files and the output csv files\n global paho_raw_reports_dir\n global paho_csv_reports_dir\n currentdir = os.path.dirname(os.path.realpath(__file__))\n paho_raw_reports_dir = os.path.join(currentdir,PAHO_RAW_REPORTS_DIR_NAME)\n paho_csv_reports_dir = os.path.join(currentdir,REPO_NAME,PAHO_CSV_REPORTS_DIR_NAME)\n # set up the local repo if it is not already created\n sync_git_repo()\n # download all pdfs from the PAHO website\n download_pdfs()\n # parse all the pdfs in the paho_raw_reports_dir \n pdf_reports_data = parse_pdfs()\n # create csvs in the output folder using the parsed content\n create_csvs(pdf_reports_data)\n # now resync the local repo to the remote repo to upload the files\n sync_git_repo() \n # add the data from the pdfs to the database\n write_data_to_db(pdf_reports_data)\n except Exception as exc:\n logging.exception(\"Error encountered while running script \"+os.path.basename(__file__))\n sys.exit(1)\n else:\n # Use 0 for normal exits, 1 for general errors and 2 for syntax errors (eg. bad input parameters)\n sys.exit(0)", "def download_filings(feedpath,args=None):\n\tlogger.info(\"Processing RSS feed %s\",feedpath)\n\n\tdir = filings_dir(feedpath)\n\tos.makedirs(dir,exist_ok=True)\n\n\tfiling_urls = []\n\tfor filing in feed_tools.read_feed(feedpath):\n\t\tif args:\n\t\t\tif args.company_re and not bool(args.company_re.match(filing['companyName'])):\n\t\t\t\tcontinue\n\t\t\tif args.cik and args.cik != filing['cikNumber']:\n\t\t\t\tcontinue\n\t\t\tif args.sic and args.sic != filing['assignedSic']:\n\t\t\t\tcontinue\n\t\t\tif args.form_type and args.form_type != filing['formType']:\n\t\t\t\tcontinue\n\t\tif 'enclosureUrl' in filing and not exists_filing(dir,filing['enclosureUrl'],filing['enclosureLength']):\n\t\t\tfiling_urls.append(filing['enclosureUrl'])\n\t\tif args and getattr(args,'with_exhibits',False):\n\t\t\tfiling_urls.extend( filing.get( 'exhibitList', [] ) )\n\n\tlogger.info(\"Start downloading %d new filings\",len(filing_urls))\n\twith concurrent.futures.ThreadPoolExecutor(max_workers=args.max_threads) as executor:\n\t\tfutures = [executor.submit(download_filing,dir,url,args.max_retries) for url in filing_urls]\n\t\tfor future in concurrent.futures.as_completed(futures):\n\t\t\ttry:\n\t\t\t\tfuture.result()\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)", "def findings(self):\n cache = FindingsCache()\n cached_nodes = cache.list()\n\n processed_uniq_ids = []\n\n om.out.debug('[xml_file.flush()] Starting findings()')\n start = time.time()\n\n #\n # This for loop is a performance improvement which should yield\n # really good results, taking into account that get_all_uniq_ids_iter\n # will only query the DB and yield IDs, without doing any of the\n # CPU-intensive cPickle.loads() done in get_all_findings_iter()\n # which we do below.\n #\n # Ideally, we're only doing a cPickle.loads() once for each finding\n # the rest of the calls to flush() will load the finding from the\n # cache in 
this loop, and use the exclude_ids to prevent cached\n # entries from being queried\n #\n # What this for loop also guarantees is that we're not simply\n # reading all the items from the cache and putting them into the XML,\n # which would be incorrect because some items are modified in the\n # KB (which changes their uniq id)\n #\n for uniq_id in kb.kb.get_all_uniq_ids_iter(include_ids=cached_nodes):\n node = cache.get_node_from_cache(uniq_id)\n\n # cached_nodes can be (), this means that get_all_uniq_ids_iter()\n # will return *all* findings, some might not be in the cache. When\n # that happens, the cache returns None\n if node is not None:\n yield node\n processed_uniq_ids.append(uniq_id)\n\n msg = '[xml_file.flush()] findings() processed %s cached nodes in %.2f seconds'\n spent = time.time() - start\n args = (len(processed_uniq_ids), spent)\n om.out.debug(msg % args)\n\n start = time.time()\n\n #\n # This for loop is getting all the new findings that w3af has found\n # In this context \"new\" means that the findings are not in the cache\n #\n new_findings = 0\n\n for finding in kb.kb.get_all_findings_iter(exclude_ids=cached_nodes):\n uniq_id = finding.get_uniq_id()\n processed_uniq_ids.append(uniq_id)\n node = Finding(self._jinja2_env, finding).to_string()\n cache.save_finding_to_cache(uniq_id, node)\n\n new_findings += 1\n\n yield node\n\n msg = '[xml_file.flush()] findings() processed %s new findings in %.2f seconds'\n spent = time.time() - start\n args = (new_findings, spent)\n om.out.debug(msg % args)\n\n start = time.time()\n\n #\n # Now that we've finished processing all the new findings we can\n # evict the findings that were removed from the KB from the cache\n #\n evicted_findings = 0\n\n for cached_finding in cached_nodes:\n if cached_finding not in processed_uniq_ids:\n cache.evict_from_cache(cached_finding)\n\n evicted_findings += 1\n\n msg = '[xml_file.flush()] findings() evicted %s findings from cache in %.2f seconds'\n spent = time.time() - start\n args = (evicted_findings, spent)\n om.out.debug(msg % args)", "def _openBug( self, sType, bSerial, sCompany, sName, sSummary='', sDesc='' ):\n\n\t\ttry:\n\t\t\tdbgMsg( 'Opening Bug serial-[%d] company-[%s] name-[%s]' % \\\n\t\t\t\t( bSerial, sCompany, sName ) )\n\n\t\t\tif self._fOfflineDebug:\n\t\t\t\tdbgMsg( 'Skipping since we are in offline debug mode' )\n\t\t\t\treturn True\n\n\t\t\tsFileIn = ''\n\t\t\tsFileOut = ''\n\t\t\tsAssignee = ''\n\t\t\tsSum = ''\n\t\t\tsDescription = ''\n\t\t\tif sType == \"Ticket\":\n\t\t\t\tsFileIn = BASE + 'templates/dvs.bug'\n\t\t\t\tsFileOut = '/tmp/dvs-%d.bug' % bSerial\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\t\t\t\tsSum = sSummary\n\t\t\t\tsDescription = sDesc\n\n\t\t\telif sType == \"On-Site Maintenance\":\n\t\t\t\tsFileIn = BASE + 'templates/onsite-maintenance.bug'\n\t\t\t\tsFileOut = '/tmp/onsite-maintenance.bug'\n\t\t\t\tsAssignee = 'bugzilla@dividia.net'\n\n\t\t\telse:\n\t\t\t\traise Exception, 'unknown bug type [%s]' % sType\n\n\t\t\t# Sanitize input for perl replacements\n\t\t\tsCompany = sCompany.replace( '\"', \"_\" )\n\t\t\tsName = sName.replace( '\"', \"_\" )\n\t\t\tsAssignee = sAssignee.replace( '\"', \"_\" )\n\t\t\tsSum = sSum.replace( '\"', \"_\" )\n\t\t\tsDescription = sDescription.replace( '\"', \"_\" )\n\n\t\t\tos.system( 'cp %s %s' % ( sFileIn, sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@SERIAL\\@\\@/%03d/g\\\" %s\" % ( bSerial, sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@COMPANY\\@\\@/%s/g\\\" %s\" % ( re.escape( sCompany ), sFileOut ) 
)\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@NAME\\@\\@/%s/g\\\" %s\" % ( re.escape( sName ), sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@ASSIGNEE\\@\\@/%s/g\\\" %s\" % ( re.escape( sAssignee ), sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@SUMMARY\\@\\@/%s/g\\\" %s\" % ( re.escape( sSum ), sFileOut ) )\n\t\t\tos.system( \"perl -pi -e \\\"s/\\@\\@DESCRIPTION\\@\\@/%s/g\\\" %s\" % ( re.escape( sDescription ), sFileOut ) )\n\n\t\t\tos.system( \"perl -pi -e \\\"s/@/\\\\\\\\\\\\@/g\\\" %s\" % sFileOut )\n\n\t\t\tsResult = commands.getoutput( '/usr/local/bin/bz_webservice_demo.pl --uri http://tickets.dividia.net/xmlrpc.cgi --rememberlogin --login bugzilla --password \\'dt!8734\\' --create %s 2>/dev/null' % sFileOut )\n\t\t\tos.unlink( sFileOut )\n\n\t\t\ttry:\n\t\t\t\toMatch = re.search( \".*id: ([0-9]+).*\", sResult, re.MULTILINE )\n\t\t\t\tif not oMatch:\n\t\t\t\t\treturn False\n\n\t\t\t\tbBug = 0\n\t\t\t\ttry:\n\t\t\t\t\tbBug = int( oMatch.group( 1 ) )\n\t\t\t\texcept ValueError, e:\n\t\t\t\t\treturn False\n\n\t\t\texcept:\n\t\t\t\treturn False\n\n\t\t\treturn True\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error opening new bug in bugzilla' )\n\t\t\terrMsg( e )\n\t\t\treturn False", "def crawl(self, fuzzable_request, debugging_id):\n for domain_path in fuzzable_request.get_url().get_directories():\n\n if domain_path in self._analyzed_dirs:\n continue\n \n self._analyzed_dirs.add(domain_path)\n\n url_repeater = repeat(domain_path)\n args = izip(url_repeater, self._get_potential_phpinfos())\n\n self.worker_pool.map_multi_args(self._check_and_analyze, args)", "def get_bug_benchmarks():\n return [\n benchmark for benchmark in get_all_benchmarks()\n if get_type(benchmark) == BenchmarkType.BUG.value\n ]", "def post_defects(project, jira_issues, defects):\n payload = \"\"\n for defect in defects:\n #TODO: this is a hack which can be removed once, excel docs are done away with.\n if defect[\"assignee\"] == \"Unassigned\":\n defect[\"assignee\"] = None\n\n data = {\"host\": host,\n \"time\": int(datetime.datetime.strptime(defect[\"report_date\"], DATE_FORMAT).strftime(\"%s\")) * 1000,\n \"event\": defect,\n \"index\": INDEX,\n \"source\": \"defect\"}\n if config.splunk[config.environment].payload_limit and len(payload) + len(data) >= config.splunk[config.environment].payload_limit:\n logger.info(\"Reached length: {}, Restarting\".format(len(payload)))\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted batched data to Splunk {}\".format(project))\n payload = \"{}\".format(json.dumps(data))\n else:\n payload += \" {}\".format(json.dumps(data))\n\n rsp = post_to_splunk(payload=payload)\n logger.info(\"Successfully posted data to splunk for {}\".format(project))\n return {project: rsp.status_code, \"defects_require_fixing\": str(len(jira_issues) - len(defects))}", "def run(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n for entry in glob.glob(os.path.join(self.data_folder, self.data_expression)):\n f = open(entry)\n text = json.loads(f.read())\n f.close()\n self.create_page_objects(text)", "def run():\n\n api = api_start()\n stonks = {}\n check_function = load_symbol_list()\n for obj in (\"comments\", \"submissions\"):\n for post in get_text(api, obj):\n if obj == \"comments\":\n full_text = post.body\n else: # obj == \"submissions\"\n full_text = post.title + post.selftext\n try:\n stonks = check_texts(\n full_text, post.author.name, stonks, check_function\n )\n except AttributeError:\n pass\n\n return stonks", 
"async def bug(self, ctx):\n await ctx.message.delete()\n await ctx.send(\"File a bug report: https://github.com/TheSuperGamer20578/Sudan-bot/issues/new?labels=Bug&template=bug_report.md\")", "def ordered_crawling():\n queue.append(seed_url)\n visited.add(seed_url)\n while len(queue) >= 0:\n try:\n text = req_obj.get_html_text(queue[0])\n print queue[0]\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n\n result_file.write(str(queue[0]) + \", \" + str(link_weights[queue[0]]))\n er_file.write(\"###########\" + str(link_weights) + \"\\n\\n\\n\\n\")\n update_weights(text)\n queue.sort(compare)\n result_file.write(\"\\n\")\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n del link_weights[queue[0]]\n queue.pop(0)", "def crawl(self):\n\n # create helper process and setup IPC\n self.socket.listen(1)\n help_out_fd = open(self.helper_outfile, \"w\")\n with subprocess.Popen(\"./crawl_helper.py\", stdout=help_out_fd, stderr=subprocess.STDOUT) as proc:\n self.helper_pid = proc.pid\n try:\n conn, _ = self.socket.accept()\n # create initial params for crawler helper and send them\n new_urls = set()\n setup_params = {\"start_urls\": self.start_urls, \"allowed_domains\": [self.domain],\n \"cookies\": self.cookies, \"user_agent\": self.config[\"user_agent\"]}\n ipc_operations.send_object(conn, setup_params)\n\n # loop: receive a response object, then send new URLs to crawl. Catch & handle problems.\n while True:\n try:\n proc.wait(timeout=0.001)\n break\n except subprocess.TimeoutExpired:\n response = ipc_operations.receive_object(conn)\n if not response: # socket is dead / closed\n break\n new_urls = self.process_response(response)\n ipc_operations.send_object(conn, new_urls)\n except socket.timeout:\n util.printit(\"Unix socket connection to scrapy crawler unexpectedly broke. 
\" +\n \"Quitting crawling of %s\" % self.base_url, color=util.RED)\n break\n finally:\n # ensure connection is closed and helper process killed in any case\n conn.close()\n proc.kill()\n\n # after the actual crawling, extract all the gathered cookies from Selenium\n if self.config[\"use_selenium\"].lower() == \"true\":\n selenium_cookies = self.driver.get_cookies()\n for cookie in selenium_cookies:\n if not any(cookie[\"name\"] == c[\"name\"] and cookie[\"path\"] == c[\"path\"] and\n cookie[\"domain\"] == c[\"domain\"] for c in self.found_cookies):\n parsed_cookie = {}\n for key in (\"name\", \"path\", \"domain\", \"httpOnly\", \"secure\"):\n parsed_cookie[key] = cookie[key]\n self.found_cookies.append(parsed_cookie)\n\n help_out_fd.close()\n return self.create_results()", "def run(self):\n\n #Load old cache\n if '-r' not in sys.argv:\n old_db = SiteDB([], 'upwork')\n old_db.load()\n\n #Set it to current DBs old cache\n self.db.pred = old_db.cache\n\n #Generate crawler object\n \n crawler = self.generate_crawler()\n\n #Generate DB for latest scan\n if '-full' not in sys.argv:\n self.db = crawler.crawl()\n else:\n _ = crawler.crawl()\n\n if '-r' not in sys.argv:\n #Get deleted info\n _ = self.db.compare(old_db)\n\n targets = ['https://community.upwork.com/t5/Announcements/bd-p/news', \\\n 'https://community.upwork.com/t5/Freelancers/bd-p/freelancers', \\\n 'https://community.upwork.com/t5/Clients/bd-p/clients', \\\n 'https://community.upwork.com/t5/Agencies/bd-p/Agencies']\n target = None\n for tar in targets:\n if tar in sys.argv:\n target = tar\n\n #Write result\n #self.db.write(target_name=target)", "def _render_reported(self) -> dict:\n logging.debug(f\"Fetching reported bugs for {self.user.display_name}\")\n reported = defaultdict(list)\n tasks = self.user.searchTasks(\n bug_reporter=self.user, status=self.status, created_since=self.since\n )\n tasks = [LPWrap(t) for t in tasks]\n for t in tasks:\n if in_window(self.window, t.bug.date_created):\n reported[t.bug_target_name].append(\n {t.bug.id: t.title,}\n )\n return reported", "def submit_sample_submissions(datadir: str) -> None:\n for competition in KAGGLE_COMPETITIONS:\n print(competition[NAME])\n if os.path.exists(os.path.join(datadir, competition[NAME])):\n log.info(\"Skipping %s already present\", competition[NAME])\n continue\n fd.fetch_kaggle_files(competition[NAME], datadir)\n fetch_processor = competition_meta.get(FETCH_PROCESSOR)\n if fetch_processor:\n files = fetch_processor(files)\n api = kaggle_api()\n for competition in KAGGLE_COMPETITIONS:\n sample_submission = os.path.join(datadir, competition[NAME], 'sample_submission.csv')\n res = submit_kaggle_competition(competition[NAME], sample_submission)\n print(res)", "def cgiIssue(formFields):\n \n # open the roundup tracker configuration file\n trackerConfig = ConfigParser.ConfigParser()\n trackerConfig.read(os.path.join(TRACKER_HOME, 'config.ini'))\n \n # open the roundup database\n r_instance = roundup.instance.open(TRACKER_HOME)\n r_db = r_instance.open(TRACKER_USER)\n\n # get handles to things like priority, etc\n title = (formFields.has_key('title') and formFields['title']) or \\\n DEFAULT_TITLE\n \n priority = findNode(r_db.getclass('priority'),\n {'name':(formFields.has_key('priority') and formFields['priority']) or DEFAULT_PRIORITY})['id']\n\n application = findNode(r_db.getclass('application'),\n {'identifier': formFields['app_id'],\n 'version' : formFields['app_version']})\n\n # see if we found the app record; if so, we just want the id\n if application:\n 
application = application['id']\n\n platform = findNode(r_db.getclass('platform'),\n {'identifier': formFields['platform']})\n if platform is None:\n # create the new platform, assuming \n\tp_id = formFields['platform']\n platform = r_db.getclass('platform').\\\n create(identifier=p_id, supported=True)\n else:\n # just get the ID\n platform = platform['id']\n\n if WATCH_USER is not None:\n nosy = [findNode(r_db.getclass('user'),\n {'username': WATCH_USER})['id']]\n else:\n nosy = []\n\n # get a handle to a default keyword we want to assign\n if DEFAULT_KEYWORD is not None:\n topics = [findNode(r_db.getclass('keyword'),\n\t {'name':DEFAULT_KEYWORD})['id']]\n else:\n topics=[]\n\n # add any notes to the issue as a message\n messages = []\n m_class = r_db.getclass('msg')\n\n if formFields.has_key('message'):\n msgs = formFields['message']\n \n # there may be one or more messages to create\n try:\n msgs.append(None)\n del msgs[-1]\n except:\n msgs = [msgs]\n\n for m in msgs:\n messages.append(m_class.create(content=m))\n \n \n issue_id = createIssue(r_db, title, priority, application,\n platform, nosy, messages, topics)\n\n return '%sissue%s' % (trackerConfig.get('tracker', 'web'),\n issue_id)", "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder information (based on today's date)\n now = datetime.datetime.now()\n datestr = now.strftime(\"%Y%m%d\")\n path = self.DATA_LOCATION+\"InjectedData/\"+datestr+\"_GenerationFuzzedPackets.csv\"\n filename = path\n outfile = open(filename,'a');\n dataWriter = csv.writer(outfile,delimiter=',');\n #dataWriter.writerow(['# Time Error Bytes 1-13']);\n #dataWriter.writerow(['#' + description])\n \n numIds = len(standardIDs)\n fuzzNumber = 0; #: counts the number of packets we have generated\n while( fuzzNumber < Fuzzes):\n id_new = standardIDs[random.randint(0,numIds-1)]\n print id_new\n #### split SID into different regs\n SIDhigh = (id_new >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (id_new & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet[0] = SIDhigh\n packet[1] = SIDlow\n \n #generate a fuzzed packet\n for i in range(0,8): # for each data byte, fuzz it\n idx = \"db%d\"%i\n limits = dbLimits[idx]\n value = random.randint(limits[0],limits[1]) #generate pseudo-random integer value\n packet[i+5] = value\n print packet\n #put a rough time stamp on the data and get all the data bytes \n row = [tT.time(), id_new,8] # could make this 8 a variable \n msg = \"Injecting: \"\n for i in range(5,13):\n row.append(packet[i])\n msg += \" %d\"%packet[i]\n #print msg\n dataWriter.writerow(row)\n self.client.txpacket(packet)\n tT.sleep(period/1000)\n \n #inject the packet the given number of times. 
\n for i in range(1,writesPerFuzz):\n self.client.MCPrts(TXB0=True)\n tT.sleep(period/1000)\n fuzzNumber += 1\n print \"Fuzzing Complete\" \n SIDhigh = (1056 >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0\n SIDlow = (1056 & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5\n packet = [SIDhigh, SIDlow, 0, 0, 8, 65, 255, 32, 120, 0, 0, 1, 247]\n self.client.txpacket(packet)\n for i in range(0,100):\n self.client.MCPrts(TXB0=True)\n tT.sleep(.01)\n outfile.close()", "def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)", "def main():\n cursor = PGCONN.cursor()\n # track our work\n with open(\"myhucs.txt\", \"w\") as fh:\n # Change the working directory to where we have data files\n os.chdir(\"../../data/%s\" % (sys.argv[2],))\n # collect up the GeoJSONs in that directory\n fns = glob.glob(\"smpldef3m_*.json\")\n fns.sort()\n i = 0\n\n for fn in fns:\n # Save our work every 100 HUC12s,\n # so to keep the database transaction\n # at a reasonable size\n if i > 0 and i % 100 == 0:\n PGCONN.commit()\n cursor = PGCONN.cursor()\n df, snapdf = get_data(fn)\n huc12 = process(cursor, fn, df, snapdf)\n fh.write(\"%s\\n\" % (huc12,))\n i += 1\n\n # Commit the database changes\n cursor.close()\n PGCONN.commit()\n LOG.info(\"Complete.\")", "def _collect_and_train(self) -> None:\n self.info_process('\\n\\n')\n self.info_process('Performing daily data collection and model training...')\n\n for symbol in Settings.get_symbols(self):\n # Interrupt collection if the collection loop was stopped\n if not self._running:\n break\n\n # Revert data to last stable day.\n date_last_collected_for = self.time().now().date()\n # If it's past midnight, move back a day.\n if self.time().get_secs_to_open() < timedelta(hours=9, minutes=30).total_seconds():\n date_last_collected_for -= timedelta(days=1)\n # Move back two market days from the most recent market day.\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n date_last_collected_for = self.time().get_prev_mkt_day(date_last_collected_for)\n # Remove mongo price data after the stable day.\n self.mongo().remove_price_data_after(symbol, date_last_collected_for, today=self.time().now().today())\n date_rest_available_for = self.time().get_next_mkt_day(date_last_collected_for)\n\n # Collect yesterday's polygon-rest data and train on it.\n if self._train_on_rest_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon rest data')\n else:\n 
self.warn_process(f'Invalid {symbol} rest data collected for {date_rest_available_for}. '\n f'Discarding them and attempting to use cached stream data instead')\n if self._train_on_stream_data(symbol, date_rest_available_for):\n self.info_process(f'Trained {symbol} on yesterday\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. '\n f'Could not find valid data to train on yesterday!')\n\n # Load today's polygon-stream data and train on it.\n date_cache_available_for = self.time().get_next_mkt_day(date_rest_available_for)\n if self._train_on_stream_data(symbol, date_cache_available_for):\n self.info_process(f'Trained {symbol} on today\\'s polygon stream data')\n else:\n self.warn_process(f'Invalid {symbol} candles cached for {date_rest_available_for}. '\n f'Could not find valid data to train on today!')", "def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):\n\n # Deals with the joys of connex BS\n # Copy and open grade file\n in_gradefname = os.path.join(submissiondir, 'grades.csv')\n out_gradefname = os.path.join(outputdir, 'grades.csv')\n if not os.path.exists(in_gradefname):\n print(\"grade.csv doesn't exist\", \"Re-download submissions from Connex with grade.csv included\", sep=\"\\n\", file=sys.stderr)\n exit(1)\n with open(in_gradefname, 'r') as gradeFile:\n gradeReader = csv.reader(gradeFile, delimiter=',')\n l = [row for row in gradeReader]\n header = l[:3]\n order = [stud[1] for stud in l[3:]]\n details = {stud[1]: stud for stud in l[3:]}\n submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}\n assert len(details) == len(submissions) # If these don't match, panic\n cwd = os.getcwd() # Store this so we can go back to it later\n # And here we go with actually driving this stupid boat\n for idx, f in enumerate(details):\n submission_path = os.path.join(submissiondir, submissions[f], \"Submission attachment(s)\")\n output_path = os.path.join(outputdir, submissions[f])\n # If it has already been marked, show the marks and copy the comments file\n if details[f][-1]:\n if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):\n shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)\n resp = input(f\"{f}[{details[f][-1]}] already marked: Remark? 
[y/N]:\")\n if resp.lower() != 'y':\n # Copy comment file\n if not os.path.isfile(os.path.abspath(\"./comments.txt\")):\n with open(os.path.abspath(\"./comments.txt\"), 'w'):\n pass # Just create it and leave\n if not os.path.isdir(output_path):\n os.mkdir(output_path)\n shutil.copy(os.path.abspath(\"./comments.txt\"),\n os.path.join(output_path, \"comments.txt\"))\n continue\n\n copyContents(submission_path, tmpdir)\n copyContents(assndir, tmpdir) # Will overwrite anything already there\n if not os.path.isdir(os.path.join(tmpdir, 'build')):\n os.mkdir(os.path.join(tmpdir, 'build'))\n os.chdir(os.path.join(tmpdir, 'build'))\n compiled, compile_msg = cpp_compile() # compile submission\n\n if compiled:\n score, output, correct, total = mark()\n else:\n score = 0\n output = \"Failed to compile\"\n correct = 0\n total = 0\n\n # Okay, back to the workdir for comments and shipping the mark\n os.chdir(tmpdir)\n options = [\"Keep\",\n \"Comment\",\n \"Replace Grade\",\n \"Show Compiler Output\",\n \"Show Test Output\",\n \"Show Comment\",\n \"Append compiler message\",\n \"Append Test Output\",\n \"View Submission\"]\n\n while True:\n print(f\"\"\"Marking {submissions[f]}:\nStudent {idx+1} / {len(details)}\nMark: {score} ({correct} / {total})\"\"\")\n cidx, cmd = selectItems(options)\n if cidx == 0:\n break\n elif cidx == 1: # Comment on file\n editFile(os.path.abspath(\"./comments.txt\"))\n continue\n elif cidx == 2: # Change grade\n score = round(float(input(\"New Grade: \")), 2)\n continue\n elif cidx == 3:\n viewData(compile_msg)\n elif cidx == 4:\n viewData(output)\n elif cidx == 5:\n viewFile(os.path.abspath(\"./comments.txt\"))\n elif cidx == 6:\n appendToFile(os.path.abspath(\"./comments.txt\"),\n '\\n'.join([\"\\n<pre>\",\"=== [Compiler Output] =========\",\n compile_msg, \"</pre>\"]))\n elif cidx == 7:\n appendToFile(os.path.abspath(\"./comments.txt\"),\n '\\n'.join([\"\\n<pre>\", \"=== [Test Output] =============\",\n output, \"</pre>\"]) )\n elif cidx == 8:\n submittedFiles = getFiles(submission_path)\n if len(submittedFiles) > 1:\n _, fname = selectItems(submittedFiles)\n else:\n fname = submittedFiles[0]\n viewFile(os.path.abspath(\"./\" + fname))\n else:\n print(cidx, cmd)\n # Once everything is hunky dory, put away their mark and move on\n details[f][-1] = score\n\n if not os.path.isfile(os.path.abspath(\"./comments.txt\")):\n with open(os.path.abspath(\"./comments.txt\"), 'w'):\n pass # Just create it and leave\n if not os.path.isdir(output_path):\n os.mkdir(output_path)\n shutil.copy(os.path.abspath(\"./comments.txt\"),\n os.path.join(output_path, \"comments.txt\"))\n removeFiles(os.path.join(tmpdir, \"build\"), skipdirs=False)\n shutil.rmtree(os.path.join(tmpdir, \"tests\"))\n removeFiles(tmpdir, skipdirs=False)\n os.chdir(cwd)\n # Write grades to grade file\n with open(out_gradefname, \"w\") as outputgrades:\n csv_writer = csv.writer(outputgrades, dialect='unix')\n [csv_writer.writerow(el) for el in header]\n [csv_writer.writerow(details[stud]) for stud in order]\n\n return details", "def generateSentimentAnalysis(self, fs_db, cleaned_submissions, cleaned_tweets):\n all_posts = []\n\n for p in range(len(cleaned_submissions)):\n print('reddit', self.clean(cleaned_submissions[p][3]))\n all_posts.append(self.clean(cleaned_submissions[p][3]))\n\n for t in range(len(cleaned_tweets)):\n print('twitter', self.clean(cleaned_tweets[t][2]))\n all_posts.append(self.clean(cleaned_tweets[t][2]))\n \n if len(all_posts) == 0:\n raise Exception(\"No crawled data\")\n\n count = 0\n\n for c in 
all_posts:\n blob = TextBlob(c)\n\n polarity = blob.sentiment.polarity\n subjectivity = blob.sentiment.subjectivity\n\n doc_ref = fs_db.collection(u'sentimentAnalysis').document('first')\n if (polarity != 0 and subjectivity != 0):\n count += 1\n doc_ref.set({str(count): {'post': c, 'polarity': polarity, 'subjectivity':subjectivity}}, merge=True)\n\n with open('wc.txt', 'w') as output:\n for data in all_posts:\n output.write('%s\\n' % data)", "def main():\n print \"Parsing web log...\"\n log = parse_weblog(WEBLOG_FILEPATH)\n print \"Keeping only store page entries...\"\n store_page_entries = keep_only_store_page(log)\n print \"Grouping entries by domain...\"\n store_pages = hash_entries(store_page_entries) \n print \"Calculating bounce rates for each store page...\"\n bounce_rates = compute_bounce_rate(store_pages)\n print \"Saving results to file...\"\n save_as_csv(bounce_rates, OUTPUT_PATH)", "def mainloop():\n client = MongoClient()\n db = client.test\n col_submissions=db.submissions\n col_scores=db.scores\n result={}\n mailer.load_address()\n \n for user,programname in listofParticipants():\n if user not in result:\n result[user]=[]\n program_dir=conf.participant_dir+user+'/'+programname\n program_name=conf.program_dir+programname+'.xml'\n\n # Check if this programis something we support\n if not os.path.isfile(program_name): \n result[user].append('The program *%s* is INVALID' % programname)\n result[user].append('-----------------------------------------------')\n result[user].append('Sorry but we did not recognize this program name. \\nPerhaps you created a private directory for some other purpose.')\n col_submissions.save({\n \"user_name\":user,\n \"program\":programname,\n \"program_result\":'INVALID PROGRAM',\n \"test_case_result\":[None,None,None],\n \"time\":time.time(),\n })\n continue\n \n # Get more info about the program\n tree = ET.parse(program_name)\n root=tree.getroot()\n program_score=int(root.find(\"score\").text)\n program_timeout = root.find('time-limit')\n input_type = root.find('input-type')\n case_sensitive = root.find('case-sensitive')\n validation_program = root.find('validation-program')\n validation_program_info = root.find('validation-program-info')\n multi_line = root.find('multi-line')\n\n program_timeout = int(program_timeout.text) if program_timeout is not None else 5\n input_type = input_type.text if input_type is not None else 'text'\n case_sensitive = True if case_sensitive is not None and case_sensitive.text == 'true' else False\n validation_program = validation_program.text if validation_program is not None else None \n validation_program_info = validation_program_info.text if validation_program_info is not None else ''\n multi_line = True if multi_line is not None and multi_line.text == 'true' else False\n \n # Compile the program\n with file('compilation error.txt','w') as fp:\n ret=subprocess.call(['/bin/bash','compile.sh'],cwd=program_dir,\n stderr=fp,\n stdout=fp)\n if ret!=0:\n with file('compilation error.txt','r') as fp:\n error=fp.read()\n print error\n result[user].append('program *%s* [COMPILATION FAILED]' % programname)\n result[user].append(error)\n print \"==> Saving submission record in the DB, after compilation failure <==\"\n col_submissions.save({\n \"user_name\":user,\n \"program\":programname,\n \"program_result\":'COMPILATION FAILED',\n \"test_case_result\":[None,None,None],\n \"time\":time.time(),\n \"error\": error\n })\n continue\n \n # Execute the test cases\n p_pass=[]\n p_fail=[]\n p_error=[]\n \n # Hard_code_warning\n 
inputs_found = 0\n total_inputs = 0\n\n for input_i,output_o,description_d in inputoutput(programname):\n # Create the command to run. In case of file inputs, make\n # sure filenames are formatted for {pdir}\n run_cmd = ['/bin/bash','run.sh']\n additional_args = shlex.split(input_i)\n for each_arg in additional_args:\n null_file = file('/dev/null','w')\n is_found = subprocess.call('grep %s *' % each_arg, shell=True, cwd=program_dir, stdout=null_file, stderr=null_file)\n if is_found == 0:\n inputs_found += 1\n total_inputs += 1\n\n if input_type == 'filename':\n additional_args = [x.format(pdir=conf.program_dir[0:-1]) for x in additional_args]\n run_cmd.extend(additional_args)\n try:\n cmd_op = timed_execution.check_output_with_timeout(run_cmd, cwd=program_dir, timeout=program_timeout)\n cmd_op=cmd_op.strip()\n if not case_sensitive:\n cmd_op = cmd_op.lower()\n \n program_passed = False\n \n if validation_program is None:\n if not multi_line:\n program_passed = (cmd_op == output_o)\n else:\n expected_lines = sorted(x.strip() for x in output_o.split('\\n'))\n actual_lines = sorted(x.strip() for x in cmd_op.split('\\n'))\n expected_lines = filter(lambda x:x, expected_lines)\n actual_lines = filter(lambda x:x, actual_lines)\n if len(expected_lines) != len(actual_lines):\n print \"line count mismatch\"\n program_passed = False\n else:\n for el, al in zip(expected_lines, actual_lines):\n if el!=al:\n program_passed = False\n print \"line mismatch %s and %s\" % (el, al)\n break\n else:\n program_passed = True\n else:\n i_file = 'validation_program_inputs.json'\n with file(i_file, 'w') as fp:\n json.dump({'inputs': additional_args,\n 'output':cmd_op,\n 'info': validation_program_info}, fp)\n \n with file('validation_program_output.txt', 'w') as fp:\n prog_path = '{pdir}{pcode}'.format(pdir=conf.program_dir, pcode=validation_program)\n validation_result = subprocess.check_call(['python', prog_path, i_file])\n if validation_result == 0:\n program_passed = True\n else:\n program_passed = False\n \n if program_passed:\n p_pass.append('%s %s [successful]' %(description_d, programname))\n else:\n p_fail.append('%s %s [failed]' %(description_d, programname))\n if case_sensitive:\n p_fail.append('=============== Actual output =================')\n else:\n p_fail.append('======= Actual output (in lower case) =========')\n p_fail.append(cmd_op)\n p_fail.append('============== Expected output ================')\n p_fail.append(output_o)\n if input_type == 'filename':\n p_fail.append('======= Input Provided (file content) =========')\n else:\n p_fail.append('=========== Input Provided (args) =============')\n p_fail.append(input_i)\n p_fail.append('================================================')\n \n except Exception as ex:\n p_error.append('%s %s [error]' %(description_d, programname))\n p_error.append(str(ex))\n\n if len(p_pass) == 0:\n result[user].append('program *%s* [FAIL]' % programname)\n progstatus = 'FAIL'\n your_score = 0\n elif len(p_fail)+len(p_error)==0:\n result[user].append('program *%s* [SUCCESSFUL]' % programname)\n progstatus = 'SUCCESSFUL'\n your_score = program_score\n else:\n result[user].append('program *%s* [PARTIALLY SUCCESSFUL]' % programname)\n progstatus = 'PARTIALLY SUCCESSFUL'\n partial = root.find('partial')\n if partial and partial.text == 'true':\n your_score = (program_score * len(p_pass)) / (len(p_pass) + len(p_fail) + len(p_error))\n else:\n your_score = 0\n \n if (inputs_found*100)/total_inputs > 25:\n your_score = 0\n result[user].append('=================== WARNING 
for program %s =====================' % program_name)\n result[user].append(\"Too may inputs found in the directory\")\n result[user].append(\"If this is not intentional clean up your directory and remove hard coded inputs\")\n result[user].append(\"================================================================================\")\n \n result[user].extend(p_pass)\n result[user].extend(p_fail)\n result[user].extend(p_error)\n \n print \"==> Saving submission record in the DB, after execution <==\"\n col_submissions.save({\n \"user_name\":user,\n \"program\":programname,\n \"program_result\":progstatus,\n \"test_case_result\":[p_pass,p_fail,p_error],\n \"time\":time.time()\n })\n \n # If the user gets some score, update the score collection with latest\n # information\n if your_score:\n current_score = col_scores.find_one({'user_name':user})\n if not current_score:\n current_score = {\n 'user_name': user,\n 'programs': {}\n }\n current_score['programs'][programname] = {\n 'status': progstatus,\n 'score': your_score\n }\n col_scores.save(current_score)\n progs = current_score['programs']\n total_score = sum(progs[x]['score'] for x in progs)\n result[user].insert(0, \"=======================================\")\n result[user].insert(0, \"YOUR NEW SCORE IS %s\" % str(total_score))\n \n # Send the results to all users who submissions are found in this round\n for user in result:\n subject = \"Result of latest submission for %s. \" % user\n content = \"\\n\".join(result[user])\n mailer.feedbackmail(user,subject, content)", "def getRelevantIssues(db, data):\n last_updated = data.get('updated', None)\n query = {'$and': [\n {'jira.fields.issuetype.name': {'$nin': ['Tracking']}},\n {'jira.fields.project.key': {'$in': ['CS', 'MMSSUPPORT', 'SUPPORT',\n 'PARTNER']}},\n ]\n }\n\n if last_updated is None:\n # Only filter the first time, since we want to know if issues on the\n # dashboard have closed\n query['$and'].append({'jira.fields.status.name': {\n '$in': ['Open', 'Reopened', 'In Progress',\n 'Waiting for Customer', 'Waiting For User Input']}\n }\n )\n else:\n query[\"$and\"].append({\"jira.fields.updated\": {\n \"$gte\": last_updated\n }\n })\n\n # Only need these fields for determining if they belong, and displaying\n # them on the dashboard\n proj = {'_id': 0,\n 'dash.active.now': 1,\n 'deleted': 1,\n 'jira.fields.assignee': 1,\n 'jira.fields.created': 1,\n 'jira.fields.issuetype': 1,\n 'jira.fields.labels': 1,\n 'jira.fields.priority.id': 1,\n 'jira.fields.reporter': 1,\n 'jira.fields.status': 1,\n 'jira.fields.updated': 1,\n 'jira.fields.comment.comments.author.emailAddress': 1,\n 'jira.fields.comment.comments.created': 1,\n 'jira.fields.comment.comments.updated': 1,\n 'jira.fields.comment.comments.visibility': 1,\n 'jira.key': 1,\n 'jira.tags': 1,\n 'sla': 1,\n }\n cur = db.issues.find(query, proj)\n cur.batch_size(100000)\n return cur", "def received(self, data):\n try:\n has_wanted = hasattr(self, 'wanted_ticket_ids')\n for bug in self.parse(data):\n if has_wanted and bug.id not in self.wanted_ticket_ids:\n continue # manually skip unwanted tickets\n self.bugs[bug.id] = bug\n except BaseException, e:\n EXCEPTION(u\"Could not parse tracker response\")\n self.fail(e)\n else:\n self.success()", "def collect_pipeline_runs(self):\n db = self.mongo_client.metalearning\n collection = db.pipeline_runs\n collection_size = collection.count()\n pipeline_cursor = collection.find()\n list_of_experiments = {\"classification\": [], \"regression\": []}\n for index, pipeline_run in 
enumerate(pipeline_cursor):\n if index % 1000 == 0:\n print(\"At {} out of {} documents\".format(index, collection_size))\n # if index == 2000:\n # # running into memory errors\n # break\n pipeline_run_info = self.get_pipeline_run_info(pipeline_run)\n metafeatures = self.get_metafeature_info(pipeline_run)\n # TODO: get all metafeatures so we don't need this\n if metafeatures != {}:\n experiment_json = dict(pipeline_run_info, **metafeatures)\n list_of_experiments[experiment_json[\"problem_type\"]].append(experiment_json)\n\n for problem_type in list_of_experiments.keys():\n final_data_file = json.dumps(list_of_experiments[problem_type], sort_keys=True, indent=4, default=json_util.default)\n with open(\"data/complete_pipelines_and_metafeatures_test_{}.json\".format(problem_type), \"w\") as file:\n file.write(final_data_file)\n\n return", "def temp_fix():\n import os\n from dateutil.parser import parse\n from gather_data import read_df_from_file, get_dataframe_pickle_files\n\n df_pick_files = get_dataframe_pickle_files(df_pickle_dir='/Users/ken/Downloads')\n for f in df_pick_files:\n t1 = parse_date_from_filename(f)\n ee_stats = read_df_from_file(f)\n fname = '/Users/ken/Downloads/ee_stats_' + t1.strftime('%Y-%m-%d') + '.pkl'\n print 'saving to %s' % fname,\n with open(fname, 'wb') as fh:\n pickle.dump(ee_stats, fh, protocol=pickle.HIGHEST_PROTOCOL)\n print 'done'", "def main():\n\t\tn = 0 \n\t\tfor page in range(pages):\n\t\t\t\tpageNumber = str(page + 1)\n\t\t\t\tprint \"Processing page number \" + pageNumber\n\t\t\t\tpageUrl = 'https://api.github.com/users/' + USER + '/gists?page=' + pageNumber + '&per_page=' + str(int(perpage))\n\t\t\t\tu = urlopen (pageUrl)\n\t\t\t\tgists = json.load(u)\n\t\t\t\t\t\t \n\t\t\t\tfor gist in gists:\n\t\t\t\t\t\tn += 1\n\t\t\t\t\t\tprint \"==== %d ====\" % n\n\t\t\t\t\t\t# print gist.keys()\n\t\t\t\t\t\tgistd = gist['id']\n\t\t\t\t\t\tgisturl = gist['html_url']\n\t\t\t\t\t\tgistdesc = gist['description'] or gistd\n\t\t\t\t\t\tgistfiles = gist['files']\n\t\t\t\t\t\tprint \"gistd: \", gistd\n\t\t\t\t\t\tprint \"gisturl: \", gisturl\n\t\t\t\t\t\tprint \"gistdesc: \", gistdesc\n\t\t\t\t\t\tprint \"gistfiles: \", len(gistfiles)\n\t\t\t\t\t\tfor f in gistfiles:\n\t\t\t\t\t\t\t\tfileurl = gistfiles[f]['raw_url']\n\t\t\t\t\t\t\t\t_filetype = gistfiles[f]['language']\n\t\t\t\t\t\t\t\tif _filetype in ALLOWED_FILE_TYPES:\n\t\t\t\t\t\t\t\t\t\tfiletype = _filetype\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfiletype = \"None\"\n\t\t\t\t\t\t\t\tprint \"fileurl: \", fileurl \n\t\t\t\t\t\t\t\tprint \"filetype: \", filetype, \"(found='%s')\" % _filetype \n\t\t\t\t\t \n\t\t\t\t\t\t\t\tif TESTING:\n\t\t\t\t\t\t\t\t\t\t# testing\n\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t\t\t# bodytext = 
ucontent\n\t\t\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\t\tprint e\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"*** ERROR WRITING TO sqlite3 ***\"\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\tif TESTING:\n\t\t\t\t\t\t# so to avoid calling github API too much...\n\t\t\t\t\t\tbreak" ]
[ "0.6098931", "0.581118", "0.58095926", "0.5599205", "0.55810404", "0.55514055", "0.5436911", "0.5393213", "0.5392565", "0.52618045", "0.5231558", "0.52075624", "0.519109", "0.514051", "0.5135371", "0.5125481", "0.5122285", "0.50928766", "0.50890225", "0.50798535", "0.50633335", "0.5046962", "0.50446755", "0.5044202", "0.5043592", "0.5025236", "0.5025231", "0.5024478", "0.50176877", "0.5013452", "0.50071526", "0.5003553", "0.4959374", "0.4956608", "0.49319378", "0.4921373", "0.4919737", "0.4917041", "0.490543", "0.4891044", "0.48646104", "0.48575196", "0.48494437", "0.48410088", "0.4837259", "0.48172444", "0.48136", "0.48040375", "0.47939235", "0.47933844", "0.47844607", "0.47791222", "0.4770006", "0.4764557", "0.47612804", "0.47537613", "0.47502515", "0.4744882", "0.47310898", "0.47096658", "0.47080168", "0.47048402", "0.4697699", "0.46955904", "0.46945596", "0.4693657", "0.46876943", "0.46807113", "0.4679788", "0.46700647", "0.4667543", "0.4662879", "0.46591723", "0.46591398", "0.465408", "0.46519884", "0.46408334", "0.46384344", "0.4637989", "0.4637568", "0.46367916", "0.46365815", "0.4628012", "0.46241263", "0.46233988", "0.4613312", "0.46112585", "0.4605221", "0.46040443", "0.4602579", "0.46001655", "0.4596707", "0.45924383", "0.4591785", "0.4582609", "0.45824575", "0.4580904", "0.45762974", "0.45669708", "0.4566113" ]
0.7346313
0
Crawls for all comments belonging to the bugs in the BugIDList.
def get_all_comments(self, idList: Union[List, str]) -> None: #loads pickle list if it is one if type(idList) == str and ".pickle" in idList: print("pickle load") with open(idList, "rb") as f: idList = pickle.load(f) elif type(idList) == str: print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file " "(needs to contain .pickle).") #goes through idList for id in tqdm(idList): #performs request and replaces trouble some parts commentsString = self.session.get(self.commentURL.format(id)).text.\ replace('true', 'True').replace('false', 'False').replace('null', 'None') #gets only the comments commentsDict = ast.literal_eval(commentsString)["bugs"][str(id)]["comments"] #enters comments into db or file if there are any comments for the id if commentsDict: if self.mongoDB: self.mongoDB["Comments"].insert_many(commentsDict) if self.folder: with open(self.folderpath + "Bugzilla_Comments.txt", 'a') as f: f.write(str(commentsDict) + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)", "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all", "def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = 
requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']", "def main(u, o):\n click.echo(f\"Web crawling on {u} started successfully...\")\n\n comment_regex = re.compile('<!--(.*?-->)')\n\n with requests.Session() as session:\n resp = session.get(u)\n soup = BeautifulSoup(resp.text, 'lxml')\n #TODO: search for hidden attributes, may be useful\n comments = soup.find_all(text=comment_regex)\n print(comments)", "def test_issue_get_comments(self):\n pass", "def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()", "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()", "def all_comments_by_docket_id(docket_id,\n sort_by='postedDate', sort_order='ASC'):\n # Determine total number of public submissions in docket.\n params = {'docket_id': docket_id, 'document_type': 'PS'}\n total_records = RegulationDocumentSearch.number_of_records(**params)\n\n # Use the maximum page size to download all public submissions.\n documents = []\n for page in range(total_records // 1000 + 1):\n parameters = {\n 'docket_id': docket_id,\n 'document_type': 'PS',\n 'results_per_page': 1000,\n 'offset': page * 1000,\n 'sort_by': sort_by,\n 'sort_order': sort_order\n }\n response = 
RegulationDocumentSearch.by_docket_id(**parameters)\n documents.extend(response['documents'])\n\n return documents", "def _commentsInThisFunction(self):\n show_unique_c = self.config.display_unique_comments\n\n msg = \"Searching comments within function '\" + misc.get_function_name() + \"'\"\n self._console_output(msg)\n\n comment_list = self.ba.comments_in_function()\n\n # Found any comment at all?\n nrows = len(comment_list)\n if not nrows:\n self._console_output(\"[!] No comments found\", err = True)\n return\n\n self.table.setColumnCount(2)\n self.table_label.setText(\"Comments within current function\")\n self.table.setHorizontalHeaderLabels((\"Address\", \"Comments\"))\n self.table.clearContents()\n self.table.setRowCount(0)\n\n # Fill with contents\n displayed_comments = []\n\n idx = 0\n for (addr, comment) in comment_list:\n if show_unique_c and comment in displayed_comments:\n continue\n\n displayed_comments.append(comment)\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%08x\" % addr)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n comment_item = QTableWidgetItem(comment)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, comment_item)\n\n idx += 0", "def tick(self):\n\n # Get new comments from /r/all\n print('\\n\\nRetrieving comments...', end=\"\")\n comments = list(self.reddit.get_comments('all', limit=None))\n print('[DONE]')\n\n comment_count = comments.__len__()\n print('Comments to read: ' + str(comment_count))\n for i in range(0, comment_count):\n comment = comments[i]\n\n # Update percent counter\n pcent = i / float(comment_count) * 100\n print('\\rReading comments: [%d%%]' % pcent, end=\"\")\n time.sleep(0.1)\n\n # Parse words\n words = comment.body.split()\n permalink = None\n for word in words:\n if word.startswith('/u/'):\n\n # Get the redditor\n redditor = self.parse_redditor(word)\n if redditor is None:\n continue\n\n # Check to see if we've parsed this comment already\n permalink = comment.permalink\n if permalink in self.already_done:\n print('Comment was already read.')\n break\n\n # Notify the mentioned redditor\n self.notify('comment', redditor, permalink, comment.body, comment.author.name)\n self.record_mention(redditor.name, 'comment')\n\n # permalink will not be None if a user was notified\n if permalink is not None:\n self.already_done.append(permalink)\n\n # Wait 30 seconds\n print('')\n util.wait(30)", "def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)", "def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None:\n # loads pickle list if it is one\n if type(list) == str and \".pickle\" in list:\n print(\"wat\")\n with open(list, \"rb\") as f:\n list = pickle.load(f)\n elif type(list) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #gets workers and splits list into chunks fitting the worker amount\n pool = Pool(workers)\n list = np.array(list)\n lists = np.array_split(list, workers)\n\n #each worker crawls for comments\n for sub_list in lists:\n print(sub_list)\n pool.apply_async(self.get_all_comments, (sub_list,))\n\n pool.close()\n pool.join()", "def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, 
title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()", "def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. 
Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()", "def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()", "def get_comments(self, project_id, forum_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/' + str(forum_id) + '/comments/'\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_comments(response)", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def watch2():\n\tcomments = r.get_comments('all', limit=None)\n\tfor comment in comments:\n\t\tif comment in visited:\n\t\t\tcontinue\n\t\telse:\n\t\t\tvisited[comment] = 1\n\t\t\tif \"LexiconBot define\" in comment.body:\n\t\t\t\tprint comment, \"from\", comment.permalink, \" / \", comment.submission\n\t\t\t\tmsg = define(comment.body.split()[2])\n\t\t\t\tcomment.reply(msg)\n\n\tprint \"Sleeping...\"\n\tsleep(1)", "def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n 
print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments", "def all_user_comments(username):\n return commentslist", "def get_comments(self):\n raise NotImplementedError", "def comments_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"comments\", access_token)", "def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val", "def get_comments(convo_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/comments\"\n payload = {}\n headers = {\"Authorization\": BEARER_TOKEN}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n for comment in response.json()[\"_results\"]:\n # For each comment in Front, print out its message\n print_friendly_JSON_object(comment[\"body\"])", "def run(self):\n comment_df_list = []\n post_df_list = []\n subreddit_df_list = []\n\n reddit = sr.reddit_interface()\n subreddits = reddit.subreddits.popular(limit = SUBREDDIT_LIMIT) # Lists the top 50 subreddits\n\n for subreddit in subreddits:\n top_posts = reddit.subreddit(str(subreddit)).top()\n for post in top_posts:\n if not post.stickied:\n post_list = [post.id, str(post.subreddit), post.title, post.num_comments]\n post.comments.replace_more(limit = 0)\n for comment in post.comments.list():\n comment_list = [str(comment.parent()), comment.id, comment.body, int(comment.score)]\n comment_df_list.append(comment_list)\n post_df_list.append(post_list)\n subreddit_df_list.append([str(subreddit)])\n\n comment_df_list = pd.DataFrame(comment_df_list, columns = COMMENTS_COLUMNS)\n post_df_list = pd.DataFrame(post_df_list, columns = POSTS_COLUMNS)\n subreddit_df_list = pd.DataFrame(subreddit_df_list, columns =['Subreddit'])\n reddit_df = [subreddit_df_list, post_df_list, comment_df_list]\n sr.save_xlsx(reddit_df, self.output().path)", "def get_comments(self):\n\t\tself.comments = graph.get_connections(post['id'], 'comments')", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def comments(accountable):\n comments = accountable.issue_comments()\n headers = sorted(['author_name', 'body', 'updated'])\n\n if comments:\n rows = [[v for k, v in sorted(c.items()) if k in headers]\n for c in comments]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No comments found for {}'.format(\n accountable.issue_key\n ), fg='red')", "def comment_data(post_id: str, \n sub_reddit: str):\n url_to_open = f\"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json\"\n success_status = 0\n while success_status != 200:\n try:\n response = urlopen(url_to_open, timeout=10)\n success_status = 
response.status\n except HTTPError:\n logging.info(f\"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.\")\n time.sleep(120)\n success_status = 400\n \n sub_reddit_page = json.loads(response.read())\n comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])\n comments_df['post_id'] = post_id\n comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n comments_df['reply'] = 'N'\n comments_df['comment_replied_id'] = ''\n # get all replies \n replies_list = []\n for comment in sub_reddit_page[1]['data']['children']:\n replies = comment.get('data').get('replies')\n comment_id = comment.get('data').get('id') \n if replies is None or replies == '':\n pass\n else:\n replies_df = pd.json_normalize(replies['data']['children'])\n try:\n replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n except KeyError:\n pass\n replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n replies_df['reply'] = 'Y'\n replies_df['comment_replied_id'] = comment_id\n replies_df['post_id'] = post_id\n replies_list.append(replies_df)\n if len(replies_list) == 1:\n all_replies = replies_list[0]\n elif len(replies_list) > 1: \n all_replies = pd.concat(replies_list, ignore_index = True)\n else:\n all_replies = None \n\n column_order = [c for c in comments_df.columns]\n comments_df = comments_df[column_order]\n if all_replies is not None:\n all_replies = all_replies[column_order]\n all_comments_replies = pd.concat([comments_df, replies_df], ignore_index=True)\n else:\n all_comments_replies = comments_df\n\n return all_comments_replies", "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def get_comments(self, sort, time):\r\n from r2.models import Comment\r\n return self.get_links(sort, time, Comment)", "def get_comment(self, index):\r\n\r\n # Get request to get all the comments for all exercises\r\n comments = requests.get(API.url_comment, headers = self.headers).json()\r\n # Parse the response\r\n for my_comment in comments:\r\n if my_comment['id'] == index:\r\n print(my_comment['comment'])", "def comments(self, limit=100, all=False):\n source, edge = self.id, \"comments\"\n return lazygen(Comment, source, edge,\n limit=limit, get_all=all)", "def get_comments_between(self, start_date, end_date):\n ret = []\n ids = self.get_post_ids(start_date, end_date)\n\n for id in ids:\n comments = self.reddit.submission(id).comments\n ret.append(self.get_nested_comments(comments))\n return ret", "def get_comments(self, resp):\n comments = CommentList()\n for value in resp['comments']:\n comment = Comment()\n comment.set_comment_id(value['comment_id'])\n comment.set_expense_id(value['expense_id'])\n comment.set_description(value['description'])\n comment.set_commented_by_id(value['commented_by_id'])\n comment.set_commented_by(value['commented_by'])\n comment.set_date(value['date'])\n comment.set_date_description(value['date_description'])\n 
comment.set_time(value['time'])\n comment.set_operation_type(value['operation_type'])\n comment.set_transaction_id(value['transaction_id'])\n comment.set_transaction_type(value['transaction_type'])\n comments.set_comments(comment)\n return comments", "def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments", "def test_projects_id_comments_get(self):\n response = self.client.open('/project-tracker/projects/{id}/comments'.format(id=56),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def get_comments_by_ids(self, comment_ids):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_ids\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('commenting',\n collection='Comment',\n runtime=self._runtime)\n object_id_list = []\n for i in comment_ids:\n object_id_list.append(ObjectId(self._get_id(i, 'commenting').get_identifier()))\n result = collection.find(\n dict({'_id': {'$in': object_id_list}},\n **self._view_filter()))\n result = list(result)\n sorted_result = []\n for object_id in object_id_list:\n for object_map in result:\n if object_map['_id'] == object_id:\n sorted_result.append(object_map)\n break\n return objects.CommentList(sorted_result, runtime=self._runtime, proxy=self._proxy)", "def get_bugs(self, year):\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n for bug in helpers.load_json(path):\n yield bug", "def analyze_comments():\n\n scores = {} # {docket_id: [comment1_score, comment2_score, ...]}\n positive_counts = {} # {docket_id: num_positive_comments}\n neutral_counts = {} # {docket_id: num_neutral_comments}\n negative_counts = {} # {docket_id: num_negative_comments}\n\n comment_sentiments = {} # {comment_id: sentiment} to write to database\n comment_complexity = {} # {comment_id: complexity} to write to database\n\n for comment in lib.mongo.retrieve_comments(1000):\n docket_id = comment['docketId']\n comment_id = comment['documentId']\n text = comment.get('commentText', '').strip()\n\n # Fill in the 'sentiment' field of this comment.\n if 'sentiment' in comment:\n score = comment['sentiment']\n else:\n score = lib.analyze_text.getSentiment(text)\n comment_sentiments[comment_id] = score\n\n logging.info('docket %s, comment %s: sentiment %s (%r)' %\n (docket_id, comment_id, score, text[:20]))\n\n # Fill in the 'complexity' field of this comment.\n if 'complexity' not in comment:\n comment_complexity[comment_id] = lib.analyze_text.get_complexity(text)\n\n # Aggregate the sentiment scores for each docket.\n scores.setdefault(docket_id, []).append(score)\n counts = positive_counts if score > 0 else (\n negative_counts if score < 0 else neutral_counts)\n counts[docket_id] = counts.get(docket_id, 0) + 1\n\n if len(comment_sentiments) >= 10:\n logging.info('updating %d comments sentiment...' % len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n comment_sentiments = {}\n\n if len(comment_complexity) >= 10:\n logging.info('updating %d comments complexity...' % len(comment_complexity))\n lib.mongo.update_comments('complexity', comment_complexity)\n comment_complexity = {}\n\n logging.info('updating %d comments...' 
% len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n lib.mongo.update_comments('complexity', comment_complexity)\n logging.info('done!')\n\n docket_sentiments = {} # {docket_id: sentiment} to write to database\n\n for docket in lib.mongo.dockets.find():\n docket_id = docket.get('docketId', '')\n positive_count = positive_counts.get(docket_id, 0)\n neutral_count = neutral_counts.get(docket_id, 0)\n negative_count = negative_counts.get(docket_id, 0)\n rating = compute_rating(positive_count, neutral_count, negative_count)\n logging.info('docket %s: %d positive, %d neutral, %d negative - %s' %\n (docket_id, positive_count, neutral_count, negative_count,\n rating))\n\n docket_sentiments[docket_id] = {\n 'positive': positive_count,\n 'neutral': neutral_count,\n 'negative': negative_count,\n 'rating': rating\n }\n\n logging.info('updating %d dockets...' % len(docket_sentiments))\n lib.mongo.update_dockets('sentiment', docket_sentiments)\n logging.info('done!')", "def comments(self):\n return self.get_queryset().filter(content_type__model='comment').order_by('-comments__createdAt')", "def get_comments(self):\n\t\treturn self._client.get_comments(self)", "def load_comments_by_post_id(self, id):\n comments = self.session.query(Comment).filter(Comment.post_id == id).all()\n return comments", "def _apply_comment(self, iid, comment):\n data = {\"body\" : comment._body}\n resp = self._post(\n self._base + \"/issues/{}/comments\".format(iid),\n data=self._format_data(data))", "def comment(self, *comments):\n for comment in comments:\n self._p('[*]', comment)", "def dfs(comment, fun):\n # comment has no replies\n if not comment.replies:\n return\n else:\n for r in comment.replies:\n # do something with a comment here\n fun(r)\n # recurr\n Comment.dfs(r, fun)", "def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url", "def return_filtered_comments(submission):\n submission.comment_sort = COMMENT_SORT_BY\n submission.comment_limit = COMMENT_LIMIT\n filtered_comments = []\n for top_level_comment in submission.comments:\n if isinstance(top_level_comment, praw.models.MoreComments):\n continue\n # Here you can fetch data off the comment.\n comment = top_level_comment.body\n\n # ensure that the comment does not contain any words in blacklist\n # and also it is less than COMMENT_MAX_WORDS\n fail_test = 0\n lcomment = comment.lower()\n for badword in blacklist:\n if badword not in lcomment and len(comment) < COMMENT_MAX_WORDS:\n pass\n else:\n fail_test += 1\n if not fail_test:\n filtered_comments.append(replace_words(comment).capitalize())\n\n return filtered_comments", "def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)", "def comments(self):\r\n return comments.ForumSuggestionComments(self)", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def comments(self):\r\n return c.Comments(self)", "def GET_comments(self, article, comment, context, sort, num_comments):\r\n if comment and comment.link_id != article._id:\r\n return self.abort404()\r\n\r\n if not c.default_sr and c.site._id != article.sr_id:\r\n return self.redirect(article.make_permalink_slow(), 301)\r\n\r\n # 
moderator is either reddit's moderator or an admin\r\n is_moderator = c.user_is_loggedin and c.site.is_moderator(c.user) or c.user_is_admin\r\n if article._spam and not is_moderator:\r\n return self.abort404()\r\n\r\n if not article.subreddit_slow.can_view(c.user):\r\n abort(403, 'forbidden')\r\n\r\n #check for 304\r\n self.check_modified(article, 'comments')\r\n\r\n # if there is a focal comment, communicate down to comment_skeleton.html who\r\n # that will be\r\n if comment:\r\n c.focal_comment = comment._id36\r\n\r\n # check if we just came from the submit page\r\n infotext = None\r\n if request.get.get('already_submitted'):\r\n infotext = strings.already_submitted % article.resubmit_link()\r\n\r\n check_cheating('comments')\r\n\r\n # figure out number to show based on the menu\r\n user_num = c.user.pref_num_comments or g.num_comments\r\n num = g.max_comments if num_comments == 'true' else user_num\r\n\r\n # Override sort if the link has a default set\r\n if hasattr(article, 'comment_sort_order'):\r\n sort = article.comment_sort_order\r\n\r\n builder = CommentBuilder(article, CommentSortMenu.operator(sort),\r\n comment, context)\r\n listing = NestedListing(builder, num = num,\r\n parent_name = article._fullname)\r\n\r\n displayPane = PaneStack()\r\n\r\n # if permalink page, add that message first to the content\r\n if comment:\r\n permamessage = PermalinkMessage(\r\n comment.make_anchored_permalink(\r\n context = context + 1 if context else 1,\r\n anchor = 'comments'\r\n ),\r\n has_more_comments = hasattr(comment, 'parent_id')\r\n )\r\n displayPane.append(permamessage)\r\n\r\n # insert reply box only for logged in user\r\n if c.user_is_loggedin and article.subreddit_slow.can_comment(c.user):\r\n displayPane.append(CommentReplyBox())\r\n #no comment box for permalinks\r\n if not comment:\r\n displayPane.append(CommentReplyBox(link_name =\r\n article._fullname))\r\n # finally add the comment listing\r\n displayPane.append(listing.listing())\r\n\r\n loc = None if c.focal_comment or context is not None else 'comments'\r\n\r\n if article.comments_enabled:\r\n sort_menu = CommentSortMenu(default = sort, type='dropdown2')\r\n if hasattr(article, 'comment_sort_order'):\r\n sort_menu.enabled = False\r\n nav_menus = [sort_menu,\r\n NumCommentsMenu(article.num_comments,\r\n default=num_comments)]\r\n\r\n content = CommentListing(\r\n content = displayPane,\r\n num_comments = article.num_comments,\r\n nav_menus = nav_menus,\r\n )\r\n else:\r\n content = PaneStack()\r\n\r\n is_canonical = article.canonical_url.endswith(_force_unicode(request.path)) and not request.GET\r\n\r\n res = LinkInfoPage(link = article, comment = comment,\r\n content = content,\r\n infotext = infotext,\r\n is_canonical = is_canonical).render()\r\n\r\n if c.user_is_loggedin:\r\n article._click(c.user)\r\n\r\n return res", "def get(self, id):\n return get_comments(id)", "def handle_free_comments(self):\r\n comments = FreeComment.objects.all()\r\n for c in comments:\r\n new = FreeThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n name = c.person_name,\r\n website = '',\r\n email = '',\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = c.approved\r\n )\r\n new.save()", "def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 
100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())", "def _comment():\r\n id = request.args.get('answer_id')\r\n per_page=current_app.config['FLASKY_ANSWERS_PER_PAGE']\r\n answer = Answer.query.get_or_404(id)\r\n page = request.args.get('page', type=int, default=1)\r\n comment =request.args.get('comment')\r\n if current_user.can(Permission.COMMENT) and comment is not None:\r\n comment = Comment(body=comment,\r\n author=current_user._get_current_object(),\r\n answer_id=id)\r\n db.session.add(comment)\r\n db.session.commit()\r\n page = -1\r\n if page == -1:\r\n page = answer.comments.count() / per_page\r\n pagination = Comment.query.order_by(Comment.timestamp).filter_by(answer_id=id).paginate(\r\n page,per_page=per_page,error_out=False\r\n )\r\n macro_comment = get_template_attribute(\"_comments.html\", \"render_comments\")\r\n macro_page = get_template_attribute(\"_page.html\", \"render_page\")\r\n comments = pagination.items\r\n return jsonify({'result': True,\r\n 'comment_html': macro_comment(comments),\r\n 'page_html':macro_page(pagination),\r\n 'comments_timestamp':[comment.timestamp for comment in comments],\r\n 'comments_id':[comment.id for comment in comments]\r\n })", "def search_thru_comments(urls, listOfKWs):\n browser = webdriver.Chrome('/Users/sophie/documents/chromedriverCurrent')\n\n listKWs = []\n for KW in listOfKWs:\n listKWs.append([KW])\n # ex: listKWs=[['poverty'], ['inequality'], ['aids'], ['hiv']]\n # list where list[something]=name of KW. 
append after that the urls.\n global listKWsDate\n listKWsDate = []\n for KW in listOfKWs:\n listKWsDate.append([KW])\n print(listKWs == listKWsDate)\n\n for link in urls:\n browser.get(link)\n\n source = browser.page_source\n data = bs(source, 'html.parser')\n body = data.find('body')\n script = body.find('script',\n text=lambda t: t.startswith('window._sharedData'))\n #print(script)\n scriptStr = str(script)\n scriptStr.replace(\"'\",\"\")\n #scriptSplit=script.split('shortcode')\n #print(scriptSplit)\n\n #pass to searchForEach which will check the indiv posts for all KWs\n # and will then add them to the appropriate spread sheet\n for KW in listOfKWs:\n searchForEachKW(KW, scriptStr, listKWs, listKWsDate)\n\n #need to change so that calls search for each KW here. so that\n # searching each link for all the hashtags, and then add link to\n # appropriatre kw spreadsheet\n\n return listKWs", "def comments(self, request, pk=None):\n post = self.get_object()\n comments = Comment.objects.filter(post=post).order_by('created_at')\n serializer = PostCommentsSerializer(comments, many=True)\n return Response(serializer.data, status.HTTP_200_OK)", "def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])", "def getComments(source):\n\n markup = []\n for f in source:\n markup += extractMarkup(f)\n\n docs = collateDocs(markup)\n return docs", "def wrap_comments(comment_list, cls=None):\n if not cls:\n cls = CommentDetails\n return [cls(d) for d in CachedCall.multicall([cmt.details for cmt in comment_list])]", "def comment_cable(cid, comment):\n\n SQL.execute('''\n SELECT \n cid,\n ticket,\n comment\n FROM \n cables\n WHERE\n cables.cid = ?\n LIMIT 1\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall():\n vlog(2, 'add comment to cable c%s: %s' % (cid, comment))\n\n SQL.execute('''\n UPDATE\n cables \n SET\n comment = ?\n WHERE\n cid = ?\n ;''', (\n comment,\n cid\n ));\n\n if row['ticket'] and not DISABLE_TICKETS:\n EV.add_resolver_comment(row['ticket'], 'Bad Cable Comment:\\n%s' % comment)\n vlog(3, 'Updated Extraview Ticket %s for c%s with comment: %s' % (row['ticket'], cid, comment))", "def get_comments(id_post):\n return Comms.objects.filter(post__id=id_post)", "def cmd_comment_report(client, args):\n comment_report = client.comment_report(args.comment_id)\n generate_output({'comment_report': comment_report})", "def getRemoteComments(post_id):\n servers = Server.objects.all()\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts/{}/comments\".format(host, post_id)\n print('Request:')\n print(server_api)\n try:\n r = requests.get(server_api, auth=(server.username, server.password))\n print(r)\n if r.status_code in [200, 201]:\n comments = r.json()\n return remoteCommentList(comments)\n except Exception as e:\n print(e)\n return None", "def do_comments(self, line):\n for comment in self.review.comments():\n print(comment)", "def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() 
for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()", "def list(self, number, user=None, repo=None):\n request = self.make_request('issues.comments.list', user=user,\n repo=repo, number=number)\n return self._get_result(request)", "def issues_comments_list(self, mar, request):\n issue = self._services.issue.GetIssueByLocalID(\n mar.cnxn, mar.project_id, request.issueId)\n comments = self._services.issue.GetCommentsForIssue(\n mar.cnxn, issue.issue_id)\n comments = [comment for comment in comments if not comment.approval_id]\n visible_comments = []\n for comment in comments[\n request.startIndex:(request.startIndex + request.maxResults)]:\n visible_comments.append(\n api_pb2_v1_helpers.convert_comment(\n issue, comment, mar, self._services, mar.granted_perms))\n\n return api_pb2_v1.IssuesCommentsListResponse(\n kind='monorail#issueCommentList',\n totalResults=len(comments),\n items=visible_comments)", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account 
\"\\\n \"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def comments(self, comments):\n\n self.container['comments'] = comments", "def comments(number):\n if g.browse_mode == \"normal\":\n item = g.model.songs[int(number) - 1]\n fetch_comments(item)\n\n else:\n g.content = generate_songlist_display()\n g.message = \"Comments only available for video items\"", "def parse_comment(comment, postid):\n urls = get_links_from_body(comment.body)\n if urls:\n # Only insert comment into DB if it contains a link\n comid_db = db.insert('Comments',\n (None,\n postid,\n comment.id,\n comment.author,\n comment.body,\n comment.upvotes,\n comment.downvotes,\n comment.created_utc))\n for url in urls:\n parse_url(url, postid=postid, commentid=comid_db)\n # Recurse over child comments\n for child in comment.children:\n parse_comment(child, postid)", "def test_issue_get_repo_comments(self):\n pass", "def get_list_of_comments(path):\n\n # opens comments file\n try:\n return [\n re.sub(\" +\", \" \", comment.strip().rstrip())\n for comment in list(open(path, \"r\"))\n ]\n except Exception as e:\n print(\"Error loading comments file: \", e)\n sys.exit(1)", "def comments(self):\r\n return IssueComments(self)", "def comments(self):\r\n return IssueComments(self)", "def get_comments_for_issue(owner, repo, issue_number, session=None):\n url = (\n f'{GITHUB_API_URL}/repos/{owner}/{repo}/issues/{issue_number}/comments'\n )\n return get_one_item_at_a_time(url, session=session)", "def get_comments_by_percentage(submission_id, percent_of_comments):\n comments_list = []\n submission = (REDDIT.submission(id=submission_id))\n max_comments = int(submission.num_comments * percent_of_comments)\n\n print(submission.num_comments)\n print(max_comments)\n\n comment_count = 0\n\n # sort comments by best and get list of id's\n submission.comment_sort = 'confidence'\n submission.comments.replace_more(limit=40)\n for comment_id in submission.comments.list():\n if comment_count >= max_comments:\n break\n comments_list.append(comment_id)\n comment_count += 1\n\n return comments_list", "async def get_todo_comments(self, *, todo: TodoInDB) -> List[CommentInDB]:\n comments = await self.db.fetch_all(query=GET_ALL_TODO_COMMENTS_QUERY, values={\"todo_id\": todo.id})\n return [CommentInDB(**comment) for comment in comments]", "def all_user_comments(username):\n # comment = [\n # comment for comment in commentslist if comment[\"username\"] == username\n # ]\n return commentslist", "def get_comments(youtube, video_id, channel_id):\n global nextPageToken\n \n results = youtube.commentThreads().list(\n part=\"snippet\", \n videoId=video_id, \n allThreadsRelatedToChannelId=AUTH_USER_CHANNEL_ID\n ).execute()\n\n nextPageToken = results.get(\"nextPageToken\")\n\n for item in results[\"items\"]:\n comment = item[\"snippet\"][\"topLevelComment\"]\n \tauthor = 
comment[\"snippet\"][\"authorDisplayName\"]\n \ttry:\n \t authorChannelId = comment[\"snippet\"][\"authorChannelId\"]\n \texcept KeyError:\n \t pass\n \tchannel = authorChannelId.get(\"value\")\n \t\n \tchannel_list.append(channel)\n \t\n return results[\"items\"]", "def get_comments(\n match_id: MatchID,\n *,\n party_id: Optional[PartyID] = None,\n include_hidden: bool = False,\n) -> Sequence[DbMatchComment]:\n query = DbMatchComment.query \\\n .for_match(match_id)\n\n if not include_hidden:\n query = query.filter_by(hidden=False)\n\n comments = query \\\n .for_match(match_id) \\\n .order_by(DbMatchComment.created_at) \\\n .all()\n\n # Add creator objects.\n creator_ids = {comment.created_by_id for comment in comments}\n creators_by_id = _get_users_by_id(creator_ids, party_id=party_id)\n for comment in comments:\n comment.creator = creators_by_id[comment.created_by_id]\n\n # Add rendered bodies.\n for comment in comments:\n comment.body_rendered = text_markup_service.render_html(comment.body)\n\n return comments", "def get_comments_for_reference(self, reference_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('commenting',\n collection='Comment',\n runtime=self._runtime)\n result = collection.find(\n dict({'referenceId': str(reference_id)},\n **self._view_filter())).sort('_sort_id', ASCENDING)\n return objects.CommentList(result, runtime=self._runtime)", "def get_comments(subreddit, start_date, end_date, limit):\n api = PushshiftAPI()\n return api.search_comments(after=start_date, before=end_date,\n subreddit=subreddit, limit=limit\n # , filter=['author', 'body', 'created_utc', 'nest_level']\n )", "def bug(request, bugid):\n user = request.user\n if user.is_authenticated:\n if request.method == \"POST\":\n user = request.user\n comment = request.POST['comment']\n ticket = get_object_or_404(Ticket, pk=bugid)\n if comment.strip() == '':\n messages.error(request, 'Comment message is required.')\n return redirect('bug', bugid=ticket.pk)\n\n comment = Comment(user=user, comment=comment, ticket=ticket)\n comment.save()\n messages.success(request, 'Thanks for your comment.')\n return redirect('bug', bugid=ticket.pk)\n\n current_bug = get_object_or_404(Ticket, pk=bugid)\n comments = Comment.objects.all().filter(ticket=bugid)\n votes = Vote.objects.all().filter(ticket=bugid).count()\n context = {\n 'bug': current_bug,\n 'comments': comments,\n 'votes': votes\n }\n return render(request, 'bug.html', context)", "def parse_comments(submission):\n comments = []\n submission.replace_more_comments()\n for c in praw.helpers.flatten_tree(submission.comments):\n comment_dict = c.__dict__\n\n # NOTE: author is a special case (and must be present)\n author = c.author.name if hasattr(c.author, \"name\") else None\n if not author:\n continue\n\n comment = {\n \"submission_id\": submission.id,\n \"author\": author\n }\n del comment_dict[\"author\"] # no longer needed\n for k in _model_columns(Comment):\n if k in comment_dict:\n comment[k] = comment_dict[k]\n comments.append(comment)\n\n return comments", "def get_comments(self, asset_id):\n endpoint = '/assets/{}/comments'.format(asset_id)\n return self._api_call('get', endpoint)", "def crawl(self):\n retrievedSubs = []\n reddit = praw.Reddit(\n client_id='QRl_4bwjckcg9A',\n client_secret='dsavqFoOk5NgWEOWtMf9NknwxRIoIw',\n password='P@ssword123',\n user_agent='cluelessv1',\n 
username='theclueless1009'\n )\n submissions = reddit.subreddit('all').search(self.keyword, sort='relevance', limit=50, time_filter='week')\n\n for sub in submissions:\n self.data = [sub.selftext, sub.upvote_ratio, sub.score,\n sub.title, sub.id, sub.total_awards_received, sub.created_utc]\n self.data = tuple(self.data)\n retrievedSubs.append(self.data)\n\n return retrievedSubs", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)", "def comments(self):\r\n return Comments(self)" ]
[ "0.64830816", "0.6414829", "0.64109683", "0.6044062", "0.5997073", "0.5927429", "0.5870754", "0.58518946", "0.57999045", "0.5750483", "0.5748707", "0.5685098", "0.5681446", "0.5678411", "0.5673924", "0.56592536", "0.5652936", "0.5610223", "0.5589951", "0.55766493", "0.55682576", "0.5560317", "0.5557409", "0.5521636", "0.5514581", "0.5503788", "0.5498528", "0.5495697", "0.54768056", "0.5469559", "0.5466173", "0.5460245", "0.5451036", "0.54494274", "0.5443303", "0.54021627", "0.5395336", "0.53917253", "0.53874224", "0.53821725", "0.53773534", "0.53709894", "0.5341772", "0.53301495", "0.5327651", "0.53258157", "0.53246033", "0.5315553", "0.5303043", "0.5294499", "0.52933705", "0.5284557", "0.52807397", "0.52805203", "0.52804685", "0.52795184", "0.5279341", "0.5279341", "0.5279341", "0.5276303", "0.52459824", "0.5243795", "0.5243333", "0.52424914", "0.5240439", "0.5219329", "0.52184606", "0.5204946", "0.52020955", "0.5199488", "0.5194928", "0.5187181", "0.51858085", "0.5179945", "0.5175232", "0.5172763", "0.51528096", "0.5150329", "0.51476437", "0.5113433", "0.51108867", "0.51007175", "0.5098851", "0.50926507", "0.50926507", "0.50868815", "0.507441", "0.50739604", "0.50738835", "0.5068847", "0.5063283", "0.50578964", "0.5054897", "0.5052209", "0.5049216", "0.50488865", "0.5046041", "0.5040988", "0.5040988", "0.5040988" ]
0.7094204
0
Crawls for all comments belonging to the bugs in the BugIDList utilizing parallelization.
def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None: # loads pickle list if it is one if type(list) == str and ".pickle" in list: print("wat") with open(list, "rb") as f: list = pickle.load(f) elif type(list) == str: print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file " "(needs to contain .pickle).") #gets workers and splits list into chunks fitting the worker amount pool = Pool(workers) list = np.array(list) lists = np.array_split(list, workers) #each worker crawls for comments for sub_list in lists: print(sub_list) pool.apply_async(self.get_all_comments, (sub_list,)) pool.close() pool.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")", "def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)", "async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count", "def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return 
_get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()", "def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all", "def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? 
this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()", "def ordered_crawling():\n queue.append(seed_url)\n visited.add(seed_url)\n while len(queue) >= 0:\n try:\n text = req_obj.get_html_text(queue[0])\n print queue[0]\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n\n result_file.write(str(queue[0]) + \", \" + str(link_weights[queue[0]]))\n er_file.write(\"###########\" + str(link_weights) + \"\\n\\n\\n\\n\")\n update_weights(text)\n queue.sort(compare)\n result_file.write(\"\\n\")\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n del link_weights[queue[0]]\n queue.pop(0)", "def run(self):\n comment_df_list = []\n post_df_list = []\n subreddit_df_list = []\n\n reddit = sr.reddit_interface()\n subreddits = reddit.subreddits.popular(limit = SUBREDDIT_LIMIT) # Lists the top 50 subreddits\n\n for subreddit in subreddits:\n top_posts = reddit.subreddit(str(subreddit)).top()\n for post in top_posts:\n if not post.stickied:\n post_list = [post.id, str(post.subreddit), post.title, post.num_comments]\n post.comments.replace_more(limit = 0)\n for comment in post.comments.list():\n comment_list = [str(comment.parent()), 
comment.id, comment.body, int(comment.score)]\n comment_df_list.append(comment_list)\n post_df_list.append(post_list)\n subreddit_df_list.append([str(subreddit)])\n\n comment_df_list = pd.DataFrame(comment_df_list, columns = COMMENTS_COLUMNS)\n post_df_list = pd.DataFrame(post_df_list, columns = POSTS_COLUMNS)\n subreddit_df_list = pd.DataFrame(subreddit_df_list, columns =['Subreddit'])\n reddit_df = [subreddit_df_list, post_df_list, comment_df_list]\n sr.save_xlsx(reddit_df, self.output().path)", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "def tick(self):\n\n # Get new comments from /r/all\n print('\\n\\nRetrieving comments...', end=\"\")\n comments = list(self.reddit.get_comments('all', limit=None))\n print('[DONE]')\n\n comment_count = comments.__len__()\n print('Comments to read: ' + str(comment_count))\n for i in range(0, comment_count):\n comment = comments[i]\n\n # Update percent counter\n pcent = i / float(comment_count) * 100\n print('\\rReading comments: [%d%%]' % pcent, end=\"\")\n time.sleep(0.1)\n\n # Parse words\n words = comment.body.split()\n permalink = None\n for word in words:\n if word.startswith('/u/'):\n\n # Get the redditor\n redditor = self.parse_redditor(word)\n if redditor is None:\n continue\n\n # Check to see if we've parsed this comment already\n permalink = comment.permalink\n if permalink in self.already_done:\n print('Comment was already read.')\n break\n\n # Notify the mentioned redditor\n self.notify('comment', redditor, permalink, comment.body, comment.author.name)\n self.record_mention(redditor.name, 'comment')\n\n # permalink will not be None if a user was notified\n if permalink is not None:\n self.already_done.append(permalink)\n\n # Wait 30 seconds\n print('')\n util.wait(30)", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def main(u, o):\n click.echo(f\"Web crawling on {u} started successfully...\")\n\n comment_regex = re.compile('<!--(.*?-->)')\n\n with requests.Session() as session:\n resp = session.get(u)\n soup = BeautifulSoup(resp.text, 'lxml')\n #TODO: search for hidden attributes, may be useful\n comments = soup.find_all(text=comment_regex)\n print(comments)", "def main():\n global collection\n #args = 
argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)", "def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()", "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "def analyze_comments():\n\n scores = {} # {docket_id: [comment1_score, comment2_score, ...]}\n positive_counts = {} # {docket_id: num_positive_comments}\n neutral_counts = {} # {docket_id: num_neutral_comments}\n negative_counts = {} # {docket_id: num_negative_comments}\n\n comment_sentiments = {} # {comment_id: sentiment} to write to database\n comment_complexity = {} # {comment_id: complexity} to write to database\n\n for comment in lib.mongo.retrieve_comments(1000):\n docket_id = comment['docketId']\n comment_id = comment['documentId']\n text = comment.get('commentText', '').strip()\n\n # Fill in the 'sentiment' field of this comment.\n if 'sentiment' in comment:\n score = comment['sentiment']\n else:\n score = lib.analyze_text.getSentiment(text)\n comment_sentiments[comment_id] = score\n\n logging.info('docket %s, comment %s: sentiment %s (%r)' %\n (docket_id, comment_id, score, text[:20]))\n\n # Fill in the 'complexity' field of this comment.\n if 'complexity' not in comment:\n comment_complexity[comment_id] = lib.analyze_text.get_complexity(text)\n\n # Aggregate the sentiment scores for each docket.\n scores.setdefault(docket_id, []).append(score)\n counts = positive_counts if score > 0 else (\n negative_counts if score < 0 else neutral_counts)\n counts[docket_id] = counts.get(docket_id, 0) + 1\n\n if len(comment_sentiments) >= 10:\n logging.info('updating %d comments sentiment...' % len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n comment_sentiments = {}\n\n if len(comment_complexity) >= 10:\n logging.info('updating %d comments complexity...' % len(comment_complexity))\n lib.mongo.update_comments('complexity', comment_complexity)\n comment_complexity = {}\n\n logging.info('updating %d comments...' 
% len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n lib.mongo.update_comments('complexity', comment_complexity)\n logging.info('done!')\n\n docket_sentiments = {} # {docket_id: sentiment} to write to database\n\n for docket in lib.mongo.dockets.find():\n docket_id = docket.get('docketId', '')\n positive_count = positive_counts.get(docket_id, 0)\n neutral_count = neutral_counts.get(docket_id, 0)\n negative_count = negative_counts.get(docket_id, 0)\n rating = compute_rating(positive_count, neutral_count, negative_count)\n logging.info('docket %s: %d positive, %d neutral, %d negative - %s' %\n (docket_id, positive_count, neutral_count, negative_count,\n rating))\n\n docket_sentiments[docket_id] = {\n 'positive': positive_count,\n 'neutral': neutral_count,\n 'negative': negative_count,\n 'rating': rating\n }\n\n logging.info('updating %d dockets...' % len(docket_sentiments))\n lib.mongo.update_dockets('sentiment', docket_sentiments)\n logging.info('done!')", "def _crawl(self, data: list) -> list:\n \n\n self.smph = th.Semaphore(self.MAX_THREADS)\n self.miss_lock = th.Lock()\n self.data_lock = th.Lock()\n\n total = len(data)\n\n with Progress(total) as self.prog:\n for url, *args in data:\n self.smph.acquire()\n new_thread = th.Thread(target=self._crawl_this, args=(url, *args))\n new_thread.start()\n self.prog.wait()\n\n return self._miss", "def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)", "async def crawl(self):\n fetch_urls = [self.start_url]\n results = []\n while len(fetch_urls):\n \"\"\"\n slicing array urls with max_async_call arg and then run extract_data_urls\n extract_data_urls return a object that contains url, data, found_urls, and all_urls\n url is a url that we crawled\n data is Html content of the url\n found_urls are new urls that we have to crawl that\n all_urls are all links in the html page\n \"\"\"\n urls = await self.extract_data_urls(fetch_urls[0:self.max_async_call])\n del fetch_urls[0:self.max_async_call]\n for url, data, found_urls, all_urls in urls:\n fetch_urls.extend(found_urls)\n result = self.parse_html_content(data)\n result['urls'] = all_urls\n results.append((url, result))\n return results", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def all_comments_by_docket_id(docket_id,\n sort_by='postedDate', sort_order='ASC'):\n # Determine total number of public submissions in docket.\n params = {'docket_id': docket_id, 'document_type': 'PS'}\n total_records = RegulationDocumentSearch.number_of_records(**params)\n\n # Use the maximum page size to download all public submissions.\n documents = []\n for page in range(total_records // 1000 + 1):\n parameters = {\n 'docket_id': docket_id,\n 'document_type': 'PS',\n 'results_per_page': 1000,\n 'offset': page * 1000,\n 'sort_by': sort_by,\n 'sort_order': sort_order\n }\n response = RegulationDocumentSearch.by_docket_id(**parameters)\n 
documents.extend(response['documents'])\n\n return documents", "def test_issue_get_comments(self):\n pass", "def watch2():\n\tcomments = r.get_comments('all', limit=None)\n\tfor comment in comments:\n\t\tif comment in visited:\n\t\t\tcontinue\n\t\telse:\n\t\t\tvisited[comment] = 1\n\t\t\tif \"LexiconBot define\" in comment.body:\n\t\t\t\tprint comment, \"from\", comment.permalink, \" / \", comment.submission\n\t\t\t\tmsg = define(comment.body.split()[2])\n\t\t\t\tcomment.reply(msg)\n\n\tprint \"Sleeping...\"\n\tsleep(1)", "def get_event_data(self, ):\n \n if os.path.exists(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\"):\n self.bug_data_frame = pd.read_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\")\n else:\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(f\"{self.cdp_dump_path}/github_events_cdp_dump.csv\", encoding='utf-8-sig',\n index=False)\n event_parser = EventsJsonParser()\n event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants, self.event_data_frame,\n f\"{self.cdp_dump_path}/\"\n f\"{CDPConfigValues.closed_events_list_file_name}\")", "def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)", "def dfs(comment, fun):\n # comment has no replies\n if not comment.replies:\n return\n else:\n for r in comment.replies:\n # do something with a comment here\n fun(r)\n # recurr\n Comment.dfs(r, fun)", "def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n 
Comment.create(session, **c)", "def comment_data(post_id: str, \n sub_reddit: str):\n url_to_open = f\"https://www.reddit.com/r/{sub_reddit}/comments/{post_id}.json\"\n success_status = 0\n while success_status != 200:\n try:\n response = urlopen(url_to_open, timeout=10)\n success_status = response.status\n except HTTPError:\n logging.info(f\"HTTP Error for exceeding requests. Sleeping for 2 minutes at {datetime.today()}.\")\n time.sleep(120)\n success_status = 400\n \n sub_reddit_page = json.loads(response.read())\n comments_df = pd.json_normalize(sub_reddit_page[1]['data']['children'])\n comments_df['post_id'] = post_id\n comments_df = comments_df[['post_id', 'data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n comments_df = comments_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n comments_df['reply'] = 'N'\n comments_df['comment_replied_id'] = ''\n # get all replies \n replies_list = []\n for comment in sub_reddit_page[1]['data']['children']:\n replies = comment.get('data').get('replies')\n comment_id = comment.get('data').get('id') \n if replies is None or replies == '':\n pass\n else:\n replies_df = pd.json_normalize(replies['data']['children'])\n try:\n replies_df = replies_df[['data.id', 'data.author_fullname', 'data.body', 'data.created', \n 'data.downs', 'data.ups']]\n except KeyError:\n pass\n replies_df = replies_df.rename(columns = {'data.id': 'comment_id', 'data.author_fullname': 'author', 'data.body': 'comment', \n 'data.created': 'created_utc', 'data.downs': 'downs', 'data.ups': 'ups'})\n replies_df['reply'] = 'Y'\n replies_df['comment_replied_id'] = comment_id\n replies_df['post_id'] = post_id\n replies_list.append(replies_df)\n if len(replies_list) == 1:\n all_replies = replies_list[0]\n elif len(replies_list) > 1: \n all_replies = pd.concat(replies_list, ignore_index = True)\n else:\n all_replies = None \n\n column_order = [c for c in comments_df.columns]\n comments_df = comments_df[column_order]\n if all_replies is not None:\n all_replies = all_replies[column_order]\n all_comments_replies = pd.concat([comments_df, replies_df], ignore_index=True)\n else:\n all_comments_replies = comments_df\n\n return all_comments_replies", "def gen_comment_threads_for_videos(\n self, videos: List\n ) -> Generator[List, None, None]:\n print(\"Requesting comment threads for videos.\")\n\n for video in videos:\n threads = self.get_comment_threads_for_video(video[\"id\"])\n\n yield threads\n\n return None", "def search_thru_comments(urls, listOfKWs):\n browser = webdriver.Chrome('/Users/sophie/documents/chromedriverCurrent')\n\n listKWs = []\n for KW in listOfKWs:\n listKWs.append([KW])\n # ex: listKWs=[['poverty'], ['inequality'], ['aids'], ['hiv']]\n # list where list[something]=name of KW. 
append after that the urls.\n global listKWsDate\n listKWsDate = []\n for KW in listOfKWs:\n listKWsDate.append([KW])\n print(listKWs == listKWsDate)\n\n for link in urls:\n browser.get(link)\n\n source = browser.page_source\n data = bs(source, 'html.parser')\n body = data.find('body')\n script = body.find('script',\n text=lambda t: t.startswith('window._sharedData'))\n #print(script)\n scriptStr = str(script)\n scriptStr.replace(\"'\",\"\")\n #scriptSplit=script.split('shortcode')\n #print(scriptSplit)\n\n #pass to searchForEach which will check the indiv posts for all KWs\n # and will then add them to the appropriate spread sheet\n for KW in listOfKWs:\n searchForEachKW(KW, scriptStr, listKWs, listKWsDate)\n\n #need to change so that calls search for each KW here. so that\n # searching each link for all the hashtags, and then add link to\n # appropriatre kw spreadsheet\n\n return listKWs", "def wrap_comments(comment_list, cls=None):\n if not cls:\n cls = CommentDetails\n return [cls(d) for d in CachedCall.multicall([cmt.details for cmt in comment_list])]", "def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()", "def crawlDocuments(docIds, skipIssns):\n rootLog = logging.getLogger('')\n successCount = 0\n consecErrorCount = 0\n fileLogHandler = None\n for i, docIdTuple in enumerate(docIds):\n docId, srcDir = docIdTuple\n removeLocks()\n checkCreateLock(srcDir)\n if fileLogHandler is not None:\n rootLog.handlers.remove(fileLogHandler)\n fileLogHandler = pubGeneric.logToFile(join(srcDir, 'crawler.log'))\n todoCount = len(docIds) - i\n logging.info('--- Crawling document with ID %s, dir %s (%d IDs left)' % (docId, srcDir, todoCount))\n webCache.clear()\n try:\n artMeta = getArticleMeta(docId)\n except pubGetError:\n writeDocIdStatus(srcDir, docId, 'no meta', '')\n continue\n\n logging.info('Got Metadata: %s, %s, %s' % (artMeta['journal'], artMeta['year'], artMeta['title']))\n try:\n 
checkIssnErrorCounts(artMeta, skipIssns, srcDir)\n paperData = crawlOneDoc(artMeta, srcDir)\n writePaperData(docId, artMeta, paperData, srcDir)\n consecErrorCount = 0\n successCount += 1\n except pubGetError as e:\n consecErrorCount += 1\n docId = artMeta['pmid']\n writeDocIdStatus(srcDir, docId, e.logMsg, e.longMsg, e.detailMsg)\n issnYear = getIssnYear(artMeta)\n issnYearErrorCounts[issnYear] += 1\n if e.logMsg not in ('noOutlinkOrDoi', 'unknownHost', 'noLicense'):\n waitSec = ERRWAIT * consecErrorCount\n logging.debug('Sleeping for %d secs after error' % waitSec)\n time.sleep(waitSec)\n if consecErrorCount > MAXCONSECERR:\n logging.error('Too many consecutive errors, stopping crawl')\n e.longMsg = 'Crawl stopped after too many consecutive errors / ' + e.longMsg\n raise\n if DO_PAUSE:\n raw_input('Press Enter to process next paper...')\n except:\n raise\n\n logging.info('Downloaded %d articles' % successCount)\n removeLocks()\n if fileLogHandler != None:\n rootLog.handlers.remove(fileLogHandler)\n return", "def crawl(self):\n\n # create helper process and setup IPC\n self.socket.listen(1)\n help_out_fd = open(self.helper_outfile, \"w\")\n with subprocess.Popen(\"./crawl_helper.py\", stdout=help_out_fd, stderr=subprocess.STDOUT) as proc:\n self.helper_pid = proc.pid\n try:\n conn, _ = self.socket.accept()\n # create initial params for crawler helper and send them\n new_urls = set()\n setup_params = {\"start_urls\": self.start_urls, \"allowed_domains\": [self.domain],\n \"cookies\": self.cookies, \"user_agent\": self.config[\"user_agent\"]}\n ipc_operations.send_object(conn, setup_params)\n\n # loop: receive a response object, then send new URLs to crawl. Catch & handle problems.\n while True:\n try:\n proc.wait(timeout=0.001)\n break\n except subprocess.TimeoutExpired:\n response = ipc_operations.receive_object(conn)\n if not response: # socket is dead / closed\n break\n new_urls = self.process_response(response)\n ipc_operations.send_object(conn, new_urls)\n except socket.timeout:\n util.printit(\"Unix socket connection to scrapy crawler unexpectedly broke. 
\" +\n \"Quitting crawling of %s\" % self.base_url, color=util.RED)\n break\n finally:\n # ensure connection is closed and helper process killed in any case\n conn.close()\n proc.kill()\n\n # after the actual crawling, extract all the gathered cookies from Selenium\n if self.config[\"use_selenium\"].lower() == \"true\":\n selenium_cookies = self.driver.get_cookies()\n for cookie in selenium_cookies:\n if not any(cookie[\"name\"] == c[\"name\"] and cookie[\"path\"] == c[\"path\"] and\n cookie[\"domain\"] == c[\"domain\"] for c in self.found_cookies):\n parsed_cookie = {}\n for key in (\"name\", \"path\", \"domain\", \"httpOnly\", \"secure\"):\n parsed_cookie[key] = cookie[key]\n self.found_cookies.append(parsed_cookie)\n\n help_out_fd.close()\n return self.create_results()", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % 
refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)", "def get_comments_between(self, start_date, end_date):\n ret = []\n ids = self.get_post_ids(start_date, end_date)\n\n for id in ids:\n comments = self.reddit.submission(id).comments\n ret.append(self.get_nested_comments(comments))\n return ret", "def run_crawler(self) -> List[JobEventSchema]:\n print(f\"Ready for scraping, current task: {self.tasks}\")\n\n crawling_result = []\n for task in self.tasks:\n result = task.run()\n crawling_result.extend(result)\n return crawling_result", "def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments", "def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. 
You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()", "def get_comments_by_percentage(submission_id, percent_of_comments):\n comments_list = []\n submission = (REDDIT.submission(id=submission_id))\n max_comments = int(submission.num_comments * percent_of_comments)\n\n print(submission.num_comments)\n print(max_comments)\n\n comment_count = 0\n\n # sort comments by best and get list of id's\n submission.comment_sort = 'confidence'\n submission.comments.replace_more(limit=40)\n for comment_id in submission.comments.list():\n if comment_count >= max_comments:\n break\n comments_list.append(comment_id)\n comment_count += 1\n\n return comments_list", "def comments(self, limit=100, all=False):\n source, edge = self.id, \"comments\"\n return lazygen(Comment, source, edge,\n limit=limit, get_all=all)", "def handle_free_comments(self):\r\n comments = FreeComment.objects.all()\r\n for c in comments:\r\n new = FreeThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n name = c.person_name,\r\n website = '',\r\n email = '',\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = c.approved\r\n )\r\n new.save()", "def crawler(self):\n\n\t\tfor page in range(self.first_page, self.last_page+1):\n\t\t\tprint(\"\\nCrawling Page \" + str(page))\n\t\t\tpage_url = self.site_url + \"?page=\" + str(page) +\\\n\t\t\t \"&index=prod_all_products_term_optimization\"\n\t\t\t\n\t\t\tself.scrape_features(page_url)", "def _commentsInThisFunction(self):\n show_unique_c = self.config.display_unique_comments\n\n msg = \"Searching comments within function '\" + misc.get_function_name() + \"'\"\n self._console_output(msg)\n\n comment_list = self.ba.comments_in_function()\n\n # Found any comment at all?\n nrows = len(comment_list)\n if not nrows:\n self._console_output(\"[!] 
No comments found\", err = True)\n return\n\n self.table.setColumnCount(2)\n self.table_label.setText(\"Comments within current function\")\n self.table.setHorizontalHeaderLabels((\"Address\", \"Comments\"))\n self.table.clearContents()\n self.table.setRowCount(0)\n\n # Fill with contents\n displayed_comments = []\n\n idx = 0\n for (addr, comment) in comment_list:\n if show_unique_c and comment in displayed_comments:\n continue\n\n displayed_comments.append(comment)\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%08x\" % addr)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n comment_item = QTableWidgetItem(comment)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, comment_item)\n\n idx += 0", "def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())", "def run(self):\n if self.is_full():\n return\n for crawler in self.crawlers:\n logger.info(f'crawler {crawler} to get proxy')\n proxies = crawler.run()\n if proxies:\n for proxy in proxies:\n self.redis.add(proxy)\n logger.info(f'crawled {len(proxies)} proxies from {crawler}')\n else:\n logger.info(f'cannot crawl proxies from {crawler}')", "def get_bug_data(self, current_date=None):\n start_time = time.time()\n bug_data = self.web_connection.get_async_data_using_asyncio_paginated(self.bug_url, self.web_constants, 5)\n end_time = time.time()\n # print(f\"Commit data using Parallel (asyncio)\\n {commit_data}\\n\\n\")\n print(f\"Time Taken to Fetch Bug Details {end_time - start_time}\")\n\t\t\n bugs_parser = BugsJsonParser()\n bug_list_df = bugs_parser.parse_json(bug_data)\n\n if current_date is None:\n current_date = datetime.today().strftime('%Y-%m-%d')\n directory = f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}\"\n CDPConfigValues.create_directory(directory)\n bug_list_df.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/\"\n f\"{CDPConfigValues.project_issue_list_file_name}\",\n index=False)\n else:\n 
bug_list_df.to_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\", index=False)\n\n return bug_list_df", "def get_comments_by_ids(self, comment_ids):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_ids\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('commenting',\n collection='Comment',\n runtime=self._runtime)\n object_id_list = []\n for i in comment_ids:\n object_id_list.append(ObjectId(self._get_id(i, 'commenting').get_identifier()))\n result = collection.find(\n dict({'_id': {'$in': object_id_list}},\n **self._view_filter()))\n result = list(result)\n sorted_result = []\n for object_id in object_id_list:\n for object_map in result:\n if object_map['_id'] == object_id:\n sorted_result.append(object_map)\n break\n return objects.CommentList(sorted_result, runtime=self._runtime, proxy=self._proxy)", "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n 
classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def crawl(self, fuzzable_request, debugging_id):\n for domain_path in fuzzable_request.get_url().get_directories():\n\n if domain_path in self._analyzed_dirs:\n continue\n \n self._analyzed_dirs.add(domain_path)\n\n url_repeater = repeat(domain_path)\n args = izip(url_repeater, self._get_potential_phpinfos())\n\n self.worker_pool.map_multi_args(self._check_and_analyze, args)", "def crawl_twitter(list_ids, api, wait_on_rate_limit=False):\n\n ldc = []\n count = 0\n try:\n for curr_id in list_ids:\n for post in tweepy.Cursor(api.user_timeline, id=curr_id, summary=False, tweet_mode=\"extended\",\n wait_on_rate_limit=wait_on_rate_limit).items():\n dc = OrderedDict()\n curr_post = post._json\n dc['tweet_from'] = curr_id\n dc['created_at'] = curr_post['created_at']\n dc['hashtags'] = [x['text']\n for x in curr_post['entities']['hashtags']]\n dc['urls'] = [x['expanded_url']\n for x in curr_post['entities']['urls']]\n dc['user_mentions_id'] = [x['id']\n for x in curr_post['entities']['user_mentions']]\n if 'media' in curr_post['entities']:\n dc['media'] = [x['media_url_https']\n for x in curr_post['entities']['media']]\n dc['user_mentions_name'] = [x['screen_name']\n for x in curr_post['entities']['user_mentions']]\n dc['origin_device'] = BeautifulSoup(\n curr_post['source'], 'html.parser').a.string\n dc['favorite_count'] = curr_post['favorite_count']\n dc['text'] = curr_post['full_text']\n dc['id'] = curr_post['id']\n dc['in_reply_to_screen_name'] = curr_post[\n 'in_reply_to_screen_name']\n dc['in_reply_to_user_id'] = curr_post['in_reply_to_user_id']\n dc['in_reply_to_status_id'] = curr_post[\n 'in_reply_to_status_id']\n dc['retweet_count'] = curr_post['retweet_count']\n # adding retweet information because it is important.\n if ('retweeted_status' in curr_post):\n dc['retweeted_status_text'] = curr_post[\n 'retweeted_status']['full_text']\n dc['retweeted_status_url'] = [x['expanded_url']\n for x in curr_post['retweeted_status']['entities']['urls']]\n dc['retweeted_status_id'] = curr_post[\n 'retweeted_status']['id']\n dc['retweeted_status_user_name'] = curr_post[\n 'retweeted_status']['user']['name']\n dc['retweeted_status_user_handle'] = curr_post[\n 'retweeted_status']['user']['screen_name']\n ldc.append(dc)\n count += 1\n except Exception as twe:\n print(str(twe))\n print(\"Total count : \" + str(count))\n return (ldc)", "def comment_issue(self, msg, issue_id, comment):\n self._asset_bind(msg)\n client = self._github_operator(msg)\n comment_obj = client.issue_comment(task_repository_name(), issue_id, comment)\n yield comment_obj.html_url", "def main():\r\n \r\n data_dir = Path.cwd().joinpath('OUTPUT')\r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n # Load deduplicated comments\r\n data = utils.load(data_dir, 'student_comment_deduplicated')\r\n \r\n # Get the luis API url\r\n with open(config_dir.joinpath('luis_url.txt'), 'r') as f:\r\n luis_url = f.readline()\r\n \r\n request_api(\r\n data,\r\n luis_url,\r\n 1000,\r\n )", "def crawl(spider: str, book_id: int):\n proc = CrawlerProcess(get_project_settings())\n\n proc.crawl(spider, book_id=book_id)\n\n proc.start()", "def work(self):\n while True:\n url, depth = self.crawl_queue.get(timeout=self.crawl_queue_time_out)\n self.crawl_queue.task_done()\n try:\n if depth <= self.depth_limit:\n with Spider.seen_urls_lock:\n seen_already = url in self.seen_urls\n if not seen_already:\n page, 
links = self.crawl_page(url, depth, self.domain_name)\n self._add_links_to_crawl_queue(links, depth)\n self._add_page_to_rank_queue(page)\n self._add_page_to_storage(page)\n with Spider.seen_urls_lock:\n self.seen_urls.add(url)\n except Exception as e:\n self.logger.debug(e)", "def get_events_data_for_scheduler(self, current_date, previous_bug_df, previous_closed_events_df):\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.closed_bug_data_frame = self.closed_bug_data_frame[\n ~(self.closed_bug_data_frame.ISSUE_ID.isin(previous_bug_df.ISSUE_ID))]\n\n if len(self.closed_bug_data_frame) != 0:\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/github_events_cdp_dump.csv\",\n encoding='utf-8-sig', index=False)\n event_parser = EventsJsonParser()\n event_df = event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants,\n self.event_data_frame)\n\n event_df = pd.concat([event_df, previous_closed_events_df], ignore_index=True)\n\n return event_df\n\n else:\n return None", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n 
\"first. Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def online_parser(self, team_id_lst):\n print(\"Running Online Parser ...\\n\")\n self.report_exec(online=True)\n time.sleep(0.5)\n\n conti_err_cnt = 0\n max_conti_err_reached = False\n for team_id in tqdm(team_id_lst):\n self.logger.info(\"Working on %d\" % team_id)\n\n if max_conti_err_reached:\n self.logger.debug(\"\\tClearing Queue (Max Conti-Error Cnt Reached)\")\n continue\n if conti_err_cnt >= self._online_max_conti_err:\n max_conti_err_reached = True\n self.logger.critical(\n \"Maximum Continuous Error Count (%d) Reached. To End Crawler Workflow\"\n % self._online_max_conti_err)\n continue\n\n try:\n content = self.request_pdf_stream(team_id)\n self.file_cnt += 1\n conti_err_cnt = 0\n except Exception as err:\n self.logger.error(\"[ERROR] %s\" % err)\n conti_err_cnt += 1\n continue\n\n try:\n info = self.translate_pdf(filename=None, filestream=content, fs_team_id=team_id)\n self.update_res_to_cache(info)\n self.suc_cnt += 1\n except Exception as err:\n self.failed_list.append(team_id)\n self.logger.error(\"[ERROR] %s\" % err)\n continue\n\n self.logger.info(\"Parser Finished for %d\" % team_id)\n\n self.cache_to_json()\n self.report_del()", "def task_fetch_posts_and_comments(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx',\n comments_out='data/comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(PostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, None)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)", "async def crawl(self) -> None:\n workers = [\n asyncio.Task(self.queued_coroutine(), loop=self.ev)\n for _ in range(self.max_tasks)\n ]\n\n self.t0 = time.time()\n\n await self.queue.join()\n\n self.t1 = time.time()\n\n for w in workers:\n w.cancel()", "def 
return_filtered_comments(submission):\n submission.comment_sort = COMMENT_SORT_BY\n submission.comment_limit = COMMENT_LIMIT\n filtered_comments = []\n for top_level_comment in submission.comments:\n if isinstance(top_level_comment, praw.models.MoreComments):\n continue\n # Here you can fetch data off the comment.\n comment = top_level_comment.body\n\n # ensure that the comment does not contain any words in blacklist\n # and also it is less than COMMENT_MAX_WORDS\n fail_test = 0\n lcomment = comment.lower()\n for badword in blacklist:\n if badword not in lcomment and len(comment) < COMMENT_MAX_WORDS:\n pass\n else:\n fail_test += 1\n if not fail_test:\n filtered_comments.append(replace_words(comment).capitalize())\n\n return filtered_comments", "def build_newscomment_paracrawl(self):\n # Note: build_newscomment_only sets a default_builder_obj\n # if removed, set explicitly\n nc_train_data, _ = self.build_newscomment_limited()\n\n nc_data_size = nc_train_data.cardinality().numpy() # Should be 284246\n logging.info('News commentary size is... %d', nc_data_size)\n paracrawl_builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[PARACRAWL],\n data_dir=self.data_dir)\n paracrawl_shard_spec = self.build_shard_spec(self.paracrawl_size,\n False)\n para_train_data = paracrawl_builder.as_dataset(\n split='train' + paracrawl_shard_spec,\n shuffle_files=self.shuffle_train_files)\n logging.info('Paracrawl size is... %d',\n para_train_data.cardinality().numpy())\n\n total_dataset_size = float(nc_data_size + self.paracrawl_size)\n nc_prop = float(nc_data_size) / total_dataset_size\n pc_prop = float(self.paracrawl_size) / total_dataset_size\n logging.info('Sampling proportion is %f, %f', nc_prop, pc_prop)\n\n train_data = tf.data.experimental.sample_from_datasets(\n [nc_train_data, para_train_data],\n weights=[nc_prop, pc_prop],\n seed=RANDOM_SAMPLE_SEED)\n\n _, nc_eval_data = self.build_newscomment_ft()\n\n return train_data, nc_eval_data", "def get_comments(self, sort, time):\r\n from r2.models import Comment\r\n return self.get_links(sort, time, Comment)", "def getMovieShortComments(movieid, pages=1, proxy=1):\n\n commentList = []\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36',\n 'Cookie': 'bid=PFXqD9SdoDo; douban-fav-remind=1; gr_user_id=0f03311e-0e28-4e2f-a8fd-3a272d2a525f; _vwo_uuid_v2=D54BE21A153A50F178B1EEA3EE252805F|d0f6410ffbf6226399de9cd1715afb86; viewed=\"1148282_30329536_25815142\"; ll=\"118172\"; push_doumail_num=0; douban-profile-remind=1; __yadk_uid=7QS0r1GHatoz4fkcP2sh8IWeD8YWzQ4u; push_noty_num=0; __utmv=30149280.18600; _ga=GA1.2.449624121.1587021337; __utmc=30149280; __utmz=30149280.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __utmc=223695111; __utmz=223695111.1589694675.4.3.utmcsr=m.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/movie/; __gads=ID=352a53130bca4285:T=1589699239:S=ALNI_MYKpXBWoi1resUvUVMC-9bRu-CuSw; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1589784625%2C%22https%3A%2F%2Fm.douban.com%2Fmovie%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.449624121.1587021337.1589694675.1589784731.5; __utma=223695111.299663224.1587002697.1589694675.1589784731.5; __utmb=223695111.0.10.1589784731; __utmt=1; __utmb=30149280.1.10.1589784731; dbcl2=\"186000836:vB8x8LL+q3k\"; ck=kTW_; _pk_id.100001.4cf6=ffb676b0890cad74.1587002697.6.1589786159.1589699369.'\n }\n session = requests.Session()\n\n proxies = None\n if 
proxy == 1:\n proxies = get_proxy.get_workable_ip()\n\n # First, try to get the total of comments.\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\",\n headers=headers, proxies=proxies)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n numstr = bsObj.body.find('div', {'id': 'wrapper'}).find('ul', {'class': 'fleft CommentTabs'}) \\\n .find('li', {'class': 'is-active'}).span.get_text()\n num = re.match(r'(\\D+)(\\d+)', numstr)\n total = int(num.group(2))\n print(total)\n\n # To avoid the situation that the total of comments is less than the number we set.\n if pages * 20 > total:\n pages = int(total / 20 + 1)\n\n # Get comments.\n try:\n for i in range(0, pages):\n r = session.get(\n \"https://movie.douban.com/subject/\" + str(\n movieid) + \"/comments?limit=20&sort=new_score&status=P&start=\" +\n str(i * 20), headers=headers)\n bsObj = bs4.BeautifulSoup(r.text, \"html.parser\")\n comment_tags = bsObj.body.find('div', {'id': 'comments'}).find_all('div', {'class': 'comment-item'})\n pattern = re.compile('\\d{2}')\n for tag in comment_tags:\n temp = {}\n t = tag.find('span', {'class': re.compile('(.*) rating')})\n if t is not None:\n star = int(pattern.findall(t['class'][0])[0])\n # print(star)\n temp['comment'] = tag.find('p').span.get_text()\n temp['star'] = star\n commentList.append(temp)\n except AttributeError as e:\n print(\"Limited by website, please change your proxy.爬虫好像受到网站的限制,请更换代理。\")\n return commentList", "def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments", "async def main(self, loop: asyncio.get_event_loop) -> None:\n queue = asyncio.Queue()\n\n for url in self.url_list:\n queue.put_nowait(url)\n\n async with aiohttp.ClientSession(loop=loop) as session:\n workers = [\n asyncio.create_task(self.worker(queue, session))\n for _ in range(self.max_treads)\n ]\n await queue.join()\n\n for worker in workers:\n worker.cancel()\n\n await asyncio.gather(*workers, return_exceptions=True)", "def get_bugs(self, year):\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n for bug in helpers.load_json(path):\n yield bug", "def get_comments(self):\n raise NotImplementedError", "def calc_conv_comments(self):\n for conv_comment in self.pull_request.get_issue_comments():\n self._users.add(conv_comment.user.login)\n lowercase_body = conv_comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_conv_comments += 1\n for reaction in conv_comment.get_reactions():\n self._users.add(reaction.user.login)\n self.conv_comment_reactions += 1\n if conv_comment.body is not None:\n self.len_issue_comments += len(conv_comment.body)", "def scrapeFacebookComments(file_id, result_file, access_token):\n with open(file_id, 'r', encoding='utf8') as f, \\\n open(result_file, 'w', encoding='utf8', newline='') as o:\n input_file = csv.DictReader(f)\n output_file = csv.DictWriter(o, \n fieldnames=[\n 'sentence_id', \n 'sentence_text'])\n\n output_file.writeheader()\n\n num_processed = 0\n scrape_starttime = datetime.datetime.now()\n base = \"https://graph.facebook.com/v2.12\"\n parameters = \"/?access_token={}\".format(access_token)\n\n print(\"Scraping {} Comments: {}\\n\".format(\n file_id, scrape_starttime))\n\n comment_contents = {}\n\n for row in input_file:\n if 
row['comment_id'] in comment_contents:\n comment = comment_contents[row['comment_id']]\n else:\n node = \"/{}\".format(row['comment_id'])\n url = base + node + parameters\n reply = request_until_succeed(url)\n \n if not reply:\n print(\"Comment doesn't exists anymore: \" + row['comment_id'])\n continue\n \n try:\n comment = json.loads(reply)\n except:\n comment = json.loads(reply.decode('utf-8')) #python 3.5 and earlier bugfix\n comment_contents[row['comment_id']] = comment # cache result in case of reuse\n\n comment_message = '' if 'message' not in comment \\\n or comment['message'] is '' else \\\n unicode_decode(comment['message'])\n\n sentence_texts = sent_tokenize(comment_message,\n language='german')\n sentence_text = sentence_texts[int(row['sentence_number'])]\n\n ha = hashlib.md5(sentence_text.encode()).hexdigest()\n\n if ha != row['md5_hash']:\n print(\"Wrong MD5 hash for comment: \" + row['comment_id'] + \", \" + sentence_text)\n continue\n\n output_file.writerow({'sentence_id': row['sentence_id'],\n 'sentence_text': sentence_text})\n\n num_processed += 1\n if num_processed % 100 == 0:\n print(\"{} Comments Processed: {}\".format(\n num_processed, datetime.datetime.now()))\n\n print(\"\\nDone!\\n{} Comments Processed in {}\".format(\n num_processed, datetime.datetime.now() - scrape_starttime))", "def get_thread_urls(self, response):\n\n print(\"scraping {0}\".format(response.url))\n url_stories = []\n\n # <li_tags> is a list of all the <li> tags in the html doc with a certain class value.\n # This corresponds to all threads that are NOT sticky.\n li_tags = response.xpath(\"//li[@class='discussionListItem visible ']\")\n\n for thread_tag in li_tags:\n\n author_name = thread_tag.xpath('@data-author').extract_first()\n\n # Get the last post date for a thread ========================================================\n last_post_date = thread_tag.xpath(\".//dl[@class='lastPostInfo']//abbr/text()\").extract_first()\n if last_post_date is not None:\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n else:\n # fix with line continuation.\n last_post_date = thread_tag.xpath(\".//span[@class='DateTime']/@title\").extract_first()\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n\n # ============================================================================================\n\n author, created = Author.objects.get_or_create(name=author_name)\n if created:\n author.save()\n\n title = thread_tag.xpath(\".//h3[@class='title']/a/text()\").extract_first().encode('utf-8')\n story, created = Story.objects.get_or_create(title=title)\n\n # if created is true, then it's a brand new story, so make sure to save it.\n if created:\n story.save()\n story.authors.add(author)\n\n a_node = thread_tag.xpath(\"div/div/h3/a\")\n thread_url = a_node.xpath(\"@href\").extract_first()\n\n cur_date = datetime.now(tz=utc)\n oldest_date = datetime.min.replace(tzinfo=utc)\n\n created = False\n \"\"\"\n Over here, I am attempting to either update an existing storyhost\n object, OR I am creating a new one. 
It looks redundant, but I found that\n if I just used get_or_create, I was forced to set last_date automatically.\n\n I didn't always want to create a brand new object, so this verbose code\n was necessary.\n \"\"\"\n try:\n # TRY TO UPDATE EXISTING object\n storyhost = StoryHost.objects.get(host=self.HOST, story=story, url=thread_url)\n storyhost.save()\n except StoryHost.DoesNotExist:\n\n # CREATE BRAND NEW STORYHOST OBJECT\n storyhost, created = StoryHost.objects.get_or_create(host=self.HOST,\n story=story,\n url=thread_url,\n last_scraped=oldest_date)\n\n storyhost.save()\n\n \"\"\"\n Check if the last post date is more recent than the\n storyhost's last scraped date. If it's not, skip it.\n\n If it is, update the last scraped date, and add it to the\n list of url_stories to be returned at the end of this function.\n \"\"\"\n\n last_seg_date = self.get_last_seg_date(story)\n if thread_url is not None:\n if last_post_date > storyhost.last_scraped or last_seg_date < last_post_date:\n storyhost.last_scraped = cur_date\n storyhost.save()\n thread_link = response.urljoin(thread_url)\n\n # Add this story to two separate lists, one for updating, one for just\n # scraping.\n if created:\n url_stories.append((thread_link, story))\n else:\n self.update_list.append((\"{0}threadmarks\".format(thread_link), story))\n else:\n print(\"Skipping {0}\".format(storyhost.url))\n\n return url_stories", "def get_comments(self, project_id, forum_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/' + str(forum_id) + '/comments/'\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_comments(response)", "def visit_all_issues_in_list(self, issues):\n for issue in issues:\n self.driver.implicitly_wait(3)\n self.driver.get(issue)\n config_type_text = self.driver.find_element_by_xpath(\"/html/body/b-service-bootstrap/\"\\\n \"app-root/div[7]/div/div/edit-issue-page/b-resolving-issue-references/div[2]/div[1]/\"\\\n \"div[3]/div/div/div[2]/div[2]/div[3]/div/div[1]/div/span/span[6]/span/span/a\").text\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n\n advanced_fields = {}\n advanced_fields[\"Issue Id\"] = issue.replace(\"https://b.corp.google.com/issues/\", \"\")\n reporter_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner \"\\\n \"bv2-issue-metadata-field-reporter\")\n reporter = reporter_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n advanced_fields[reporter[0]] = reporter[1]\n assignee_tag = soup.find(\"div\", \"bv2-issue-metadata-field-inner bv2-issue-metadata-\"\\\n \"field-assignee\")\n assignee = assignee_tag[\"aria-label\"].replace(\n \" value is \", \"\\n\").split(\"\\n\")\n if assignee[1] != \"empty\":\n advanced_fields[assignee[0]] = assignee[1]\n\n if \"EnqueueRule\" in config_type_text:\n config_type = \"EnqueueRules\"\n elif \"RoutingTargets\" in config_type_text:\n config_type = \"RoutingTargets\"\n elif \"QueueInfo\" in config_type_text:\n config_type = \"QueueInfo\"\n\n advanced_fields[\"Config Type\"] = config_type\n\n if config_type == \"QueueInfo\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n\n self.scrape_queue_info(advanced_fields)\n elif config_type == \"RoutingTargets\":\n if assignee[1] != constants.AUTOMATION_USER:\n continue\n self.scrape_routing_targets(advanced_fields)\n elif config_type == \"EnqueueRules\":\n self._message_parsing_util.parse_page(soup, reporter[1], issue)", "def scrap_keywords():\n ParScr 
= ParallelScraper()\n ParScr.create_and_run_threads()\n return", "def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return", "def run():\n\n api = api_start()\n stonks = {}\n check_function = load_symbol_list()\n for obj in (\"comments\", \"submissions\"):\n for post in get_text(api, obj):\n if obj == \"comments\":\n full_text = post.body\n else: # obj == \"submissions\"\n full_text = post.title + post.selftext\n try:\n stonks = check_texts(\n full_text, post.author.name, stonks, check_function\n )\n except AttributeError:\n pass\n\n return stonks", "def get_comments_for_issue(owner, repo, issue_number, session=None):\n url = (\n f'{GITHUB_API_URL}/repos/{owner}/{repo}/issues/{issue_number}/comments'\n )\n return get_one_item_at_a_time(url, session=session)", "def scrape_comments(video_list, driver_path=\"C:/WebDriver/bin/chromedriver.exe\", csv_path=\"../comments.csv\"):\n \n csv_file = open(csv_path,'w', encoding=\"UTF-8\", newline=\"\")\n writer = csv.writer(csv_file) \n \n writer.writerow(['query', 'url', 'title', 'upload_date', 'channel', 'no_of_views', 'likes', 'dislikes', 'comment', 'author', 'comment_date', 'no_of_replies','upvotes']) \n driver = webdriver.Chrome(executable_path=driver_path)\n\n for video in video_list:\n \n url = video['url']\n title = video['title']\n upload_date = video['date']\n 
query = video['query']\n \n # Scrape basic video data\n print(\"=\" * 40)\n print(\"video title : \", title)\n driver.get(url)\n v_channel = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#upload-info yt-formatted-string\"))).text\n print(\"channel : \",v_channel) \n v_views = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#count span.view-count\"))).text\n print(\"no. of views : \",v_views)\n v_timeUploaded = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#date yt-formatted-string\"))).text\n print(\"time uploaded : \",v_timeUploaded)\n w = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#top-level-buttons yt-formatted-string\")))\n w = driver.find_elements_by_css_selector(\"div#top-level-buttons yt-formatted-string\")\n v_likes = w[0].text\n v_dislikes = w[1].text\n print(\"video has \", v_likes, \"likes and \", v_dislikes, \" dislikes\")\n \n youtube_dict ={}\n \n print(\"+\" * 40)\n print(\"Scraping child links \")\n \n # Load comments section\n driver.execute_script('window.scrollTo(0,390);')\n time.sleep(2)\n \n try:\n # Sort by top comments\n print(\"sorting by top comments\")\n sort= WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\"div#icon-label\")))\n sort.click()\n topcomments =driver.find_element_by_xpath(\"\"\"//*[@id=\"menu\"]/a[1]/paper-item/paper-item-body/div[1]\"\"\")\n topcomments.click()\n \n # Loads more comments\n for i in range(0,5):\n driver.execute_script(\"window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight))\")\n print(\"scrolling to load more comments\")\n time.sleep(4)\n \n # Count total number of comments and set index to number of comments if less than 50 otherwise set as 50. 
\n totalcomments= len(driver.find_elements_by_xpath(\"\"\"//*[@id=\"content-text\"]\"\"\"))\n \n if totalcomments < 100:\n index= totalcomments\n else:\n index= 100 \n \n # Loop through each comment and scrape info\n print(\"scraping through comments\")\n ccount = 0\n while ccount < index: \n try:\n comment = driver.find_elements_by_xpath('//*[@id=\"content-text\"]')[ccount].text\n except:\n comment = \"\"\n try:\n authors = driver.find_elements_by_xpath('//a[@id=\"author-text\"]/span')[ccount].text\n except:\n authors = \"\"\n try:\n comment_date = driver.find_elements_by_xpath('//*[@id=\"published-time-text\"]/a')[ccount].text\n except:\n comment_date = \"\"\n try:\n replies = driver.find_elements_by_xpath('//*[@id=\"more-text\"]')[ccount].text \n if replies ==\"View reply\":\n replies= 1\n else:\n replies =replies.replace(\"View \",\"\")\n replies =replies.replace(\" replies\",\"\")\n except:\n replies = \"\"\n try:\n upvotes = str(driver.find_elements_by_xpath('//*[@id=\"vote-count-middle\"]')[ccount].text)\n except:\n upvotes = \"\"\n \n \n # Write scraped data to csv file\n youtube_dict['query'] = query\n youtube_dict['url'] = url\n youtube_dict['title'] = title\n youtube_dict['upload_date'] = upload_date\n youtube_dict['channel'] = v_channel\n youtube_dict['no_of_views'] = v_views\n youtube_dict['likes'] = v_likes\n youtube_dict['dislikes'] = v_dislikes\n youtube_dict['comment'] = comment\n youtube_dict['author'] = authors\n youtube_dict['comment_date'] = comment_date\n youtube_dict['no_of_replies'] = replies\n youtube_dict['upvotes'] = upvotes\n writer.writerow(youtube_dict.values())\n \n ccount = ccount + 1\n \n # If video errors out, move onto the next one\n except TimeoutException as e:\n print(title, \" errored out: \",str(e))\n print(\"moving onto next video\")", "def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()", "def run(self):\n while True:\n letter = self.queue.get()\n course_scraper = CourseSession(self.location)\n course_scraper.scrape(letter)\n self.queue.task_done()", "def get_comment_order(self):\n\n with g.stats.get_timer('comment_tree.get.1') as comment_tree_timer:\n comment_tree = CommentTree.by_link(self.link, comment_tree_timer)\n sort_name = self.sort.col\n sorter = get_comment_scores(\n self.link, sort_name, comment_tree.cids, comment_tree_timer)\n comment_tree_timer.intermediate('get_scores')\n\n if isinstance(self.sort, operators.shuffled):\n # randomize the scores of top level comments\n top_level_ids = comment_tree.tree.get(None, [])\n top_level_scores = [\n sorter[comment_id] for comment_id in top_level_ids]\n shuffle(top_level_scores)\n for i, comment_id in enumerate(top_level_ids):\n sorter[comment_id] = top_level_scores[i]\n\n self.timer.intermediate(\"load_storage\")\n\n comment_tree = self.modify_comment_tree(comment_tree)\n self.timer.intermediate(\"modify_comment_tree\")\n\n initial_candidates, offset_depth = self.get_initial_candidates(comment_tree)\n\n comment_tuples = self.get_initial_comment_list(comment_tree)\n if comment_tuples:\n # some comments have bypassed the sorting/inserting process, remove\n # them from `initial_candidates` so they won't be inserted again\n comment_tuple_ids = {\n comment_tuple.comment_id for comment_tuple in comment_tuples}\n initial_candidates = [\n comment_id for comment_id in initial_candidates\n if comment_id not in comment_tuple_ids\n ]\n\n candidates = []\n self.update_candidates(candidates, sorter, initial_candidates)\n 
self.timer.intermediate(\"pick_candidates\")\n\n # choose which comments to show\n while candidates and len(comment_tuples) < self.max_comments:\n sort_val, comment_id = heapq.heappop(candidates)\n if comment_id not in comment_tree.cids:\n continue\n\n comment_depth = comment_tree.depth[comment_id] - offset_depth\n if comment_depth >= self.max_depth:\n continue\n\n child_ids = comment_tree.tree.get(comment_id, [])\n\n comment_tuples.append(CommentTuple(\n comment_id=comment_id,\n depth=comment_depth,\n parent_id=comment_tree.parents[comment_id],\n num_children=comment_tree.num_children[comment_id],\n child_ids=child_ids,\n ))\n\n child_depth = comment_depth + 1\n if child_depth < self.max_depth:\n self.update_candidates(candidates, sorter, child_ids)\n\n self.timer.intermediate(\"pick_comments\")\n\n # add all not-selected top level comments to the comment_tuples list\n # so we can make MoreChildren for them later\n top_level_not_visible = {\n comment_id for sort_val, comment_id in candidates\n if comment_tree.depth.get(comment_id, 0) - offset_depth == 0\n }\n\n if top_level_not_visible:\n num_children_not_visible = sum(\n 1 + comment_tree.num_children[comment_id]\n for comment_id in top_level_not_visible\n )\n comment_tuples.append(MissingChildrenTuple(\n num_children=num_children_not_visible,\n child_ids=top_level_not_visible,\n ))\n\n self.timer.intermediate(\"handle_morechildren\")\n return comment_tuples", "def crawl(self):\n retrievedSubs = []\n reddit = praw.Reddit(\n client_id='QRl_4bwjckcg9A',\n client_secret='dsavqFoOk5NgWEOWtMf9NknwxRIoIw',\n password='P@ssword123',\n user_agent='cluelessv1',\n username='theclueless1009'\n )\n submissions = reddit.subreddit('all').search(self.keyword, sort='relevance', limit=50, time_filter='week')\n\n for sub in submissions:\n self.data = [sub.selftext, sub.upvote_ratio, sub.score,\n sub.title, sub.id, sub.total_awards_received, sub.created_utc]\n self.data = tuple(self.data)\n retrievedSubs.append(self.data)\n\n return retrievedSubs", "def get_comments_by_country(pages, hotel, country):\n url = \"http://www.booking.com/reviewlist.es.html\"\n headers = {\n 'User-Agent': \"PostmanRuntime/7.20.1\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"4b4e2c78-12c0-42a7-807a-29f5f7378ae5,e75b58fb-25dd-4fdd-b97a-47650ed52d41\", # NOQA\n 'Host': \"www.booking.com\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"bkng=11UmFuZG9tSVYkc2RlIyh9Yaa29%2F3xUOLbca8KLfxLPeck0I1eO54zQUW2YGGgHUJ6NVSV%2BmLwJzaS5ibHX0J%2BdueF6GNDCq1X0NvEJAU9t%2FoaAC2%2FMBm39Gz0lTSWuf6zuBVIiNGAI88YDjaj4w5H8Lrv7T0Yug9jg%2FpPsONkdMVLMiYifIslIsLvFl07K%2BTKGRykCAxOsgE%3D\", # NOQA\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n params = {\n 'cc1': country,\n 'pagename': hotel,\n 'type': 'total',\n 'dist': str(1),\n 'rows': str(20)\n }\n\n def build_soup_comment_request(page: int, list_of_countries):\n if page == 0:\n params['offset'] = str(page)\n else:\n params['offset'] = str(page * 20)\n\n response = requests.get(url=url, params=params, headers=headers)\n comments_soup = BeautifulSoup(response.content, 'html.parser')\n span = comments_soup.select('.bui-avatar-block__flag img')\n [get_flags(item, list_of_countries) for item in span]\n\n countries_list = {}\n [build_soup_comment_request(page, countries_list) for page in range(pages)]\n return countries_list", "def all_user_comments(username):\n return commentslist", "def main_loop(bot):\n # Start looping\n i = 0\n bot.tick()\n for comment in bot.r_all.stream.comments():\n # Check 
if comment is and iambic pentameter\n done = bot.process_comment(comment)\n # If enough commebts have been processed, kill the procgram\n if done:\n exit()\n # Increment counter\n i += 1\n # Report periodically\n if i >= bot.options.report_every:\n # Print infos\n percent_length_removed = (bot.n_length_removed) / bot.options.report_every * 100\n print('Analyzed %d comments, ' % i +\n '%.2f%% too short/long, ' % percent_length_removed +\n 'found %d iambic pentameters ' % bot.n_pentameters_epoch +\n '(total: %d), ' % bot.n_pentameters +\n '%.1f comments/s' % (i / bot.tick()))\n sys.stdout.flush()\n # Sleep a bit\n time.sleep(bot.options.sleep_for) # Reset periodic counters\n # Reset periodic counters\n bot.n_length_removed = 0\n bot.n_pentameters_epoch = 0\n i = 0\n # Occasionally tweet a quatrain\n try:\n bot.tweet_quatrain()\n except Exception as e:\n print(\"Failed to tweet \" + str(e), file=sys.stderr)", "def decide_action(self, mode: CrawlMode = CrawlMode.NO, bugList: Union[List, str] = None) -> None:\n # checks on which crawl operation to execute\n if mode == CrawlMode.BUG:\n self.get_all_bugs()\n elif mode == CrawlMode.COMMENT:\n if bugList:\n self.get_all_comments(bugList)\n else:\n print('Error: No buglist to be found. Please check your params and start again.')\n return\n elif mode == CrawlMode.BOTH:\n bugIDList = self.get_all_bugs()\n self.get_all_comments(bugIDList)\n elif mode == CrawlMode.CFAST:\n self.get_all_comments_mp(bugList, self.workers)\n elif mode == CrawlMode.BFAST:\n bugsIDList = self.get_all_bugs()\n self.get_all_comments_mp(bugsIDList, self.workers)\n else:\n return", "def comment_extraction(self, part, Identity, limit=None, order=None, nextPageToken=None, searchTerms=None):\n key = self.keylist[self.keyindex]\n url_ct = \"https://www.googleapis.com/youtube/v3/commentThreads\"\n comment_details = {}\n\n if Identity.startswith(\"UC\"):\n channelId = Identity\n ct_id = None\n videoId = None\n\n elif Identity.startswith(\"Ug\"):\n ct_id = Identity\n channelId = None\n videoId = None\n\n elif len(Identity) == 11:\n videoId = Identity\n ct_id = None\n channelId = None\n\n else:\n return \"Invalid input to Identity Parameter\" \n \n if limit != None and limit >= 1 and limit <= 100:\n maxResults = limit\n else:\n maxResults = 100\n \n comment_count = initial = 0\n \n try:\n while nextPageToken or initial == 0:\n querystring = {\"part\": part,\n \"channelId\": channelId,\n \"id\": ct_id,\n \"videoId\": videoId,\n \"maxResults\": maxResults,\n \"key\": key,\n \"order\": order,\n \"pageToken\": nextPageToken,\n \"searchTerms\": searchTerms\n }\n\n response=request_handler(self, url_ct, params=querystring, wait=5)\n #print(response) \n if response.get('error'):\n while response['error']['errors'][0]['reason'] == 'quotaExceeded' or \\\n response['error']['errors'][0]['reason'] == 'dailyLimitExceeded':\n key = keychange(self)\n querystring = {\"part\": part,\n \"channelId\": channelId,\n \"id\": ct_id,\n \"videoId\": videoId,\n \"key\": key,\n \"maxResults\": maxResults,\n \"order\": order,\n \"pageToken\": nextPageToken,\n \"searchTerms\": searchTerms\n }\n \n response = request_handler(self, url_ct, params=querystring, wait=5)\n if response.get('error'):\n continue\n else:\n break\n # print(response)\n if response.get('error'):\n comment_details.update({Identity: [str(response)]})\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{Identity: [str(response), response.text]}]\n break\n \n if response.get('Interneterror'):\n 
comment_details.update({Identity: response})\n break\n # print(response) \n # if limit == -1:\n # limit = response['pageInfo']['totalResults']\n nextPageToken = response.get(\"nextPageToken\")\n \n try:\n comment_count = comment_count + len(response['items'])\n # print(\"total comment extracted\",comment_count)\n if comment_details.get(Identity):\n comment_details[Identity].extend(response['items'])\n else:\n comment_details[Identity] = response['items']\n if nextPageToken==None or (comment_count>= limit and limit!=-1):\n break\n \n\n except:\n pass\n\n initial += 1\n\n # try:\n # comment_details[Identity] = response['items']\n # except:\n # pass\n\n except Exception as e:\n print(e,traceback.format_exc())\n\n return comment_details", "def thread(comments):\r\n \r\n ret = {'root': []}\r\n for comment in comments:\r\n if not comment.parent_id:\r\n ret['root'].append(comment)\r\n else:\r\n if comment.parent_id not in ret:\r\n ret[comment.parent_id] = []\r\n ret[comment.parent_id].append(comment)\r\n return ret", "def process(self):\n for user in self.repos:\n for repo in self.repos[user]:\n self.process_issues(user, repo)", "def create_view_all_comments_widget(ws_names2id: Dict[str, str], ws_paths: Dict[str, WorkspacePaths], output):\n workspace_chooser = widgets.Dropdown(\n options=ws_names2id,\n value=None,\n description='<b>Choose a workspace to view:</b>',\n style={'description_width': 'initial'},\n layout=widgets.Layout(width='900px')\n )\n\n def on_choose_workspace(changed):\n with output:\n output.clear_output()\n workspace_paths = ws_paths[changed['new']]\n try:\n comment_files = tf.io.gfile.glob(pattern=workspace_paths.get_comment_file_glob())\n except tf.errors.PermissionDeniedError as e:\n target_workspace = [name for name, id in ws_names2id.items() if id == changed['new']]\n display(HTML(f'''<div class=\"alert alert-block alert-danger\">\n <b>Warning:</b> Unable to view HTML snapshots in workspace {target_workspace} from <b>this workspace</b>.\n <hr><p><pre>{e.message}</pre></p>\n </div>'''))\n return\n if not comment_files:\n display(HTML('''<div class=\"alert alert-block alert-warning\">\n No comment files found for HTML snapshots in this workspace.</div>'''))\n return\n\n def get_comment(f):\n with tf.io.gfile.GFile(f, 'r') as fh:\n return fh.readlines()\n\n def process_task(f):\n return f, get_comment(f)\n\n max_pool = 8\n with Pool(max_pool) as p:\n pool_outputs = list(tqdm(p.imap(process_task, comment_files), total=len(comment_files)))\n\n comments = pd.DataFrame.from_dict({f.replace(workspace_paths.get_subfolder(), ''): c[0] for f, c in pool_outputs},\n orient = 'index',\n columns = ['comment']\n ).reset_index()\n comments[['extra', 'author', 'date', 'time', 'item']] = comments['index'].str.split(pat='/', expand=True)\n display(comments[['date', 'time', 'author', 'item', 'comment']].sort_values(by=['date', 'time']).reset_index(drop=True))\n workspace_chooser.observe(on_choose_workspace, names='value')\n\n return widgets.VBox(\n [widgets.HTML('''\n <h3>View all comments for a workspace</h3>\n <p>Use the dropdown to choose a workspace. Then this will display the contents of all comment files for the selected workspace.\n <br>The user, date, time, and notebook name are shown in the left column. 
The comment is shown in the right column.\n </p><hr>'''),\n workspace_chooser],\n layout=widgets.Layout(width='auto', border='solid 1px grey'))", "def fetch_cases():\n logger.info(\"Start fetching cases\")\n fb = fogbugz.FogBugz(\n settings.AUTH_FOGBUGZ_SERVER,\n settings.FOGBUGZ_TOKEN)\n release_query = ' OR '.join('milestone:\"{0}\"'.format(release.number) for release in Release.objects.all())\n resp = fb.search(\n q='({0}) AND ({ciproject}:\"*\")'.format(release_query, ciproject=settings.FOGBUGZ_CI_PROJECT_FIELD_ID),\n cols='sTitle,sOriginalTitle,sFixFor,dtFixFor,sProject,sArea,dtLastUpdated,tags,' +\n settings.FOGBUGZ_CI_PROJECT_FIELD_ID\n )\n cases = resp.findAll('case')\n logger.info('Found %s cases to fetch from fogbugz', len(cases))\n for case_xml in cases:\n update_case_from_fogbugz.apply_async(kwargs=dict(case_id=int(case_xml.attrs['ixbug'])))\n logger.info(\"Task finished\")", "async def initialiser_crawler(self) -> Dict[str, List[req.Response]]:\n web_pages = {}\n with ThreadPoolExecutor(max_workers=NUM_WORKERS) as exe:\n try:\n loop = asyncio.get_event_loop()\n tasks = [\n loop.run_in_executor(exe, self.collect_webpages, keyword)\n for keyword in self.keywords \n ]\n for res in await asyncio.gather(*tasks):\n web_pages.update(res)\n except KeyboardInterrupt:\n loop.close()\n raise KeyboardInterrupt\n return web_pages", "def crawl(self) -> None:\n result = self.__exec_request(self.url)\n if result == \"failed\":\n raise InterruptedError(\"The server responded with status code: {}\".format(self._status_code))\n self.__save_relevants_in_results(result, total=True)\n self.total_nums = self.results[\"total_results\"]\n pbar = tqdm(total=self.total_nums / 100) if self.to_be_num > self.total_nums else tqdm(total=self.to_be_num/100)\n pbar.update(1)\n if len(self.results[\"documents\"]) != self.to_be_num:\n while self.num_res < self.total_nums:\n # print(\"Is: {} | To be: {}\".format(self.num_res, self.total_nums))\n for el in result['search-results']['link']:\n if el['@ref'] == 'next':\n next_url = el['@href']\n result = self.__exec_request(next_url)\n if result == \"failed\":\n print(\"Invalid request. Server responded with Statuscode 400 while crawling. \"\n \"The found articles will be saved further on...\")\n break\n self.__save_relevants_in_results(result)\n pbar.update(1)\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n pbar.close()" ]
[ "0.6642331", "0.6419399", "0.6326556", "0.590753", "0.58366686", "0.5725965", "0.56005704", "0.55982554", "0.55817443", "0.5574108", "0.5573544", "0.55464286", "0.5534647", "0.55332947", "0.55286044", "0.5525885", "0.55153495", "0.54998296", "0.549831", "0.5463732", "0.5442687", "0.5419565", "0.5389291", "0.5376693", "0.5376668", "0.53130734", "0.53128296", "0.5299989", "0.5294363", "0.5293513", "0.52877617", "0.5282338", "0.5262726", "0.52437496", "0.5201804", "0.5193839", "0.51799154", "0.51750463", "0.5156866", "0.5114355", "0.51064354", "0.51039875", "0.51028824", "0.5099848", "0.5099131", "0.50916564", "0.5089109", "0.50863504", "0.5070284", "0.50688416", "0.5054991", "0.50495774", "0.5047135", "0.5033724", "0.503355", "0.5028963", "0.5013033", "0.5011669", "0.5007456", "0.50056326", "0.50029784", "0.50006187", "0.49924234", "0.49722335", "0.4972173", "0.49668425", "0.49610403", "0.49501485", "0.4947666", "0.4934941", "0.4934313", "0.49288493", "0.49233532", "0.492123", "0.49191713", "0.49177828", "0.491737", "0.49154153", "0.49107173", "0.49103102", "0.49046794", "0.4897516", "0.48969638", "0.48965722", "0.48959646", "0.48896724", "0.48893142", "0.48856193", "0.48829752", "0.48797572", "0.48782048", "0.48753804", "0.48674786", "0.4866235", "0.48555988", "0.4847973", "0.48457155", "0.4840758", "0.48396695", "0.48327768" ]
0.64263386
1
Creates a directory if it doesn't exist already
def createFolder(self, foldername: str) -> None:
    try:
        if not os.path.exists(foldername):
            os.makedirs(foldername)
    except OSError:
        print('Error: Creating following directory: ' + foldername)
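A minimal alternative sketch (not part of the original snippet, shown only for illustration): on Python 3, `os.makedirs(..., exist_ok=True)` makes the call idempotent and avoids the race between the existence check and the creation; the standalone function name and error message below are assumptions.

```python
import os

def create_folder(foldername: str) -> None:
    # exist_ok=True: no error if the directory already exists,
    # and any missing parent directories are created as well.
    try:
        os.makedirs(foldername, exist_ok=True)
    except OSError as err:
        print('Error: could not create directory ' + foldername + ': ' + str(err))
```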
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path, exist_ok=True)", "def create_dir(_dir):\n if not os.path.exists(_dir):\n os.makedirs(_dir)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def create_directory_if_not_exists(directory_path):\n os.makedirs(directory_path, exist_ok=True)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True", "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def CreateDirectory(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, 0777)", "def create_dir(dir_path):\n\n if not path.exists(dir_path):\n log('Creating directory: {0}'.format(dir_path))\n run(sh.mkdir, dir_path, p=True)", "def mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_dir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def _create_dir_if_not_exists(dir: str):\n\n if os.path.exists(dir) and not os.path.isdir(dir):\n raise ValueError(f'Provided path {dir} was not a directory')\n\n if not os.path.exists(dir):\n _log.info(f'Creating directory {dir}')\n os.mkdir(dir)", "def create_directory(path):\n try:\n os.makedirs(path) # pylint: disable=no-member\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise", "def make_dir(self, path):\n import os\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directory(dirname):\n\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def create_dir(dir_):\n try:\n os.makedirs(dir_)\n logger.debug(\"Creating directory %s\", dir_)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", 
"def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def _mkdir_p(path):\n if not osp.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True", "def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def createDir(directory):\n if not os.path.exists(directory):\n statusCreation = os.makedirs(directory)\n else:\n statusCreation = 2\n return statusCreation", "def mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. Creating dir.\".format(path))\n os.mkdir(path)", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")", "def mkdir(dir):\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)", "def createDirectory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])", "def mkDir(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n # In a race between two threads, this thread may have lost,\n # in which case the directory will now exist. Otherwise this\n # is a real exception.\n if not os.path.exists(path):\n raise", "def create_or_clean_directory(dir):\n\tif not os.path.exists(dir):\n\t\tprint(\"The path \\\"\" + dir + \"\\\" does not exist\")\n\t\tprint(\"creating directory \\\"\" + dir + \"\\\"\")\n\t\tos.makedirs(dir)\n\telse: #Directory exists, but we want to clean it before use\n\t\tprint(dir + \" already exists. 
Cleaning before use...\")\n\t\tshutil.rmtree(dir)\n\t\tos.makedirs(dir)", "def create_directory_if_needed(directory_name):\n if not os.path.isdir(directory_name):\n os.makedirs(directory_name)", "def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def make_directory(name: str):\n try:\n os.mkdir(name)\n except:\n pass", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_dir(newdir):\n try:\n os.makedirs(newdir)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(newdir):\n pass\n else:\n raise", "def ensure_dir_exists(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def create_not_existing_directory(\n directory: str\n):\n p = pathlib.Path(directory)\n if not p.is_dir():\n print(f'Creating directory: {directory} as it does not exist')\n p.mkdir(parents=True, exist_ok=True)", "def ensure_dir(d):\n\n if not os.path.exists(d):\n os.makedirs(d, exist_ok=True)\n\n return", "def create_directory(dir:str):\n # Create directory if doesn't already exist\n # Path(dir).mkdir(parents=True, exist_ok=True)\n try:\n os.makedirs(dir)\n print(\"Created directory\",dir)\n except OSError as e:\n print(\"Directory exists\",dir)", "def mkdir_p(path):\n try:\n os.makedirs(path) # , exist_ok=True\n except OSError:\n pass", "def mkdir(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)", "def create_dir(directory):\n if not os.path.isdir(directory):\n os.makedirs(directory)", "def mkdir(path):\n try: \n os.mkdir(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "def create_dir(dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except OSError:\n print('Error: Cannot create directory named \\\"' + dir + '\\\"')", "def create_file_directory():\n\n # Verify if directory exist.\n # If yes, delete it and every thing inside and create it again.\n # If not, just create it.\n\n if os.path.isdir('./file'):\n\n shutil.rmtree('./file')\n\n os.mkdir('./file')", "def mkdirp(d):\r\n try:\r\n os.makedirs(d)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else:\n raise", "def mkdir(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def create_directory(directory_name):\n directory = \"./\" + directory_name + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)", "def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass", "def _ensure_dir_exists(self, directory):\n directory = directory.strip()\n if not Path(directory).exists():\n os.mkdir(directory)", "def mkdir(path):", "def ensure_dir( dirName 
):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else: raise", "def prepare_dir(path, empty=False):\n\n def create_dir(path):\n \"\"\"\n Creates a directory\n :param path: string\n :return: nothing\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n if not os.path.exists(path):\n create_dir(path)", "def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def mkdir_p(path):\n\n if os.path.exists(path):\n return\n\n par = os.path.split(path)[0]\n if os.path.exists(par):\n os.mkdir(path)\n getLogger(__name__).debug('created directory: %s' % path)\n else:\n mkdir_p(par)\n os.mkdir(path)", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def mkdir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def mkdir(path: str):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_dir_exists(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def make_dir(directory):\n try:\n os.makedirs(directory)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def _ensure_dir(directory):\r\n try:\r\n os.makedirs(directory)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise", "def mkdir(directory):\n\n if os.path.exists(directory):\n if os.path.isfile(directory):\n message = \"Unable to created directory '%s': A file with that name already exists\"\n raise PyBuilderException(message, directory)\n return\n os.makedirs(directory)" ]
[ "0.84880376", "0.84691334", "0.8421431", "0.8365199", "0.83478045", "0.83478045", "0.83265615", "0.8294003", "0.8283774", "0.82814693", "0.8278838", "0.8278838", "0.82774234", "0.827556", "0.8255593", "0.824047", "0.82380694", "0.82136655", "0.82106537", "0.8207876", "0.82056063", "0.81956303", "0.81956303", "0.81913203", "0.8184836", "0.8175001", "0.8167243", "0.81547165", "0.815446", "0.81499165", "0.81328726", "0.81250036", "0.81164676", "0.8098221", "0.8095563", "0.8095563", "0.8095563", "0.8095563", "0.80944717", "0.8089686", "0.8089686", "0.8089686", "0.8089686", "0.8083649", "0.8083038", "0.8075744", "0.8059369", "0.80538636", "0.80377805", "0.80237395", "0.80175626", "0.80120003", "0.80119556", "0.8009235", "0.8007938", "0.800252", "0.7989244", "0.79604524", "0.7956629", "0.79554534", "0.79517347", "0.79517347", "0.79517347", "0.79517347", "0.79517347", "0.79512376", "0.794494", "0.79413193", "0.7940404", "0.793988", "0.7922695", "0.7922051", "0.79213685", "0.7917829", "0.79094374", "0.7907283", "0.79032737", "0.7897006", "0.789085", "0.78884095", "0.78860205", "0.7883318", "0.786757", "0.7860479", "0.78580356", "0.7857526", "0.78427976", "0.783193", "0.7826251", "0.7824294", "0.7821189", "0.78120226", "0.7811785", "0.78083974", "0.7799158", "0.7799144", "0.77991015", "0.7796383", "0.77953196", "0.7789857", "0.77878726" ]
0.0
-1
download sequencing file from SRA archive; requires local install of SRA tools in path; requires verification of filenames and paths
def download_SRA(SRA):
    print("Downloading SRA archive")
    output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)
    print("Extracting FASTQ data")
    output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)
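A hedged sketch of the same two-step workflow with basic error checking (an illustration, not the original implementation), assuming the SRA Toolkit binaries `prefetch` and `fastq-dump` are on the PATH and that `NCBI_DIR` points at the local prefetch cache; the path shown is a placeholder assumption.

```python
import subprocess

NCBI_DIR = '/home/user/ncbi/public/sra/'  # assumed prefetch cache location

def download_sra(sra_id: str) -> None:
    # Fetch the .sra archive; check=True raises CalledProcessError on failure
    # instead of silently continuing to the extraction step.
    subprocess.run(['prefetch', '-f', 'yes', sra_id],
                   stderr=subprocess.STDOUT, check=True)
    # Convert the archive to gzipped FASTQ files in the current directory.
    subprocess.run(['fastq-dump', '--gzip', NCBI_DIR + sra_id + '.sra'],
                   stderr=subprocess.STDOUT, check=True)
```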
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_sra_files(remote_location, local_location = '', max_recursion = 3, verbose = False):\n\n downloaded_files = list();\n\n def printv(*args):\n if(verbose):\n print(*args);\n sys.stdout.flush();\n\n printv(\"Reading folder: \", remote_location);\n\n req = urllib2.Request(remote_location);\n\n response = urllib2.urlopen(req);\n\n the_page = response.read();\n\n entries = the_page.split('\\r\\n');\n\n #Identify sub folders\n folders = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == 'd'): #if directory flag\n folders.append(spl_entry[-1]);\n\n\n for folder in folders:\n dl_files = download_sra_files(remote_location + '/' + folder, local_location, max_recursion - 1, verbose);\n downloaded_files.extend(dl_files);\n\n #Identify SRA files\n files = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == '-' and #Not a directory\n spl_entry[-1].lower().endswith('.sra')): #Has extension '.sra'\n\n files.append(spl_entry[-1]);\n\n if(len(files) > 0):\n printv(\"Identified sra files: \");\n for file_name in files:\n printv(\" \", file_name);\n\n abs_local_location = os.path.abspath(local_location);\n\n if(not os.path.isdir(abs_local_location)):\n os.makedirs(abs_local_location);\n\n for file_name in files:\n\n printv(\"Downloading \", file_name);\n\n file_str = remote_location + '/' + file_name;\n\n req = urllib2.Request(file_str);\n response = urllib2.urlopen(req);\n\n dest_file_name = abs_local_location + os.sep + file_name;\n dest_file = open(dest_file_name, 'wb');\n shutil.copyfileobj(response, dest_file)\n dest_file.close();\n downloaded_files.append(dest_file_name);\n\n return downloaded_files;", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = 
rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None", "def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)", "def _download_scn_asf(params):\n pid = params[0]\n product_file_id = params[1]\n remote_url = params[2]\n db_info_obj = params[3]\n scn_lcl_dwnld_path = params[4]\n asf_user = params[5]\n asf_pass = params[6]\n success = False\n\n eodd_wget_downloader = eodatadown.eodatadownutils.EODDWGetDownload()\n start_date = datetime.datetime.now()\n try:\n success = eodd_wget_downloader.downloadFile(remote_url, scn_lcl_dwnld_path, username=asf_user,\n password=asf_pass, try_number=\"10\", time_out=\"60\")\n except Exception as e:\n logger.error(\"An error has occurred while downloading from ASF: '{}'\".format(e))\n end_date = datetime.datetime.now()\n\n if success and os.path.exists(scn_lcl_dwnld_path):\n logger.debug(\"Set up database connection and update record.\")\n db_engine = sqlalchemy.create_engine(db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == pid).one_or_none()\n if query_result is None:\n logger.error(\"Could not find the scene within local database: \" + product_file_id)\n else:\n query_result.Downloaded = True\n query_result.Download_Start_Date = start_date\n query_result.Download_End_Date = end_date\n query_result.Download_Path = scn_lcl_dwnld_path\n ses.commit()\n ses.close()\n logger.info(\"Finished download and updated database: {}\".format(scn_lcl_dwnld_path))\n else:\n logger.error(\"Download did not complete, re-run and it should try again: {}\".format(scn_lcl_dwnld_path))", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def elar_download(bundle_id, phpsessid, extension):\n\n # check for validity of ID\n try:\n soasID = bundle_id.split(\"oai:soas.ac.uk:\")[1]\n except IndexError: # bundle_id does not start with oai:soas.ac.uk:, so we are not interested\n print(\"not a SOAS file\", soasID)\n return\n # prepare request\n url = \"https://elar.soas.ac.uk/Record/%s\" % soasID\n 
cookies = {\"PHPSESSID\": phpsessid}\n print(\"checking\", url)\n # retrieve catalog page\n with requests.Session() as s:\n r = s.post(url, cookies=cookies)\n html = r.text\n # extract links to ELAN files\n try:\n links = fromstring(html).findall(\".//tbody/tr/td/a\")\n locations = {\n a.attrib[\"href\"] for a in links if a.attrib[\"href\"].endswith(extension)\n }\n except AttributeError: # not an ELAN file\n print(\"files are not accessible\")\n return\n # dowload identified files\n if locations == []:\n print(\"files are not accessible\")\n return\n for location in locations:\n download_url = location\n bs = location.split(\"/\")[-1].split('-b-')\n if len(bs) == 1:\n collectionname = 'no_collection'\n basename = bs[0]\n else:\n collectionname = bs[0]\n basename = '-b-'.join(bs[1:])\n filepath = os.path.join('elar', collectionname, basename)\n if len(filepath) > 150:\n filepath = os.path.join('elar', collectionname, \"%s.%s\" % (hash(basename[:-4]),extension))\n print(\" downloading %s as %s:\" % (location, filepath))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n save_file(s, filepath, download_url, cookies)", "def download_from_archive(filename, sub_path='raw_files', env_var='DRAGONS_TEST'):\n # Find cache path and make sure it exists\n root_cache_path = os.getenv(env_var)\n\n if root_cache_path is None:\n raise ValueError(f'Environment variable not set: {env_var}')\n\n root_cache_path = os.path.expanduser(root_cache_path)\n\n if sub_path is not None:\n cache_path = os.path.join(root_cache_path, sub_path)\n\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n\n # Now check if the local file exists and download if not\n local_path = os.path.join(cache_path, filename)\n if not os.path.exists(local_path):\n tmp_path = download_file(URL + filename, cache=False)\n shutil.move(tmp_path, local_path)\n\n # `download_file` ignores Access Control List - fixing it\n os.chmod(local_path, 0o664)\n\n return local_path", "def cli(date, path, mission):\n download.main(path, mission, date)", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n 
tar.extractall(path=DATADIR)\n tar.close()", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download_latex(self):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. 
\n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n return downloaded_data\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))", "def download_scn(self, unq_id):\n if not os.path.exists(self.baseDownloadPath):\n raise EODataDownException(\"The download path does not exist, please create and run again.\")\n\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id,\n EDDSentinel1ASF.Downloaded == False).filter(\n EDDSentinel1ASF.Remote_URL is not None).all()\n ses.close()\n success = False\n if query_result is not None:\n if len(query_result) == 1:\n record = query_result[0]\n logger.debug(\"Building download info for '\" + record.Remote_URL + \"'\")\n scn_lcl_dwnld_path = os.path.join(self.baseDownloadPath,\n \"{}_{}\".format(record.Product_File_ID, record.PID))\n if not os.path.exists(scn_lcl_dwnld_path):\n os.mkdir(scn_lcl_dwnld_path)\n out_filename = record.Remote_FileName\n _download_scn_asf([record.PID, record.Product_File_ID, record.Remote_URL, self.db_info_obj,\n os.path.join(scn_lcl_dwnld_path, out_filename), self.asfUser, self.asfPass])\n success = True\n elif len(query_result) == 0:\n logger.info(\"PID {0} is either not available or already been downloaded.\".format(unq_id))\n else:\n logger.error(\"PID {0} has returned more than 1 scene - must be unique something really wrong.\".\n format(unq_id))\n raise EODataDownException(\"There was more than 1 scene which has been found - \"\n \"something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return success", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )", "def main() -> None:\n run_time = datetime.datetime.now()\n datetime_string = run_time.strftime(\"%Y%m%d_%H%M%S\")\n\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-l\",\n \"--logfolder\",\n type=str,\n default=\"ia_downloader_logs\",\n help=(\n \"Folder to write logs to (if not specified, folder 'ia_downloader_logs' will be used in\"\n \" same directory as this script)\"\n ),\n )\n\n subparsers = parser.add_subparsers(\n help=(\n \"Either 'download' files associated with an Internet Archive identifier, or 'verify' a\"\n \" previously-completed download was successful and files match expected MD5 hash values\"\n ),\n dest=\"command\",\n required=True,\n )\n\n download_parser = subparsers.add_parser(\"download\")\n 
download_parser.add_argument(\n \"identifiers\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) Archive.org identifiers (e.g.\"\n \" 'gov.archives.arc.1155023'). If specifying a collection (and you wish to download all\"\n \" items within the collection), use the prefix 'collection:' (e.g. 'collection:nasa')\"\n ),\n )\n download_parser.add_argument(\"output_folder\", type=str, help=\"Folder to download files to\")\n download_parser.add_argument(\n \"-t\",\n \"--threads\",\n type=check_argument_int_greater_than_one,\n default=5,\n help=(\n \"Number of download threads (i.e. how many downloads to perform simultaneously)\"\n \" (default is 5)\"\n ),\n )\n download_parser.add_argument(\n \"-v\",\n \"--verify\",\n default=False,\n action=\"store_true\",\n help=\"Perform an MD5 hash check on each file as downloads complete\",\n )\n download_parser.add_argument(\n \"-r\",\n \"--resume\",\n default=False,\n action=\"store_true\",\n help=(\n \"Attempt to resume downloads using already-downloaded data if a connection error occurs\"\n ),\n )\n download_parser.add_argument(\n \"-s\",\n \"--split\",\n type=check_argument_int_greater_than_one,\n default=1,\n help=(\n \"To increase per-file download speeds, split files above 10MB into provided number of\"\n \" chunks, and reconstruct on completion\"\n ),\n )\n download_parser.add_argument(\n \"-f\",\n \"--filefilters\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) file name filters; only files that contain any of the\"\n \" provided filter strings (case insensitive) will be downloaded. If multiple filters\"\n \" are provided, the search will be an 'OR' (i.e. only one of the provided strings needs\"\n \" to hit)\"\n ),\n )\n download_parser.add_argument(\n \"-c\",\n \"--credentials\",\n type=str,\n nargs=2,\n help=(\n \"Email address and password (as separate strings) for Internet Archive account\"\n \" (required for download of some Internet Archive items)\"\n ),\n )\n download_parser.add_argument(\n \"--hashfile\",\n type=str,\n help=(\n \"Output path to write file containing hash metadata to (if not specified, file will\"\n \" be created in the output folder)\"\n ),\n )\n download_parser.add_argument(\n \"--cacherefresh\",\n default=False,\n action=\"store_true\",\n help=\"Flag to update any cached Internet Archive metadata from previous script executions\",\n )\n\n verify_parser = subparsers.add_parser(\"verify\")\n verify_parser.add_argument(\n \"data_folders\",\n type=str,\n nargs=\"+\",\n help=\"Path to folder containing previously downloaded data\",\n )\n verify_parser.add_argument(\n \"-i\",\n \"--identifiers\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) Archive.org identifiers (e.g.\"\n \" 'gov.archives.arc.1155023') - to be used if only certain item(s) in the target\"\n \" folder(s) are to be verified\"\n ),\n )\n verify_parser.add_argument(\n \"--hashfile\",\n type=str,\n help=(\n \"Path to file containing hash metadata from previous download using this script (if not\"\n \" specified, cached data from previous script execution will be used)\"\n ),\n )\n verify_parser.add_argument(\n \"-f\",\n \"--filefilters\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) file name filters; only files that contain any of the\"\n \" provided filter strings (case insensitive) will be verified. If multiple filters\"\n \" are provided, the search will be an 'OR' (i.e. 
only one of the provided strings needs\"\n \" to hit)\"\n ),\n )\n verify_parser.add_argument(\n \"--nopaths\",\n default=False,\n action=\"store_true\",\n help=(\n \"If files are no longer in the same relative paths, perform lookup based only on\"\n \" whether MD5 hashes are present in the data set (rather than also checking where those\"\n \" files are stored)\"\n ),\n )\n\n args = parser.parse_args()\n\n # Set up logging\n log_subfolders = [\"logs\", \"cache\"]\n for log_subfolder in log_subfolders:\n pathlib.Path(os.path.join(args.logfolder, log_subfolder)).mkdir(parents=True, exist_ok=True)\n log, counter_handler = prepare_logging(\n datetime_string,\n os.path.join(args.logfolder, log_subfolders[0]),\n \"ia_downloader\",\n dict(vars(args)),\n )\n log.info(\n \"Internet Archive is a non-profit organisation that is experiencing unprecedented service\"\n \" demand. Please consider making a donation: https://archive.org/donate\"\n )\n log.info(\"Logs will be stored in folder '{}'\".format(args.logfolder))\n\n try:\n if args.command == \"download\":\n if args.credentials is not None:\n try:\n internetarchive.configure(args.credentials[0], args.credentials[1])\n except internetarchive.exceptions.AuthenticationError:\n log.error(\n \"Authentication error raised for supplied email address and password -\"\n \" check these were entered correctly (if the password has spaces, it must\"\n \" be wrapped in quotation marks)\"\n )\n return\n if args.hashfile is not None:\n log.info(\n \"Internet Archive metadata will be written to hash file at '{}'\".format(\n args.hashfile\n )\n )\n if args.threads > 5 or args.split > 5:\n log.info(\n \"Reducing download threads to 5, to optimise script performance and reduce\"\n \" Internet Archive server load\"\n )\n args.threads = min(args.threads, 5)\n args.split = min(args.split, 5)\n if args.split > 1:\n if args.threads > 1:\n log.info(\n \"While using file splitting, only one file will be downloaded at a time so\"\n \" as to not overwhelm Internet Archive servers\"\n )\n args.threads = 1\n hashfile_file_handler = None\n if args.hashfile:\n hashfile_file_handler = open(args.hashfile, \"w\")\n\n for identifier in args.identifiers:\n download(\n identifier=identifier,\n output_folder=args.output_folder,\n hash_file=hashfile_file_handler,\n thread_count=args.threads,\n resume_flag=args.resume,\n verify_flag=args.verify,\n split_count=args.split,\n file_filters=args.filefilters,\n cache_parent_folder=os.path.join(args.logfolder, log_subfolders[1]),\n cache_refresh=args.cacherefresh,\n )\n\n if args.hashfile:\n hashfile_file_handler.close()\n\n elif args.command == \"verify\":\n verify(\n hash_file=args.hashfile,\n data_folders=args.data_folders,\n no_paths_flag=args.nopaths,\n hash_flag=True,\n cache_parent_folder=os.path.join(args.logfolder, log_subfolders[1]),\n identifiers=args.identifiers,\n file_filters=args.filefilters,\n )\n\n if counter_handler.count[\"WARNING\"] > 0 or counter_handler.count[\"ERROR\"] > 0:\n log.warning(\n \"Script complete; {} warnings/errors occurred requiring review (see log entries\"\n \" above, replicated in folder '{}')\".format(\n counter_handler.count[\"WARNING\"] + counter_handler.count[\"ERROR\"],\n args.logfolder,\n )\n )\n else:\n log.info(\"Script complete; no errors reported\")\n\n except KeyboardInterrupt:\n log.warning(\n \"KeyboardInterrupt received, quitting immediately (any in-progress downloads or\"\n \" verifications have been terminated)\"\n )\n except Exception:\n log.exception(\"Exception occurred:\")", "def 
download(self, download_path):\n return", "def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! 
While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def _download(self, revision: str, path: str) -> scm.DownloadResult:\n command = f'{self.command} {revision}'\n if path:\n command += f' {path}'\n content = internals.run(command)\n return scm.DownloadResult(revision, path, content)", "def download_fastq():\n\n mkdir(FASTQ_DIR)\n\n template = \"\"\"fastq-dump --split-files --gzip {}\"\"\"\n\n printp(\"\"\"\\n#\\n# download all the fastq files\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label fastq-download\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(FASTQ_DIR))\n\n for library, info in DATA.items():\n printp(template.format(get_srr(library)))\n printp(template.format(get_input_control_srr(library)))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def download() -> Path:\n rts_downloader.download()\n rts_gmlc_dir = Path(rts_downloader.rts_download_path) / \"RTS-GMLC\"\n return rts_gmlc_dir", "def swissprot_fasta_downloader(database_directory: Path) -> Path:\n # Create protein folder\n merged_db_folder = database_directory / \"protein_db\"\n merged_db_folder.mkdir(parents=True, exist_ok=True)\n # Get release number\n rel_url = (\n f\"ftp://ftp.uniprot.org/pub/databases/uniprot/\"\n f\"current_release/knowledgebase/complete/reldate.txt\")\n release_file = urllib.request.urlopen(rel_url)\n release_number = release_file.readline().decode('utf-8').split()[3]\n # Write release information into file\n with open(merged_db_folder / \"uniprot_release.txt\", 'w') as relinfo:\n relinfo.write(\"Release number (date) {}\".format(release_number))\n # Download fasta files\n logger.info(\"Downloading SwissProt Fasta files\")\n # Overwrite existing temporal files\n output_fasta = merged_db_folder / \"uniprot_sprot.fasta.gz\"\n if output_fasta.is_file():\n output_fasta.unlink()\n # Download swissprot\n fasta_url = (\n f\"ftp://ftp.uniprot.org/pub/databases/uniprot/\"\n f\"current_release/knowledgebase/complete/uniprot_sprot.fasta.gz\")\n wget.download(fasta_url, out=str(output_fasta))\n decompressed_fasta = merged_db_folder / \"uniprot_sprot.fasta\"\n with gzip.open(output_fasta, 'rt') as compressed_fh, \\\n open(decompressed_fasta, 'w') as decompressed_fh:\n copyfileobj(compressed_fh, decompressed_fh)\n output_fasta.unlink()\n logger.info(\"Finished\")\n\n return output_fasta", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. 
If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def download_extracted_files(a1000):\n hash_value = demisto.getArg('hash')\n try:\n response = a1000.download_extracted_files(hash_value)\n except Exception as e:\n return_error(str(e))\n\n filename = hash_value + '.zip'\n command_results = CommandResults(\n readable_output=f\"## ReversingLabs A1000 download extraced files \\nExtracted files are available for download \"\n f\"under the name {filename}\"\n )\n\n file_result = fileResult(filename, response.content, file_type=EntryType.FILE)\n\n return [command_results, file_result]", "def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))", "def download_files(path):\n return edgar.download_index(path,2019,skip_all_present_except_last=False)", "def download():\n raise NotImplementedError", "def download(date, shared_args):\n # year with century, zero padded month, then full date\n # TODO fix merra url to include new format strings\n url = settings.MERRA_URL % (date.strftime('%Y'), date.strftime('%m'),\n date.strftime('%Y%m%d'))\n\n filename = url_download(url, settings.MERRA_DIR, shared_args, auth=settings.MERRA_LOGIN)\n return filename", "def _download_file(self, report_date):\n fdate = report_date.strftime('%Y-%m-%d')\n ddate = '/'.join(fdate.split('-')[:-1])\n link = FILE_URL % (ddate, fdate)\n name = os.path.basename(urlparse(link).path)\n \n try:\n print ' Accessing %s.' % name\n r = requests.get(link, stream=True)\n r.raise_for_status()\n except RequestException as e:\n status = r.status_code\n \n if status == 404:\n pass\n if status >= 500:\n print ' - Unable to download %s: %s\\n' % (name, e)\n self.failed += 1\n else:\n print ' - Downloading %s.' % name\n fpath = os.path.join(self.path, name)\n \n with open(fpath, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n \n self.counts += 1\n print ' - Saved %s.' 
% name", "def download_file(year: str, month: str, career: str, kind: str = \"ativo\") -> str:\n\n career = career.lower()\n downloaded_files = []\n existing_files = []\n res = \"\"\n for file_ in CGU_FILES[career][kind]:\n url = f\"{URL_CGU_DOWNLOADS}/{year}{month}_{file_}\"\n try:\n division = file_.split(\"_\")[-1] if career == \"civil\" else None\n if not file_exists(year, month, career, kind, division):\n print(f\"Downloading {url}\")\n sleep(10)\n req = requests.get(url, stream=True, timeout=90)\n req.raise_for_status()\n filename = (\n req.headers.get(\"Content-Disposition\")\n .split(\"=\")[-1]\n .replace('\"', \"\")\n )\n saved = save_file(filename, req.content)\n unzipped_file = unzip_salary_file(saved, year, month, career, kind)\n downloaded_files.append(unzipped_file)\n else:\n print(f\"Arquivo {url} já existe\")\n existing_files.append(file_)\n res = f\"Arquivos baixados: {', '.join(downloaded_files)} \\nArquivos existentes: {', '.join(existing_files)}\"\n except requests.exceptions.ConnectionError as err:\n res = f\"Erro de conexão: {err}\"\n # pylint: disable=line-too-long\n # Erro de conexão: HTTPConnectionPool(host='www.portaltransparencia.gov.br', port=80): Max retries\n # exceeded with url: /download-de-dados/servidores/202202_Reserva_Reformas_Militares (Caused by\n # NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f9cd019cd90>: Failed to establish\n # a new connection: [Errno -3] Temporary failure in name resolution')\n except requests.exceptions.HTTPError as err:\n res = f\"Arquivo inexistente: {url.split('/')[-1]}.zip - {err}\"\n # pylint: disable=line-too-long\n # Erro no download: 404 Client Error: Not Found for url:\n # https://www.portaltransparencia.gov.br/download-de-dados/servidores/202202_Reserva_Reformas_Militares\n\n print(res)\n return res", "def _download_mirbase(args, version=\"CURRENT\"):\n if not args.hairpin or not args.mirna:\n logger.info(\"Working with version %s\" % version)\n hairpin_fn = op.join(op.abspath(args.out), \"hairpin.fa.gz\")\n mirna_fn = op.join(op.abspath(args.out), \"miRNA.str.gz\")\n if not file_exists(hairpin_fn):\n cmd_h = \"wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$\" % (version, hairpin_fn)\n do.run(cmd_h, \"download hairpin\")\n if not file_exists(mirna_fn):\n cmd_m = \"wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$\" % (version, mirna_fn)\n do.run(cmd_m, \"download mirna\")\n else:\n return args.hairpin, args.mirna", "def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)", "def test_download_simfile(self):\n scrape_category.download_simfile(self.simfile, self.dest,\n tidy=False,\n use_logfile=True,\n extract=True,\n link=self.link)\n\n # There should now be three files - a download log, a zip, and\n # an unzipped simfile.\n self.check_saved_files(log=True, unzipped=True, zipped=True)\n\n records = {\"100\": 
self.simfile}\n updated_records = scrape_category.update_records_from_log(records, self.dest)\n assert len(updated_records) == 1\n assert \"100\" in updated_records\n # The records should be updated to reflect where the simfile\n # was actually saved\n assert updated_records[\"100\"].name == \"foo\"", "def sdssDownload(band, location, size, path):\n\n debug = 0\n\n \n # Build the URL to get image metadata\n \n url = \"http://montage.ipac.caltech.edu/cgi-bin/ArchiveList/nph-archivelist?survey=SDSSDR7+\" \\\n + urllib.parse.quote_plus(band) \\\n + \"&location=\" \\\n + urllib.parse.quote_plus(location) \\\n + \"&size=\" \\\n + str(size) + \"&units=deg&mode=JSON\"\n \n if debug:\n print('DEBUG> url = \"' + url + '\"')\n \n \n # Retrieve the image metadata and convert\n # the JSON to a Python dictionary\n \n fjson = urllib.request.urlopen(url)\n \n data = json.load(fjson)\n \n if debug:\n print(\"DEBUG> data: \")\n print(data)\n \n nimages = len(data)\n \n if debug:\n print(\"DEBUG> nimages = \" + str(nimages))\n \n \n # We need to check the given directory, \n # whether it exists, whether it is writeable,\n # etc. We'll do it by trying to create it,\n # then trying to write the image data it.\n \n rtn = {} \n \n try:\n \n if not os.path.exists(path):\n os.makedirs(path)\n \n except:\n rtn['status'] = 1\n rtn['msg' ] = 'Cannot create output directory.'\n return rtn \n \n \n # Retrieve all the images into the data directory\n\n try:\n for index in range(0,nimages):\n \n datafile = path + \"/\" + data[index]['file']\n url = data[index]['url']\n archivefile = url\n archivefile = archivefile.replace('http://das.sdss.org','/home/idies/workspace/sdss_das/das2')\n\n if debug:\n print('copy file ' + archivefile + ' to ' + datafile)\n\n copyfile(archivefile, datafile)\n\n except:\n \n rtn['status'] = 1\n rtn['msg' ] = 'Error reading or writing data'\n return rtn\n \n \n # Success\n \n rtn['status'] = 0\n rtn['count' ] = nimages\n return rtn", "def download_result_archive(run_id):\n from robflask.service import service\n with service() as api:\n ioBuffer = api.runs().get_result_archive(run_id=run_id)\n return send_file(\n ioBuffer.open(),\n as_attachment=True,\n attachment_filename='run.tar.gz',\n mimetype='application/gzip'\n )", "def download_special(pxdataset, data_dir):\n # PXD004074 (Tsr1) --------------------------------------------------------\n if pxdataset.pxid == \"PXD004074\":\n tsr1_filename = \"Rappsilber_Cook_CLMS_Tsr1_fasta.zip\"\n tsr1_zip = os.path.join(data_dir, tsr1_filename)\n pxdataset.pxget(tsr1_filename, data_dir)\n\n with zipfile.ZipFile(tsr1_zip, \"r\") as fname:\n fname.extractall(data_dir)\n\n # PXD010222 (PPARg_LBD) ---------------------------------------------------\n if pxdataset.pxid == \"PXD010222\":\n ppar_seq = [\n \">wef|PV4545|PPARg-LBD_human GST-tagged PPARgamma LBD\",\n \"MAPILGYWKIKGLVQPTRLLLEYLEEKYEEHLYERDEGDKWRNKKFELGLEFPNLPYYIDGD\",\n \"VKLTQSMAIIRYIADKHNMLGGCPKERAEISMLEGAVDIRYGVSRIAYSKDFETLKVDFLSK\",\n \"LPEMLKMFEDRLCHKTYLNGDHVTHPDFMLYDALDVVLYMDPMCLDAFPKLVCFKKRIEAIP\",\n \"QIDKYLKSSKYIALWPLQGWQATFGGGDHPPKSDLVPRHNQTSLYKKAGTMQLNPESADLRA\",\n \"LAKHLYDSYIKSFPLTKAKARAILTGKTTDKSPFVIYDMNSLMMGEDKIKFKHITPLQEQSK\",\n \"EVAIRIFQGCQFRSVEAVQEITEYAKSIPGFVNLDLNDQVTLLKYGVHEIIYTMLASLMNKD\",\n \"GVLISEGQGFMTREFLKSLRKPFGDFMEPKFEFAVKFNALELDDSDLAIFIAVIILSGDRPG\",\n \"LLNVKPIEDIQDNLLQALELQLKLNHPESSQLFAKLLQKMTDLRQIVTEHVQLLQVIKKTET\",\n \"DMSLHPLLQEIYKDL\"\n ]\n\n ppar_path = os.path.join(data_dir, \"pparg.fasta\")\n with open(ppar_path, \"w\") as fasta:\n fasta.writelines([l + 
\"\\n\" for l in ppar_seq])", "def download_gtsrb(root: str):\n url_training = \"https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB-Training_fixed.zip\"\n url_test = \"https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_Images.zip\"\n url_test_gt = \"https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/GTSRB_Final_Test_GT.zip\"\n download_and_extract_archive(url_training, download_root=root)\n download_and_extract_archive(url_test, download_root=root)\n download_and_extract_archive(url_test_gt, download_root=root)", "def download_files(self):", "def download_hess_dr1_data():\n download_data_files(FILENAMES_HESS_DR1)", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def download(self) -> None:\n os.makedirs(self.root, exist_ok=True)\n\n for subset in self.subsets:\n if self._check_subset_integrity(subset):\n print(f\"{subset} already downloaded and verified\")\n continue\n path = os.path.join(self.root, subset + \".tar.gz\")\n\n already_present = os.path.isfile(path)\n if not already_present:\n subset_url = self.openslr_url + subset + \".tar.gz\"\n with requests.get(subset_url, stream=True) as r:\n r.raise_for_status()\n with open(path, \"wb\") as f:\n shutil.copyfileobj(r.raw, f)\n\n archive_md5 = self.data_files[subset][\"archive_md5\"]\n if utils.checksum_file(path, \"md5\") != archive_md5:\n raise utils.DownloadError(f\"invalid checksum for {path}\")\n\n with tarfile.open(path, mode=\"r|gz\") as tar:\n tar.extractall(self.root)\n\n if not already_present:\n os.remove(path)", "def download_sample(a1000):\n hash_value = demisto.getArg('hash')\n\n try:\n response = a1000.download_sample(hash_value)\n except Exception as e:\n return_error(str(e))\n\n command_results = CommandResults(\n readable_output=f\"## ReversingLabs A1000 download sample \\nRequested sample is available for download under \"\n f\"the name {hash_value}\"\n )\n\n file_result = fileResult(hash_value, response.content, file_type=EntryType.FILE)\n\n return [command_results, file_result]", "def get_rmt_file(uri, creds, sFile):\n\n import urllib\n try:\n urllib.urlretrieve(uri, sFile)\n return True\n\n except:\n return False", "def download_files_in_drs_manifest(\n hostname,\n auth,\n infile,\n output_dir,\n show_progress=True,\n unpack_packages=True,\n delete_unpacked_packages=False,\n) -> None:\n _download(\n hostname,\n auth,\n infile,\n output_dir,\n show_progress,\n unpack_packages,\n delete_unpacked_packages,\n )", "def download_file(driver, link, filename):\n download_path = os.path.join(os.environ['HOME'], \"Downloads\", filename)\n # TODO: copy cookies, user agent, ect to session\n s = requests.session()\n r = s.get(link, stream=True)\n with open(download_path, 'wb') as 
f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return download_path", "def test_download_and_unlink(self):\n scrape_category.get_simfile_from_ziv(self.simfile, self.link, self.dest)\n assert os.path.exists(os.path.join(self.dest, \"sim100.zip\"))\n\n scrape_category.unlink_zip(self.simfile, self.dest)\n assert not os.path.exists(os.path.join(self.dest, \"sim100.zip\"))", "def download(url, path):\n response = requests.get(url)\n\n if response.ok:\n print(\"response is ok file is downloading ... \")\n # start to download file from url.\n with open(path, \"wb\") as f:\n f.write(response.content)\n else:\n print(\"Error!\", response.status_code)\n return False\n\n print(\"File downloaded succusfully.\")\n return True", "def download(progid, date):\n logger = log.getLogger('obslog.download')\n\n if not re.match(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}$', progid):\n logger.error('This does not look like a program ID: %s', progid)\n raise SystemExit\n\n obslog = date + '_' + progid + '_obslog.txt'\n url = 'https://archive.gemini.edu/file/' + obslog\n logger.debug('URL: %s', url)\n logger.info('Downloading %s...', obslog)\n urllib.urlretrieve(url, obslog)\n return", "def download_file(self, parsed_event, input_dir_path):", "def download_file(url, path):\n file_name = path + url.split(\"/\")[-1]\n req = requests.get(url)\n zipped_info = req.content\n print(file_name)\n if not os.path.isfile(file_name):\n print(\"file doesnt exist, writing\", file_name)\n with open(file_name, 'wb') as f:\n f.write(zipped_info)\n else:\n print(\"file exists\", file_name)", "def download_autoreduced(request, instrument, ipts):\n # Start a new transaction\n transaction = remote_view_util.transaction(request, start=True)\n if transaction is None:\n \n breadcrumbs = Breadcrumbs()\n breadcrumbs.append_experiment_list(instrument)\n \n template_values = {'message':\"Could not connect to Fermi and establish transaction\",\n 'back_url': reverse('catalog_experiments', args=[instrument]),\n 'breadcrumbs': breadcrumbs,}\n template_values = remote_view_util.fill_template_values(request, **template_values)\n template_values = catalog_view_util.fill_template_values(request, **template_values)\n return render_to_response('remote/failed_connection.html',\n template_values)\n\n file_name = \"%s_%s.zip\" % (instrument.upper(), ipts)\n code = 'import os\\n'\n code += 'import zipfile\\n'\n code += 'output_zip_file = zipfile.ZipFile(\"%s\", \"w\")\\n' % file_name\n code += 'for f in os.listdir(\"/SNS/%s/%s/shared/autoreduce\"):\\n' % (instrument.upper(), ipts.upper())\n code += ' output_zip_file.write(\"/SNS/%s/%s/shared/autoreduce/\"+f, f)\\n' % (instrument.upper(), ipts.upper())\n code += 'output_zip_file.close()\\n'\n jobID = remote_view_util.submit_job(request, transaction, code)\n\n return redirect(reverse('catalog_download_link', args=[jobID, file_name]))", "def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\n urllib.request.urlretrieve(url, save_path)", "def download(s, snap):\n\n id = snap['id']\n name = snap['sender']\n ts = str(snap['sent']).replace(':', '-')\n\n result = s.get_media(id)\n\n if not result:\n return False\n\n ext = s.is_media(result)\n filename = '{}+{}+{}.{}'.format(ts, name, id, ext)\n path = PATH + filename\n with open(path, 'wb') as fout:\n fout.write(result)\n return True", "def wind3dp_single_download(file, path=None):\n\n # add a OS-specific '/' to end end of 'path'\n if path:\n if not path[-1] == os.sep:\n 
path = f'{path}{os.sep}'\n else:\n path = sunpy.config.get('downloads', 'download_dir') + os.sep\n\n data = file.split('_')[1] # e.g. 'sfsp'\n year = file.split('_')[3][:4]\n base = f\"https://sprg.ssl.berkeley.edu/wind3dp/data/wi/3dp/{data}/{year}/\"\n\n url = base+'/'+file\n\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=False)\n except requests.HTTPError:\n print(f'No corresponding data found at {url}')\n downloaded_file = []\n\n return downloaded_file", "def download_command(self, args):\n self._validate_common(args)\n self._set_manifests(args)\n\n manifest = self._manager._remote\n manifest.load()\n\n records = self._get_matching_records(args, manifest)\n\n if not len(records):\n sys.exit(\"No matching items found.\")\n\n for record in records:\n try:\n self._manager.download(record['_type'], **record)\n print('Successfully downloaded file: {}'.format(record['_path']))\n except exceptions.ImmutableManifestError as e:\n if args.no_update:\n print('Asset already exists; will not download: {}'.format(record['_path']))\n else:\n raise e\n\n if len(records) > 1:\n print('All files successfully downloaded. Thank you.')", "def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? 
(download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");", "def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def main():\n # the url for african daily and global daily\n african_dialy_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/africa_daily/tifs/p25/\"\n global_daily_url = \"https://data.chc.ucsb.edu/products/CHIRPS-2.0/global_daily/tifs/p25/\"\n\n\n each_year_list = GetRasterYears(url=african_dialy_url)\n new_path = makenewdir(each_year_list)\n years_new_list = fecthrasterurl(url=african_dialy_url)\n downloadwithwget(each_year_list, years_new_list, new_path)", "def _download_file(self, artifact_path, local_path):\n full_path = self.base_artifact_path / artifact_path\n with self.managed_folder.get_file(str(full_path)) as remote_file:\n with open(local_path, \"wb\") as local_file:\n for line in remote_file:\n local_file.write(line)", "def get_genome_download_link(self, name, mask=\"soft\", **kwargs):\n genome = self.genomes[safe(name)]\n division, is_vertebrate = self.get_division(name)\n\n # base directory of the genome\n ftp = \"http://ftp.ensemblgenomes.org\"\n if is_vertebrate:\n ftp = \"http://ftp.ensembl.org\"\n version = self.get_version(name, kwargs.get(\"version\"))\n div_path = \"\" if is_vertebrate else f\"/{division}\"\n lwr_name = genome[\"name\"]\n ftp_directory = f\"{ftp}/pub/release-{version}{div_path}/fasta/{lwr_name}/dna\"\n\n # this assembly has its own directory\n if name == \"GRCh37\":\n ftp_directory = genome[\"genome\"].format(version)\n\n # specific fasta file\n cap_name = lwr_name.capitalize()\n asm_name = re.sub(r\"\\.p\\d+$\", \"\", safe(genome[\"assembly_name\"]))\n mask_lvl = {\"soft\": \"_sm\", \"hard\": \"_rm\", \"none\": \"\"}[mask]\n asm_lvl = \"toplevel\" if kwargs.get(\"toplevel\") else \"primary_assembly\"\n version_tag = \"\" if version > 30 else f\".{version}\"\n\n ftp_file = f\"{cap_name}.{asm_name}{version_tag}.dna{mask_lvl}.{asm_lvl}.fa.gz\"\n\n # combine\n link = f\"{ftp_directory}/{ftp_file}\"\n if check_url(link, 2):\n return link\n\n # primary assemblies do not always exist\n if asm_lvl == \"primary_assembly\":\n link = link.replace(\"primary_assembly\", \"toplevel\")\n if check_url(link, 2):\n return link\n\n raise GenomeDownloadError(\n f\"Could not download genome {name} from {self.name}.\\n\"\n \"URL is broken. 
Select another genome or provider.\\n\"\n f\"Broken URL: {link}\"\n )", "def download_and_prepare(self):\n self._download_and_prepare()", "def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st", "def download_and_extract(down_dir=download_dir, url=tuda_url):\n\n wget.download(url, down_dir) \n tar_filepath = os.path.join(down_dir, \"german-speechdata-package-v2.tar.gz\")\n #with tarfile.open(tar_filepath, \"r\") as tar:\n # tar.extractall(down_dir)", "def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename", "def download_snaps(s):\n\n existing = get_downloaded()\n\n snaps = s.get_snaps()\n for snap in snaps:\n id = snap['id']\n if id[-1] == 's' or id in existing:\n print 'Skipping:', id\n continue\n\n result = download(s, snap)\n\n if not result:\n print 'FAILED:', id\n else:\n print 'Downloaded:', id", "def get_drms_files(self):\n import drms\n client = drms.Client(email=self.email,verbose=True)\n fmt = '%Y.%m.%d_%H:%M'\n self.t_qstr = self.series+'[{0}_TAI-{1}_TAI@{2}]'.format(self.start.strftime(fmt),self.end.strftime(fmt),self.cadence) \n\n\n #create wavelength query string\n self.w_qstr = '[' \n for i in self.wav: self.w_qstr = self.w_qstr+'{0},'.format(int(i.value))\n #remove last , and add bracket\n self.w_qstr = self.w_qstr[:-1]+']'\n \n #make the series string\n self.s_qstr = '{'+self.segment+'}'\n\n #the full query\n self.qstr = self.t_qstr+self.w_qstr+self.s_qstr\n\n #IF ERRORS WITH URL ERROR IT IS BECAUSE THE DOWNLOAD FILE SIZE IS TOO LARGE\n #export the data file list \n self.expt = client.export(self.qstr)\n#create an array of indexes to download\n index = np.arange(np.size(self.expt.urls.url))\n# get file from JSOC\n #set directory to current if no path set\n outf = self.expt.download(self.odir,index,fname_from_rec=True)", "def download(uri: str) -> None:\n logger = 
logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")", "def download_hsc_spectra(self, spectra, download_dir=None, cache=True, curl_flag=False):\n\n # if spectra is not a Table, put it in a list\n if isinstance(spectra, Row):\n spectra = [spectra]\n\n # set up the download directory and paths\n if not download_dir:\n download_dir = '.'\n\n if curl_flag: # don't want to download the files now, just the curl script\n\n downloadFile = \"mastDownload_\" + time.strftime(\"%Y%m%d%H%M%S\")\n\n urlList = []\n pathList = []\n for spec in spectra:\n if spec['SpectrumType'] < 2:\n urlList.append('https://hla.stsci.edu/cgi-bin/getdata.cgi?config=ops&dataset={0}'\n .format(spec['DatasetName']))\n\n else:\n urlList.append('https://hla.stsci.edu/cgi-bin/ecfproxy?file_id={0}'\n .format(spec['DatasetName']) + '.fits')\n\n pathList.append(downloadFile + \"/HSC/\" + spec['DatasetName'] + '.fits')\n\n descriptionList = [\"\"]*len(spectra)\n productTypeList = ['spectrum']*len(spectra)\n\n service = \"Mast.Bundle.Request\"\n params = {\"urlList\": \",\".join(urlList),\n \"filename\": downloadFile,\n \"pathList\": \",\".join(pathList),\n \"descriptionList\": list(descriptionList),\n \"productTypeList\": list(productTypeList),\n \"extension\": 'curl'}\n\n response = self.service_request_async(service, params)\n bundlerResponse = response[0].json()\n\n localPath = download_dir.rstrip('/') + \"/\" + downloadFile + \".sh\"\n self._download_file(bundlerResponse['url'], localPath, head_safe=True)\n\n status = \"COMPLETE\"\n msg = None\n url = None\n\n if not os.path.isfile(localPath):\n status = \"ERROR\"\n msg = \"Curl could not be downloaded\"\n url = bundlerResponse['url']\n else:\n missingFiles = [x for x in bundlerResponse['statusList'].keys()\n if bundlerResponse['statusList'][x] != 'COMPLETE']\n if len(missingFiles):\n msg = \"{} files could not be added to the curl script\".format(len(missingFiles))\n url = \",\".join(missingFiles)\n\n manifest = Table({'Local Path': [localPath],\n 'Status': [status],\n 'Message': [msg],\n \"URL\": [url]})\n\n else:\n base_dir = download_dir.rstrip('/') + \"/mastDownload/HSC\"\n\n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n\n manifestArray = []\n for spec in spectra:\n\n # localPath = base_dir + \"/HSC\"# + spec['DatasetName'] + \".fits\"\n\n if spec['SpectrumType'] < 2:\n dataUrl = 'https://hla.stsci.edu/cgi-bin/getdata.cgi?config=ops&dataset=' \\\n + spec['DatasetName']\n else:\n dataUrl = 'https://hla.stsci.edu/cgi-bin/ecfproxy?file_id=' \\\n + spec['DatasetName'] + '.fits'\n\n localPath = base_dir + '/' + spec['DatasetName'] + \".fits\"\n\n status = \"COMPLETE\"\n msg = None\n url = None\n\n try:\n self._download_file(dataUrl, localPath, cache=cache, head_safe=True)\n\n # check file size also this is where would perform md5\n if not os.path.isfile(localPath):\n status = \"ERROR\"\n msg = \"File was not downloaded\"\n url = dataUrl\n\n except HTTPError as err:\n status = \"ERROR\"\n msg = \"HTTPError: {0}\".format(err)\n url = dataUrl\n\n manifestArray.append([localPath, status, msg, url])\n\n manifest = Table(rows=manifestArray, names=('Local Path', 'Status', 'Message', \"URL\"))\n\n return manifest", "def download_and_install(self, version=\"latest\", os_name=None, bitness=None, show_progress_bar=True):\n filename_with_path = 
self.download(version,\n os_name=os_name,\n bitness=bitness,\n show_progress_bar=show_progress_bar)\n filename = os.path.split(filename_with_path)[1]\n if filename.lower().endswith(\".tar.gz\"):\n extract_dir = os.path.join(self.get_download_path(version), filename[:-7])\n elif filename.lower().endswith(\".zip\"):\n extract_dir = os.path.join(self.get_download_path(version), filename[:-4])\n else:\n error_message = \"Unknown archive format: {0}\".format(filename)\n logger.error(error_message)\n raise RuntimeError(error_message)\n if not os.path.isdir(extract_dir):\n os.makedirs(extract_dir)\n logger.debug(\"Created directory: {0}\".format(extract_dir))\n if filename.lower().endswith(\".tar.gz\"):\n with tarfile.open(os.path.join(self.get_download_path(version), filename), mode=\"r:*\") as tar:\n tar.extractall(extract_dir)\n logger.debug(\"Extracted files: {0}\".format(\", \".join(tar.getnames())))\n elif filename.lower().endswith(\".zip\"):\n with zipfile.ZipFile(os.path.join(self.get_download_path(version), filename), mode=\"r\") as driver_zipfile:\n driver_zipfile.extractall(extract_dir)\n driver_filename = self.get_driver_filename(os_name=os_name)\n for root, dirs, files in os.walk(extract_dir):\n for curr_file in files:\n if curr_file == driver_filename:\n actual_driver_filename = os.path.join(root, curr_file)\n break\n if os_name is None:\n os_name = platform.system()\n if os_name in ['Darwin', 'Linux']:\n symlink_src = actual_driver_filename\n symlink_target = os.path.join(self.link_path, driver_filename)\n if os.path.islink(symlink_target):\n if os.path.samefile(symlink_src, symlink_target):\n logger.info(\"Symlink already exists: {0} -> {1}\".format(symlink_target, symlink_src))\n return tuple([symlink_src, symlink_target])\n else:\n logger.warning(\"Symlink {0} already exists and will be overwritten.\".format(symlink_target))\n os.unlink(symlink_target)\n os.symlink(symlink_src, symlink_target)\n logger.info(\"Created symlink: {0} -> {1}\".format(symlink_target, symlink_src))\n st = os.stat(symlink_src)\n os.chmod(symlink_src, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n return tuple([symlink_src, symlink_target])\n elif os_name == \"Windows\":\n src_file = actual_driver_filename\n dest_file = os.path.join(self.link_path, driver_filename)\n if os.path.isfile(dest_file):\n logger.info(\"File {0} already exists and will be overwritten.\".format(dest_file))\n shutil.copy2(src_file, dest_file)\n return tuple([src_file, dest_file])", "def download_optional_inputs(flywheel_basedir, sub_dir, ses_dir, rootdir):\n print('Looking for manifest-defined anatomical files')\n t1_anat_dir = os.path.join(flywheel_basedir, 'input', 't1w_anatomy')\n if os.path.isdir(t1_anat_dir):\n t1_file = os.listdir(t1_anat_dir)\n if t1_file:\n t1_file = os.path.join(t1_anat_dir, t1_file[0])\n anat_dir = os.path.join(rootdir, sub_dir, ses_dir, 'anat')\n if not os.path.isdir(anat_dir):\n os.mkdir(anat_dir)\n dest_file = os.path.join(anat_dir, sub_dir + '_' + ses_dir + '_T1w.nii.gz')\n if os.path.exists(dest_file):\n print('Found downloaded T1 file - overwriting!')\n os.remove(dest_file)\n os.remove(dest_file.replace('.nii.gz', '.json'))\n shutil.copyfile(t1_file, dest_file)\n\n t2_anat_dir = os.path.join(flywheel_basedir, 'input', 't2w_anatomy')\n if os.path.isdir(t2_anat_dir):\n t2_file = os.listdir(t2_anat_dir)\n if t2_file:\n anat_dir = os.path.join(rootdir, sub_dir, ses_dir, 'anat')\n if not os.path.isdir(anat_dir):\n os.mkdir(anat_dir)\n t2_file = os.path.join(t2_anat_dir, t2_file[0])\n 
dest_file = os.path.join(anat_dir, sub_dir + '_' + ses_dir + '_T2w.nii.gz')\n if os.path.exists(dest_file):\n print('Found downloaded T2 file - overwriting!')\n os.remove(dest_file)\n os.remove(dest_file.replace('.nii.gz', '.json'))\n shutil.copyfile(t2_file, dest_file)", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def fetch_nspl():\n nspl_url = \"https://www.arcgis.com/sharing/rest/content/items/4df8a1a188e74542aebee164525d7ca9/data\"\n\n if os.path.exists(nspl_target) is True:\n logging.info(\"Already collected NSPL\")\n else:\n os.makedirs(nspl_target, exist_ok=True)\n req = requests.get(nspl_url)\n zipf = ZipFile(BytesIO(req.content)).extractall(nspl_target)", "def download (httpfile, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)\n return file", "def https_download_file(**data):\n import os\n import requests\n\n ##minimal data inputs payload\n server_url = data.get('server_url', '')\n file_name = data.get('file_name', '')\n file_path = data.get('file_path', '')\n headers = data.get('headers', '')\n ##extra data inputs payload\n ##\n ##\n\n if server_url==None:\n raise(NameError('No `server URL` specified'))\n \n if file_name==None:\n raise(NameError('No `file_name` specified'))\n\n file_url = os.path.join(server_url,file_name)\n\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n\n full_name = os.path.join(file_path,file_name)\n \n if not os.path.isfile(full_name):\n r = requests.get(file_url, headers=headers)\n if not r.status_code==200: \n raise r.raise_for_status()\n open(full_name , 'wb').write(r.content)\n\n return full_name", "def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url = DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / 
target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()", "def download_mp3_by_ref(s, username, passwd, ref, path=None):\n\n s = login(s, username, passwd)\n s = search_by_ref(s, ref)\n result = download_mp3(s, path, ref)\n if result == 1:\n return 1\n s.driver.close()", "def get_data():\n\n url = 'http://www.cs.cmu.edu/~dbamman/data/booksummaries.tar.gz'\n target_path = 'books.tar.gz'\n if not os.path.isfile(target_path):\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n open(target_path, 'wb').write(response.content)\n\n # open tar zip file file\n file = tarfile.open(target_path)\n # extracting the tar zip file\n file.extractall('.')\n file.close()\n return True", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def _download_single(url, to, id):\n if os.path.exists(to):\n error_flags[id] = 1\n return\n\n try:\n request = rq.Request(url=url, headers=forge_agent_header)\n info = rq.urlopen(request).read()\n\n except urllib.error.URLError as e:\n print(url, 'urllib error')\n error_flags[id] = 2\n return\n\n except Exception as e:\n print(url, e)\n error_flags[id] = 2\n return\n\n with open(to, \"wb\") as file:\n print(url, 'writing')\n file.write(info)\n\n error_flags[id] = 1", "def start(self):\n # check whether ThermoRawFileParser.exe is available\n assert Path(self._thermorawfileparser_path).is_file(), \\\n 'Executable to ThermoRawFileParser not found.'\n\n # access PRIDE repository by accession\n req = requests.get(f'https://www.ebi.ac.uk/pride/ws/archive/v2/files/byProject?accession={self._accession}',\n headers={'Accept': 'application/json'})\n\n # check whether API request contains success notification\n assert req.status_code == 200, \\\n f'Unsuccessful PRIDE access via accession (HTTP response status code {req.status_code}).'\n\n # find .sdrf file(s)\n req = req.json()\n files = [file['value'] for accession in req\n for file in accession['publicFileLocations']\n if 'ftp' in file['value'] and 'sdrf' in file['value'].lower()]\n\n assert len(files) > 0, 'PRIDE accession does not contain SDRF file(s).'\n\n self.sdrf_files = len(files)\n\n # create directory for acession\n Path(self._accession).mkdir(parents=False, exist_ok=True)\n # load .fasta map to acquire according .fasta darabase(s)\n if not Path(f'databases/{self.FASTA_MAP_FILE}').is_file():\n self.create_fasta_map(f'{self.UNIPROT_FASTA_BASIS_PATH}/README')\n self.fasta_map = pd.read_csv(f'databases/{self.FASTA_MAP_FILE}', sep=';')\n\n # download and iterate every .sdrf file found for appropriate accession\n for idx, file in enumerate(files):\n print(f'Processing .sdrf file ({idx + 1}/{len(files)}) from {self._accession}')\n with closing(request.urlopen(file)) as r:\n # download .sdrf\n sdrf = '{}/{}'.format(self._accession, file.split('/')[-1])\n print(sdrf)\n with open(sdrf, 'wb') as f:\n shutil.copyfileobj(r, f)\n with open(sdrf, 'r') as csv_file:\n entries = [entry for entry in csv.DictReader(csv_file, delimiter='\\t')]\n for i in range(0, len(entries)):\n entries[i] = dict((k.lower(), v) for k, v in entries[i].items()) ## transform everything to lower case!\n #entries = [entry.lower() for entry in entries] ## 
transform everything to lower case\n\n col_names = list(entries[0].keys())\n col_names = [entry.lower() for entry in col_names]\n\n if not all([col in col_names # list(entries[0].keys()\n for col in self.prerequisite_sdrf_cols]):\n print('ERROR: SDRF file does not provide prerequisite information '\n '(column names). SDRF file is skipped!')\n break\n\n #col_names = list(entries[0].keys())\n for entry_idx, entry in enumerate(entries):\n sdrf_infos = self._read_config_sdrf(entry, col_names)\n #\n if not self.use_same_fasta or entry_idx == 0:\n fasta_db = self._receive_fasta_database(sdrf_infos['organism'])\n if fasta_db:\n # if needed, create new directory\n if self._separate_sdrf_entries:\n sample_name = '/{}'.format(sdrf_infos['name'])\n sample_name = sample_name.replace(\" \", \"\") ### remove whitespace\n Path(f'{self._accession}{sample_name}').mkdir(parents=False, exist_ok=True)\n else:\n sample_name = ''\n file_name = sdrf_infos['file name']\n print('\\nProcessing {} ({}/{})'.format(sdrf_infos['name'],\n entry_idx + 1, len(entries)))\n # download .raw file\n with closing(request.urlopen(sdrf_infos['uri'])) as r:\n with open(f'{self._accession}{sample_name}/{file_name}', 'wb') as f:\n shutil.copyfileobj(r, f)\n # convert .raw to .mgf using ThermoRawFileParser.exe\n arguments = f'{self._thermorawfileparser_path} ' \\\n f'-i={self._accession}/{sample_name}/{file_name} ' \\\n f'-o={self._accession}/{sample_name} ' \\\n f'-f=0'\n subprocess.call(arguments, stdout=self._FNULL, stderr=self._FNULL, shell=False)\n # determine path to created .mgf file\n\n mgf_file_name = file_name.replace('raw', 'mgf')\n mgf_file_name = mgf_file_name.replace('RAW', 'mgf')\n\n mgf_file = '{}{}/{}'.format(self._accession,\n sample_name,\n mgf_file_name)\n # start search engine search\n self._search_engine.search(cwd=self._cwd,\n database=fasta_db,\n sdrf_entry=sdrf_infos,\n mgf_file=mgf_file)\n # perform FDR on results\n if self._use_search_engine_specific_fdr:\n self._search_engine.fdr()\n else:\n self.fdr()\n else:\n print('There is no associated FASTA database.')", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def get_sasa(topology, trajectory, dssp_loc=master_dssp_location,skip=None):\n\n\tdssp_loc = dssp_loc\n\tDSSP={'A':{}}\n\tuniverse = MDAnalysis.Universe(topology, trajectory)\n\n\t#set the chain name here. this will only work for MDAnalysis 0.16\n\tchain_name=universe.add_Segment(segid='A')\n\tuniverse.residues[...].segments=chain_name\n\n\tprotein=universe.select_atoms(\"protein\")\n\tdiff_res=[]\n\t#this attempt to identify chain breaks will only work if the resids\n\t#... 
in the chains are not numbered consecutively\n\tfor i in range(len(protein.resnums)):\n\t\tif protein.resnums[i]-protein.resnums[i-1]<0 and i!=0:\n\t\t\tdiff_res.append(i)\n\tif len(diff_res)>=1:\n\t\tchain_sep=diff_res.pop(0)\n\t\tchain_end=len(protein.resnums)\n\t\tbchain=protein[chain_sep:chain_end]\n\t\tbchain.set_segids('B')\n\t\tDSSP['B']={}\n\n\tfor ts in universe.trajectory:\n\t\tif skip:\n\t\t\tuniverse.trajectory.skip=skip\n\t\tsys.stdout.flush()\n\t\tsys.stdout.write('\\rsasa [step {0}] '.format(\n\t\t\tuniverse.trajectory.frame))\n\t\twriter=MDAnalysis.Writer(\"tmp.pdb\")\n\t\twriter.write(protein)\n\t\twriter.close()\n\t\tparser=bp.PDBParser()\n\t\tstructure=parser.get_structure('tmp','tmp.pdb')\n\t\tdssp=bp.DSSP(structure[0],'tmp.pdb',dssp_loc)\n\t\tfor key in dssp.keys():\n\t\t\tif 0:\n\t\t\t\tresobj=dssp[key][0]\n\t\t\t\tresname=dssp[key][0].resname\n\t\t\t\tresidx=resobj.id[1]\n\t\t\t\tchain=key[0]\n\t\t\t\tsecondary_structure=resobj.xtra['SS_DSSP']\n\t\t\t\trel_sasa=resobj.xtra['EXP_DSSP_RASA']\n\t\t\t\tabs_sasa=resobj.xtra['EXP_DSSP_ASA']\n\t\t\t\tphi=resobj.xtra['PHI_DSSP']\n\t\t\t\tpsi=resobj.xtra['PSI_DSSP']\n\t\t\tresobj=dssp[key]\n\t\t\tresname=residue_codes_reverse[resobj[1]]\n\t\t\tresidx=key[1][1]\n\t\t\tchain=key[0]\n\t\t\tsecondary_structure=resobj[2]\n\t\t\trel_sasa=resobj[3]\n\t\t\tabs_sasa=resobj[3]*dssp.residue_max_acc[resname]\n\t\t\tphi=resobj[4]\n\t\t\tpsi=resobj[5]\n\t\t\tif residx in DSSP[chain] and DSSP[chain][residx]['resname']==resname:\n\t\t\t\tDSSP[chain][residx]['dssp'].append(secondary_structure)\n\t\t\t\tDSSP[chain][residx]['rel_sasa'].append(rel_sasa)\n\t\t\t\tDSSP[chain][residx]['abs_sasa'].append(abs_sasa)\n\t\t\t\tDSSP[chain][residx]['phi'].append(phi)\n\t\t\t\tDSSP[chain][residx]['psi'].append(psi)\n\t\t\t\tDSSP[chain][residx]['time'].append(ts.time)\n\t\t\telse:\n\t\t\t\tDSSP[chain][residx]={'dssp':[secondary_structure],'phi':[phi],'time':[ts.time],\n\t\t\t\t\t\t\t\t\t 'psi':[psi],'rel_sasa':[rel_sasa],'chain':chain,\n\t\t\t\t\t\t\t\t\t 'abs_sasa':[abs_sasa],'resname':resname}\n\treturn DSSP", "def download(form, year=None):\n if form == \"2552-96\" and year is not None:\n r = requests.get(f\"http://downloads.cms.gov/Files/hcris/HOSPFY{year}.zip\")\n elif form == \"2552-10\" and year is not None:\n r = requests.get(f\"http://downloads.cms.gov/Files/hcris/HOSP10FY{year}.zip\")\n elif form == \"2552-96\" and year is None:\n r = requests.get(\"http://downloads.cms.gov/files/hcris/HOSP-REPORTS.ZIP\")\n elif form == \"2552-10\" and year is None:\n r = requests.get(\"http://downloads.cms.gov/files/hcris/hosp10-reports.zip\")\n\n # Read content stream of Zip file and extract to data directory\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(f\"{data_dir}/\")\n z.close()", "def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path", "def download(exam, out, name_question, sid_question, compact):\n exam_json, template_questions, email_to_data_map, total = examtool.api.download.download(exam)\n examtool.api.download.export(template_questions, email_to_data_map, total, exam, out, name_question, sid_question, compact)", "async def download_file(\n location_id: LocationID,\n file_id: 
StorageFileID,\n user_id: UserID,\n link_type: LinkType = LinkType.PRESIGNED,\n):", "def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name", "def download_fastq_files(fastq1_s3_path, fastq2_s3_path, working_dir):\n fastq_folder = os.path.join(working_dir, 'fastq')\n\n try:\n os.mkdir(fastq_folder)\n except Exception as e:\n pass\n\n local_fastq1_path = download_file(fastq1_s3_path, fastq_folder)\n local_fastq2_path = download_file(fastq2_s3_path, fastq_folder)\n\n # Isaac requires the fastqs to be symlinked as lane1_read1.fastq.gz and lane1_read2.fastq.gz\n os.symlink(local_fastq1_path, os.path.join(fastq_folder, 'lane1_read1.fastq.gz'))\n os.symlink(local_fastq2_path, os.path.join(fastq_folder, 'lane1_read2.fastq.gz'))\n\n return fastq_folder" ]
[ "0.6527539", "0.63202286", "0.6183208", "0.61473656", "0.6141469", "0.5976712", "0.5897661", "0.58915883", "0.58283", "0.5815497", "0.5812849", "0.58102864", "0.58102864", "0.5790022", "0.57642645", "0.5758532", "0.57328504", "0.57257515", "0.57255936", "0.5705985", "0.5704997", "0.5703746", "0.570062", "0.5697409", "0.5690241", "0.56886864", "0.56876963", "0.568679", "0.56622994", "0.5649596", "0.5627821", "0.5609948", "0.5609174", "0.55861276", "0.55821157", "0.5579976", "0.55779976", "0.557481", "0.55728674", "0.5568536", "0.5567929", "0.5567371", "0.5552527", "0.55320835", "0.55317825", "0.55286944", "0.5528169", "0.55034864", "0.5494793", "0.5494793", "0.5480504", "0.54783076", "0.5458411", "0.5431711", "0.5421514", "0.54171497", "0.5413836", "0.5409427", "0.54055786", "0.54002", "0.53990585", "0.5392193", "0.53880656", "0.53817475", "0.5376734", "0.53744745", "0.5366496", "0.53646326", "0.53444827", "0.5333019", "0.5330452", "0.5323808", "0.531603", "0.5304019", "0.529673", "0.5287958", "0.52827066", "0.52823216", "0.5279211", "0.5272092", "0.526579", "0.5251975", "0.5251975", "0.52519673", "0.52504015", "0.5249379", "0.5245559", "0.5244266", "0.5242128", "0.5239785", "0.523484", "0.5233544", "0.5228284", "0.5215574", "0.5214769", "0.52095956", "0.5207797", "0.5205394", "0.52046114", "0.51976097" ]
0.77889556
0
put path to indices / pass paths as arg, e.g. STAR_DIR
def build_indices(genome_fasta, genome_gtf, rRNA_fasta, transcriptome_fasta):
    if not os.path.exists("data/indices"):
        os.mkdir("data/indices")

    # 1. Bowtie index
    print("Building Bowtie index")
    if not os.path.exists(BOWTIE_DIR):
        os.mkdir(BOWTIE_DIR)
    cmd_bowtie = 'bowtie-build' + ' ' + genome_fasta + ' ' + BOWTIE_DIR + '/yeast'
    output = subprocess.run(cmd_bowtie, shell=True)
    cmd_rRNA = 'bowtie-build' + ' ' + rRNA_fasta + ' ' + BOWTIE_DIR + '/rRNA'
    output = subprocess.run(cmd_rRNA, shell=True)

    # 2. STAR index
    print("Building STAR index")
    if not os.path.exists(STAR_DIR):
        os.mkdir(STAR_DIR)
    cmd_STAR = 'STAR' + ' ' + '--runThreadN' + ' ' + '4' + ' ' + '--runMode' + ' ' + 'genomeGenerate' + ' ' + '--genomeDir' + ' ' + STAR_DIR + ' ' + '--genomeFastaFiles' + ' ' + genome_fasta + ' ' + '--sjdbGTFfile' + ' ' + genome_gtf  # + ' ' + '--sjdbOverhang' + ' ' + 'max(ReadLength)-1'
    output = subprocess.run(cmd_STAR, shell=True)

    # run build transcriptome fasta.
    if not os.path.exists(STAR_TRANSCRIPTOME_DIR):
        os.mkdir(STAR_TRANSCRIPTOME_DIR)
    cmd_STAR = 'STAR' + ' ' + '--runThreadN' + ' ' + '4' + ' ' + '--runMode' + ' ' + 'genomeGenerate' + ' ' + '--genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--genomeFastaFiles' + ' ' + transcriptome_fasta  # + ' ' + '--sjdbGTFfile' + ' ' + genome_gtf  # + ' ' + '--sjdbOverhang' + ' ' + 'max(ReadLength)-1'
    output = subprocess.run(cmd_STAR, shell=True)
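A minimal sketch of the change the query above describes, with the index directories passed as arguments instead of module-level constants such as STAR_DIR; the default paths, thread count, and helper name are assumptions for illustration, while the bowtie-build and STAR command flags mirror the snippet above:

```python
# Sketch only: a parameterized variant of build_indices. The directory
# defaults, thread count, and function name below are assumptions based
# on the snippet above, not a drop-in replacement for it.
import os
import subprocess


def build_indices_parameterized(genome_fasta, genome_gtf, rRNA_fasta, transcriptome_fasta,
                                bowtie_dir="data/indices/bowtie",
                                star_dir="data/indices/star",
                                star_transcriptome_dir="data/indices/star_transcriptome",
                                threads=4):
    # Create every index directory up front instead of hard-coding constants.
    for index_dir in (bowtie_dir, star_dir, star_transcriptome_dir):
        os.makedirs(index_dir, exist_ok=True)

    # Bowtie indices for the genome and the rRNA reference.
    subprocess.run(["bowtie-build", genome_fasta, os.path.join(bowtie_dir, "genome")], check=True)
    subprocess.run(["bowtie-build", rRNA_fasta, os.path.join(bowtie_dir, "rRNA")], check=True)

    # STAR genome index (with splice junctions from the GTF).
    subprocess.run(["STAR", "--runThreadN", str(threads),
                    "--runMode", "genomeGenerate",
                    "--genomeDir", star_dir,
                    "--genomeFastaFiles", genome_fasta,
                    "--sjdbGTFfile", genome_gtf], check=True)

    # STAR transcriptome index (FASTA only, mirroring the original).
    subprocess.run(["STAR", "--runThreadN", str(threads),
                    "--runMode", "genomeGenerate",
                    "--genomeDir", star_transcriptome_dir,
                    "--genomeFastaFiles", transcriptome_fasta], check=True)
```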
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indexes_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.INDEXES_DIR)", "def index_STAR(args):\n\n # make STAR index folder for merged path\n merged_STAR_watson_index = os.path.join(args.output_dir,'STAR_merged_watson')\n merged_STAR_crick_index = os.path.join(args.output_dir,'STAR_merged_crick')\n if not os.path.exists(merged_STAR_watson_index):\n os.mkdir(merged_STAR_watson_index)\n os.mkdir(merged_STAR_crick_index)\n ref_merged_watson = os.path.join(merged_STAR_watson_index, '%s.merged.watson.fa' % args.species)\n ref_merged_crick = os.path.join(merged_STAR_crick_index, '%s.merged.crick.fa' % args.species)\n\n #make STAR index folder for joined path\n joined_STAR_watson_index = os.path.join(args.output_dir,'STAR_joined_watson')\n joined_STAR_crick_index = os.path.join(args.output_dir,'STAR_joined_crick')\n if not os.path.exists(joined_STAR_watson_index):\n os.mkdir(joined_STAR_watson_index)\n os.mkdir(joined_STAR_crick_index)\n ref_joined_watson = os.path.join(joined_STAR_watson_index, '%s.joined.watson.fa' % args.species)\n ref_joined_crick = os.path.join(joined_STAR_crick_index, '%s.joined.crick.fa' % args.species)\n\n #get file handle for input reference file\n try:\n file_handle = open(args.reference, 'r')\n joined_len = 0\n merged_len = 0\n joined_count = 0\n merged_count = 0\n ref_merged_watson_handle = open(ref_merged_watson, 'w')\n ref_merged_crick_handle = open(ref_merged_crick, 'w')\n ref_joined_watson_handle = open(ref_joined_watson, 'w')\n ref_joined_crick_handle = open(ref_joined_crick, 'w')\n seq = ''\n for line in file_handle:\n if line.startswith('>'):\n if seq != '':\n if 'NNNNNNNN' in seq.upper():\n joined_len += len(seq)\n joined_count += 1\n ref_joined_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_joined_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n else:\n merged_len += len(seq)\n merged_count += 1\n ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n seq = ''\n header = line\n else:\n seq += line.rstrip('\\n')\n #update for fixing the last line while reading ref_genome\n if 'NNNNNNNN' in seq.upper():\n\t joined_len += len(seq)\n\t joined_count += 1\n\t ref_joined_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n\t ref_joined_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n else:\n\t merged_len += len(seq)\n\t merged_count += 1\n\t ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n\t ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n # write final sequence, this is always merged\n merged_len += len(seq)\n merged_count += 1\n ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n # close file handles\n ref_joined_watson_handle.close()\n ref_joined_crick_handle.close()\n ref_merged_watson_handle.close()\n ref_merged_crick_handle.close()\n except TypeError:\n file_handle = open(args.refgenome, 'r')\n joined_len = 0\n merged_len = 0\n joined_count = 0\n merged_count = 0\n ref_merged_watson_handle = open(ref_merged_watson, 'w')\n ref_merged_crick_handle = open(ref_merged_crick, 'w')\n ref_joined_watson_handle = open(ref_joined_watson, 'w')\n ref_joined_crick_handle = open(ref_joined_crick, 'w')\n seq = ''\n for line in file_handle:\n if 
line.startswith('>'):\n if seq != '':\n joined_len += len(seq)\n joined_count += 1\n ref_joined_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_joined_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n merged_len += len(seq)\n merged_count += 1\n ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n seq = ''\n header = line\n else:\n seq += line.rstrip('\\n')\n\n joined_len += len(seq)\n joined_count += 1\n ref_joined_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_joined_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n # write final sequence, this is always merged\n merged_len += len(seq)\n merged_count += 1\n try:\n ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n except ValueError:\n ref_merged_watson_handle = open(ref_merged_watson, 'a')\n ref_merged_crick_handle = open(ref_merged_crick, 'a')\n ref_merged_watson_handle.write(header + seq.upper().replace('C', 'T') + '\\n')\n ref_merged_crick_handle.write(header + seq.upper().replace('G', 'A') + '\\n')\n # close file handles\n ref_joined_watson_handle.close()\n ref_joined_crick_handle.close()\n ref_merged_watson_handle.close()\n ref_merged_crick_handle.close()\n\n #iterate over input lines and write to references\n\n #MAKE LIST for indexes to be made\n index_list = [(joined_len, joined_count, joined_STAR_watson_index, ref_joined_watson),\n (joined_len, joined_count, joined_STAR_crick_index, ref_joined_crick),\n (merged_len, merged_count, merged_STAR_watson_index, ref_merged_watson),\n (merged_len, merged_count, merged_STAR_crick_index, ref_merged_crick)]\n #calculate parameters for indexing reference for merged and joined reads.\n for (genome_len, no_clusters, genome_dir, ref) in index_list:\n if genome_len != 0:\n index_cmd = 'STAR --runThreadN %s --runMode genomeGenerate --genomeDir %s'%(args.threads,genome_dir)\n fasta_file = [file for file in os.listdir(genome_dir) if file.endswith('.fa')][0]\n index_cmd += ' --genomeFastaFiles %s'%os.path.join(genome_dir,fasta_file)\n genomeSAindexNbases = min(14, math.log(genome_len,2)/2 - 1)\n index_cmd += ' --genomeSAindexNbases %i'%genomeSAindexNbases\n genomeChrBinNbits = min(18, math.log(genome_len/no_clusters, 2))\n index_cmd += ' --genomeChrBinNbits %i' % genomeChrBinNbits\n log = 'making STAR index of %s'%(ref)\n if 'Genome' not in os.listdir(genome_dir):\n run_subprocess([index_cmd], args, log)\n return args", "def setPath(*args):", "def index(args):\n import ruido\n directory = args.dir\n vectors = glob.glob(os.path.join(directory, '**', '*.json'), recursive=True)\n rasters = glob.glob(os.path.join(directory, '**', '*.tiff'), recursive=True)\n if args.verbose:\n print(\"Indexing %s\" % directory)\n print(\"Vectors: %s\" % vectors)\n print(\"Rasters: %s\" % rasters)\n\n for vector in vectors:\n with open(vector, 'r') as v:\n for item_raw in v:\n item = item_raw.strip(u'\\u001e')\n ruido.add(os.path.join(\".index\", vector), item)\n\n return \"[]\"", "def option_index(args):\n print(\"= MAKE INDEX =\")\n print()\n print(\"Database folder:\\t{}\".format(args.folder))\n if not os.path.isdir(args.folder):\n raise OSError(\"No such directory!\")\n print(\"Index file:\\t\\t{}\".format(args.indexfile))\n\n indexer.create_index_from_folder(args.folder, args.indexfile)", "def 
index_path(self):\n\t\treturn os.path.normpath(self.output + \"/\" + self.resultset_index)", "def remap_index_fn(ref_file):\n return path.join(path.dirname(path.dirname(ref_file)), \"star\")", "def updateIndex(ix, pool_path):\n \n logger.debug('updating search index')\n writer = ix.writer()\n \n exercise_list = [f.name for f in os.scandir(pool_path) if f.is_dir()]\n for ex in exercise_list:\n if ex == '.search_index':\n continue\n task_file = os.path.abspath(os.path.join(pool_path, ex, 'task.tex'))\n if os.path.isfile(task_file):\n logger.info('parsing ' + task_file)\n metaData, task_texcode = parseTaskFile(task_file)\n else:\n logger.warning(ex + ' does not include a task.tex file. skipping entry')\n continue\n \n solution_file = os.path.abspath(os.path.join(pool_path, ex, 'solution.tex'))\n if os.path.isfile(solution_file):\n with open(solution_file, 'r') as f:\n solution_texcode = f.read()\n else:\n logger.warning(ex + ' does not include a solution.tex file')\n solution_texcode = ''\n \n if metaData['date'] == '':\n lastupdate = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n else:\n lastupdate = parse_date(metaData['date'])\n\n writer.add_document(\n folder_name=ex,\n task=task_texcode,\n solution=solution_texcode,\n language=metaData['language'],\n maintainer=metaData['author'],\n lastupdate=lastupdate,\n keywords=re.sub(r',\\s+', ',', metaData['keywords'])\n )\n\n writer.commit()", "def add_lgit(args, parent_dir):\n # Convert all the name from the arguments to relative path\n path_list = [handle_path(name) for name in args.filenames[::-1]]\n # Get the infos from the index file\n index_dict = get_index_dictionary(parent_dir)\n if index_dict is None:\n return\n # Create a file descriptor for index file\n index_file_path = join(parent_dir, \".lgit/index\")\n descriptor = os.open(index_file_path, os.O_WRONLY)\n while path_list:\n # Pop each path and execute the fitting function\n current_path = path_list.pop()\n # If the path doesn't exist, print an error message\n if not exists(current_path):\n pathspec_error(current_path)\n elif basename(current_path) == \".lgit\" and isdir(current_path):\n continue\n elif isfile(current_path):\n add_file(current_path, parent_dir, descriptor, index_dict)\n elif isdir(current_path):\n add_directory(current_path, parent_dir, path_list)\n # Close the file descriptor\n os.close(descriptor)", "def index(args):\n\n logging.info('Starting indexing sequences in %s' % args.sequences)\n logging.error('TODO: Implement indexing!')", "def run_snarl_indexing(job, context, inputGraphFileIDs, graph_names, index_name=None, include_trivial=False):\n \n assert(len(inputGraphFileIDs) == len(graph_names))\n \n # Decide on an index output extension.\n extension = '.trivial.snarls' if include_trivial else '.snarls'\n \n if len(inputGraphFileIDs) > 1:\n # We have been given multiple chromosome graphs. 
Since snarl indexing\n # can take a lot of memory, we are going to process each one separately\n # and then concatenate the results.\n \n RealtimeLogger.info(\"Breaking up snarl computation for {}\".format(str(graph_names)))\n \n snarl_jobs = []\n for file_id, file_name in zip(inputGraphFileIDs, graph_names):\n # For each input graph, make a child job to index it.\n snarl_jobs.append(job.addChildJobFn(run_snarl_indexing, context, [file_id], [file_name],\n include_trivial=include_trivial,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk))\n \n # Make a job to concatenate the indexes all together \n concat_job = snarl_jobs[0].addFollowOnJobFn(run_concat_files, context, [job.rv() for job in snarl_jobs],\n index_name + extension if index_name is not None else None,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk)\n \n for i in range(1, len(snarl_jobs)):\n # And make it wait for all of them\n snarl_jobs[i].addFollowOn(concat_job)\n \n return concat_job.rv()\n \n else:\n # Base case: single graph\n \n RealtimeLogger.info(\"Starting snarl computation {} trivial snarls...\".format('with' if include_trivial else 'without'))\n start_time = timeit.default_timer()\n \n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Download the one graph\n graph_id = inputGraphFileIDs[0]\n graph_filename = graph_names[0]\n job.fileStore.readGlobalFile(graph_id, os.path.join(work_dir, graph_filename))\n\n # Where do we put the snarls?\n snarl_filename = os.path.join(work_dir, (index_name if index_name is not None else \"part\") + extension)\n\n # Now run the indexer.\n RealtimeLogger.info(\"Computing snarls for {}\".format(graph_filename))\n\n cmd = ['vg', 'snarls', graph_filename]\n if include_trivial:\n cmd += ['--include-trivial']\n with open(snarl_filename, \"wb\") as snarl_file:\n try:\n # Compute snarls to the correct file\n context.runner.call(job, cmd, work_dir=work_dir, outfile=snarl_file)\n except:\n # Dump everything we need to replicate the indexing\n logging.error(\"Snarl indexing failed. Dumping files.\")\n context.write_output_file(job, os.path.join(work_dir, graph_filename))\n raise\n \n if index_name is not None:\n # Checkpoint index to output store\n snarl_file_id = context.write_output_file(job, snarl_filename)\n else:\n # Just save the index as an intermediate\n snarl_file_id = context.write_intermediate_file(job, snarl_filename)\n \n \n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished computing snarls. 
Process took {} seconds.\".format(run_time))\n\n return snarl_file_id", "def _get_index_file_path(location):\n return os.path.join(location, args.index_file_name)", "def _read_indices(path):\n paths = sorted(tf.io.gfile.glob('%s-*-of-*_index.json' % path))\n all_indices = []\n for path in paths:\n json_str = epath.Path(path).read_text()\n # parse it back into a proto.\n shard_index = json.loads(json_str)\n all_indices.append(list(shard_index['index']))\n return [os.path.basename(p) for p in paths], all_indices", "def paths(self, paths):\r\n self._paths = paths\r\n self._extract()", "def index_toggle_parse_args(parser):\n parser.add_argument(\"--gcsa_index\", dest=\"indexes\", default=[], action=\"append_const\", const=\"gcsa\",\n help=\"Make a gcsa index for each output graph\")\n parser.add_argument(\"--xg_index\", dest=\"indexes\", action=\"append_const\", const=\"xg\",\n help=\"Make an xg index for each output graph\")\n parser.add_argument(\"--gbwt_index\", dest=\"indexes\", action=\"append_const\", const=\"gbwt\",\n help=\"Make a GBWT index alongside the xg index for each output graph\")\n parser.add_argument(\"--snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"snarls\",\n help=\"Make an snarls file for each output graph\")\n parser.add_argument(\"--trivial_snarls_index\", dest=\"indexes\", action=\"append_const\", const=\"trivial_snarls\",\n help=\"Make a trivial-inclusive snarls file for each output graph\")\n parser.add_argument(\"--distance_index\", dest=\"indexes\", action=\"append_const\", const=\"distance\",\n help=\"Make a (minimum) distance index for each output graph\")\n parser.add_argument(\"--minimizer_index\", dest=\"indexes\", action=\"append_const\", const=\"minimizer\",\n help=\"Make a minimizer index for each output graph\")\n parser.add_argument(\"--id_ranges_index\", dest=\"indexes\", action=\"append_const\", const=\"id_ranges\",\n help=\"Make chromosome id ranges tables (so toil-vg map can optionally split output by chromosome)\")\n parser.add_argument(\"--alt_path_gam_index\", dest=\"indexes\", action=\"append_const\", const=\"alt-gam\",\n help=\"Save alt paths from vg into an indexed GAM\")\n parser.add_argument(\"--xg_alts\", dest=\"indexes\", action=\"append_const\", const=\"xg_alts\",\n help=\"Include alt paths in xg index\")\n parser.add_argument(\"--all_index\", dest=\"indexes\", action=\"store_const\",\n const=[\"gcsa\", \"xg\", \"gbwt\", \"snarls\", \"trivial_snarls\", \"distance\", \"minimizer\", \"id_ranges\"],\n help=\"Equivalent to --gcsa_index --xg_index --gbwt_index --snarls_index --trivial_snarls_index \"\n \"--distance_index --minimizer_index --id_ranges_index\")", "def _SetupIndexes(self, _open=open):\n pass", "def index_all_files(self, root_dir):\n pass", "def index_subdirectory(directory, class_indices, follow_links, formats):\n dirname = os.path.basename(directory)\n valid_files = iter_valid_files(directory, follow_links, formats)\n labels = []\n filenames = []\n for root, fname in valid_files:\n labels.append(class_indices[dirname])\n absolute_path = os.path.join(root, fname)\n relative_path = os.path.join(\n dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return filenames, labels", "def buildIndex(filename, currentTime, baseDir):\n pathToFolder = baseDir + 'Collections/IndriIndices/'\n if not os.path.exists(pathToFolder):\n os.makedirs(pathToFolder)\n INDRI_BUILD_INDEX = '/mnt/bi-strg3/v/zivvasilisky/ziv/env/indri/indri/bin/IndriBuildIndex'\n CORPUS_PATH = filename\n CORPUS_CLASS = 
'trectext'\n MEMORY = '1G'\n INDEX = pathToFolder + currentTime\n STEMMER = 'krovetz'\n run_bash_command(INDRI_BUILD_INDEX + ' -corpus.path='+CORPUS_PATH + ' -corpus.class='+CORPUS_CLASS + ' -index='+INDEX + ' -memory='+MEMORY + ' -stemmer.name=' + STEMMER)\n return INDEX", "def run_indexing(job, context, inputGraphFileIDs,\n graph_names, index_name, chroms,\n vcf_phasing_file_ids = [], tbi_phasing_file_ids = [],\n bwa_fasta_id=None,\n gbwt_id = None, node_mapping_id = None,\n wanted = set(),\n gbwt_prune=False, gbwt_regions=[],\n dont_restore_paths=[],\n coalesce_regions=[]):\n \n # Coalesce the chroms, so we have some sets of chroms that live in the same\n # graph file.\n chroms, chrom_names = apply_coalesce(chroms, coalesce_regions=coalesce_regions)\n \n # Make a master child job\n child_job = Job()\n job.addChild(child_job)\n \n # And one job for all the per-chromosome xg jobs\n chrom_xg_root_job = Job()\n child_job.addChild(chrom_xg_root_job)\n \n # And inside it make one job for the main whole-graph xg construction that has to come after it\n xg_root_job = Job()\n chrom_xg_root_job.addFollowOn(xg_root_job)\n \n RealtimeLogger.info(\"Running indexing: {}.\".format({\n 'graph_names': graph_names,\n 'index_name': index_name,\n 'chroms': chroms if len(chroms) < 100 else f'{len(chroms)} items',\n 'vcf_phasing_file_ids': vcf_phasing_file_ids,\n 'tbi_phasing_file_ids': tbi_phasing_file_ids,\n 'gbwt_id': gbwt_id,\n 'node_mapping_id': node_mapping_id,\n 'wanted': wanted,\n 'gbwt_prune': gbwt_prune,\n 'bwa_fasta_id': bwa_fasta_id,\n 'coalesce_regions': coalesce_regions if max([len(x) for x in coalesce_regions] + [0]) < 100 else '(many)'\n }))\n\n # This will hold the index to return\n indexes = {}\n if gbwt_id:\n indexes['gbwt'] = gbwt_id\n elif 'gbwt' in wanted:\n # We need to do the xg so we can make the GBWT.\n # TODO: write a codepath that makes the GBWT without making the XG\n wanted.add('xg')\n\n # We shouldn't accept any phasing files when not making a GBWT index with them.\n assert(len(vcf_phasing_file_ids) == 0 or ('gbwt' in wanted))\n \n if 'minimizer' in wanted:\n # The minimizer index has some dependencies\n wanted.add('xg')\n if not gbwt_id:\n wanted.add('gbwt')\n \n if 'distance' in wanted:\n # The distance index also has some dependencies\n wanted.add('xg')\n wanted.add('trivial_snarls')\n \n # We guarantee that if 'gbwt' is in indexes, then there is (a promise for)\n # an actual GBWT.\n\n if 'xg' in wanted or 'gcsa' in wanted:\n indexes['chrom_xg'] = []\n indexes['chrom_gbwt'] = []\n \n if 'gbwt' in wanted and len(vcf_phasing_file_ids) > 0:\n # We want to make a GBWT, and we can in fact make a GBWT.\n # That's the only case we want per-chromosome XGs for anymore.\n \n # In its current state, vg prune requires chromosomal xgs, so we must make\n # these xgs if we're doing any kind of gcsa indexing. Also, if we're making\n # a gbwt, we do that at the same time (merging later if more than one graph).\n #\n # TODO: This is the *only* way to make the GBWT, actually. 
Write\n # code to make it separately.\n if not chroms or len(chroms) == 1:\n chroms = [index_name]\n indexes['chrom_xg'] = []\n indexes['chrom_gbwt'] = []\n \n # Check our input phasing VCF set for plausibility\n if len(vcf_phasing_file_ids) != len(tbi_phasing_file_ids):\n # Each VCF needs an index\n raise RuntimeError(\"Found {} phasing VCFs and {} indexes; counts must match!\".format(\n len(vcf_phasing_file_ids), len(tbi_phasing_file_ids)))\n \n if len(vcf_phasing_file_ids) > len(chroms):\n # We can only handle no VCFs, one VCF, or one VCF per chromosome until we run out of VCFs.\n # So what we can't handle is more VCFs than chromosomes\n RealtimeLogger.error(\"Chromosomes: {}\".format(chroms))\n RealtimeLogger.error(\"VCFs: {}\".format(vcf_phasing_file_ids))\n raise RuntimeError(\"Found too many ({}) phasing VCFs for {} chromosomes\".format(\n len(vcf_phasing_file_ids), len(chroms)))\n \n \n for i, chrom in enumerate(chroms):\n # For each chromosome\n \n # Find the phasing VCF\n if len(vcf_phasing_file_ids) == 0:\n # There may be 0\n vcf_id = None\n tbi_id = None\n elif len(vcf_phasing_file_ids) == 1:\n # There may be one for all chromosomes\n vcf_id = vcf_phasing_file_ids[0]\n tbi_id = tbi_phasing_file_ids[0]\n elif i < len(vcf_phasing_file_ids):\n # Otherwise the VCFs and chromosomes correspond in order, until the VCFs are depleted.\n # There is one for this chromosome\n vcf_id = vcf_phasing_file_ids[i]\n tbi_id = tbi_phasing_file_ids[i]\n else:\n # We have run out of VCFs for chromosomes to be in\n vcf_id = None\n tbi_id = None\n \n # Make a job to index just this chromosome and produce a\n # per-chromosome xg, gbwt, and threads file. Since there may be\n # thousands of chromosomes (including e.g. decoys) in a\n # whole-genome reference, keep these files as intermediates and\n # don't put them in the outstore, unless we're only doing one contig.\n xg_chrom_index_job = chrom_xg_root_job.addChildJobFn(run_cat_xg_indexing,\n context, [inputGraphFileIDs[i]],\n [graph_names[i]],\n chrom_names[i] if len(chroms) > 1 else index_name,\n vcf_id, tbi_id,\n make_gbwt=('gbwt' in wanted),\n gbwt_regions=gbwt_regions, intermediate=(len(chroms) > 1),\n include_alt_paths=('xg_alts' in wanted),\n cores=context.config.gbwt_index_cores,\n memory=context.config.gbwt_index_mem,\n disk=context.config.gbwt_index_disk,\n preemptable='gbwt' not in wanted or context.config.gbwt_index_preemptable)\n indexes['chrom_xg'].append(xg_chrom_index_job.rv(0))\n indexes['chrom_gbwt'].append(xg_chrom_index_job.rv(1))\n\n if len(chroms) > 1:\n # Once all the per-chromosome GBWTs are done and we are ready to make the whole-graph GBWT, merge them up\n indexes['gbwt'] = xg_root_job.addChildJobFn(run_merge_gbwts, context, indexes['chrom_gbwt'],\n index_name,\n cores=context.config.xg_index_cores,\n memory=context.config.xg_index_mem,\n disk=context.config.xg_index_disk).rv()\n else:\n # There's only one chromosome, so the one per-chromosome GBWT becomes the only GBWT\n indexes['gbwt'] = indexes['chrom_gbwt'][0]\n \n # now do the whole genome xg (without any gbwt)\n if 'chrom_xg' in indexes and len(indexes['chrom_xg']) == 1:\n # We made per-chromosome XGs and we have exactly one.\n # our first chromosome is effectively the whole genome (note that above we\n # detected this and put in index_name so it's saved right (don't care about chrom names))\n indexes['xg'] = indexes['chrom_xg'][0]\n elif 'xg' in wanted:\n # Build an xg index for the whole genome.\n \n xg_index_job = xg_root_job.addChildJobFn(run_cat_xg_indexing,\n 
context, inputGraphFileIDs,\n graph_names, index_name,\n None, None,\n make_gbwt=False,\n include_alt_paths=('xg_alts' in wanted),\n cores=context.config.xg_index_cores,\n memory=context.config.xg_index_mem,\n disk=context.config.xg_index_disk)\n \n indexes['xg'] = xg_index_job.rv(0)\n\n\n gcsa_root_job = Job()\n # gcsa follows from chrom_xg jobs only if per-chromosome gbwts are needed for per-chromosome pruning\n if gbwt_prune:\n chrom_xg_root_job.addFollowOn(gcsa_root_job)\n else:\n child_job.addChild(gcsa_root_job)\n \n if 'gcsa' in wanted:\n # We know we made the per-chromosome indexes already, so we can use them here to make the GCSA \n # todo: we're only taking in a genome gbwt as input, because that's all we write\n if ('chrom_gbwt' not in indexes or indexes['chrom_gbwt'] == []) and 'gbwt' in indexes:\n # We lack per-chromosome GBWTs but we have a whole genome one we can use\n indexes['chrom_gbwt'] = indexes['gbwt'] * len(inputGraphFileIDs)\n gcsa_job = gcsa_root_job.addChildJobFn(run_gcsa_prep, context, inputGraphFileIDs,\n graph_names, index_name, \n indexes.get('chrom_gbwt', []) if gbwt_prune else [],\n node_mapping_id,\n remove_paths=dont_restore_paths,\n cores=context.config.misc_cores,\n memory=context.config.misc_mem,\n disk=context.config.misc_disk)\n indexes['gcsa'] = gcsa_job.rv(0)\n indexes['lcp'] = gcsa_job.rv(1)\n \n if len(inputGraphFileIDs) > 1 and 'id_ranges' in wanted:\n # Also we need an id ranges file in parallel with everything else\n indexes['id_ranges'] = child_job.addChildJobFn(run_id_ranges, context, inputGraphFileIDs,\n graph_names, index_name, chroms,\n cores=context.config.misc_cores,\n memory=context.config.misc_mem,\n disk=context.config.misc_disk).rv()\n \n if 'snarls' in wanted:\n # Also we need a snarl index in parallel with everything else\n indexes['snarls'] = child_job.addChildJobFn(run_snarl_indexing, context, inputGraphFileIDs,\n graph_names, index_name,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk).rv()\n \n if 'trivial_snarls' in wanted:\n # Also we need a snarl index with trivial snarls in parallel with everything else.\n # Make sure to save the job so things can wait on it.\n trivial_snarls_job = child_job.addChildJobFn(run_snarl_indexing, context, inputGraphFileIDs,\n graph_names, index_name, include_trivial=True,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk)\n \n indexes['trivial_snarls'] = trivial_snarls_job.rv()\n \n if 'distance' in wanted:\n # We need a distance index, based on the XG and the trivial snarls, which we know are being computed.\n # Run it after our XG\n distance_job = xg_root_job.addFollowOnJobFn(run_distance_indexing, context, indexes['xg'],\n indexes['trivial_snarls'], index_name,\n cores=context.config.distance_index_cores,\n memory=context.config.distance_index_mem,\n disk=context.config.distance_index_disk)\n # Make sure it waits for trivial snarls\n trivial_snarls_job.addFollowOn(distance_job)\n \n indexes['distance'] = distance_job.rv()\n \n if 'minimizer' in wanted and 'gbwt' in indexes:\n # We need a minimizer index, based on the GBWT (either provided or\n # computed) and the XG (which we know is being computed).\n \n # If there's no GBWT available, we can't compute a minimizer index.\n \n # Run it after our XG.\n # We know that, if the GBWT is being computed, it also happens under the XG job.\n # TODO: change that.\n minimizer_job = 
xg_root_job.addFollowOnJobFn(run_minimizer_indexing, context, indexes['xg'],\n indexes['gbwt'], index_name,\n cores=context.config.minimizer_index_cores,\n memory=context.config.minimizer_index_mem,\n disk=context.config.minimizer_index_disk)\n \n indexes['minimizer'] = minimizer_job.rv()\n \n\n if bwa_fasta_id:\n # We need to index a reference FASTA for BWA\n indexes['bwa'] = child_job.addChildJobFn(run_bwa_index, context, bwa_fasta_id,\n cores=context.config.bwa_index_cores, memory=context.config.bwa_index_mem,\n disk=context.config.bwa_index_disk).rv()\n\n if 'alt-gam' in wanted:\n alt_extract_job = child_job.addChildJobFn(run_alt_path_extraction, context, inputGraphFileIDs,\n graph_names, None,\n cores=context.config.chunk_cores,\n memory=context.config.chunk_mem,\n disk=context.config.chunk_disk)\n \n indexes['alt-gam'] = alt_extract_job.addFollowOnJobFn(run_gam_indexing, context, alt_extract_job.rv(),\n index_name,\n cores=context.config.snarl_index_cores,\n memory=context.config.snarl_index_mem,\n disk=context.config.snarl_index_disk).rv()\n \n return indexes", "def index_files():\n\n print(\"Indexing files\")\n\n for root, _, files in os.walk(image_directory):\n for item in files:\n for file_type in file_types:\n if file_type in item:\n images_in_directory.append(os.path.join(root, item))\n\n print(f'Finished indexing {len(images_in_directory)} files')\n\n pass", "def index_reference(reference_fasta):\n index_basename = os.path.splitext(os.path.basename(reference_fasta))[0]\n index_outdir = os.path.join(os.getcwd(), 'index_files')\n if not os.path.exists(index_outdir):\n os.mkdir(index_outdir)\n index_outpath = os.path.join(index_outdir, index_basename)\n sp.check_call(f\"bowtie2-build -q --threads 4 {reference_fasta} {index_outpath}\", shell=True)", "def index(path):\n return render_template('index.jinja2')", "def build_index():\n pass", "def create_new_index(self, path: str):\n if path.endswith(\"/\"):\n path = path[:-1]\n self.file_index = ([(root, files)\n for root, dirs, files in os.walk(path)\n if files])\n self.modified_time = os.path.getmtime(path)\n \n with open(os.path.join(\n INDEX_DIR, path.replace(\"/\", \"_\") + \".pkl\"\n ), \"wb\") as f:\n pickle.dump((self.file_index, self.modified_time), f)", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def __init__(self, *paths):\r\n self.paths = paths", "def create_idx(for_this_file, put_here):\n file_name = for_this_file.split('/')[-1]\n idx_dir = '/uufs/chpc.utah.edu/common/home/horel-group/archive/' + put_here\n if not os.path.exists(idx_dir):\n os.makedirs(idx_dir)\n idx_name = idx_dir + file_name + '.idx'\n os.system('wgrib2 ' + for_this_file + ' -t -var -lev -ftime > ' + idx_name)\n print \"created idx file:\", idx_name", "def set_paths(self, paths):\n self.paths = paths", "def example(*paths):\n\n return normpath(join(dirname(__file__), '..', 'examples', *paths))", "def set_paths_gen(self, paths_gen): #w:\r\n self.paths_gen = paths_gen", "def build_index(self, folder):\n self.__start_indexing()\n for chunk in sorted(os.listdir(folder)):\n path = folder + \"/\" + chunk\n if os.path.isdir(path):\n for dir in sorted(os.listdir(path)):\n filedir = path + \"/\" + dir\n for anns_file in sorted(os.listdir(filedir)):\n self.index_file(filedir + \"/\" + anns_file)\n 
self.__end_indexing()", "def index_indexes(folder,saveAs=\"index2.html\"):\n indexes=[]\n saveAs=os.path.abspath(os.path.join(folder,saveAs))\n for subFolder in glob.glob(folder+\"/*/\"):\n if os.path.exists(subFolder+\"/SWH2P/index.html\"):\n indexes.append(os.path.abspath(subFolder+\"/SWH2P/index.html\"))\n\n html='<html><body><h1>Automatic Index</h1><ul>'\n for item in sorted(indexes):\n html+='<li><a href=\"%s\">%s</a>'%(item,\n os.path.basename(os.path.dirname(os.path.dirname(item))))\n html+='</ul></body></html>'\n with open(saveAs,'w') as f:\n f.write(html)\n print(\"saved\",saveAs)\n webbrowser.open(saveAs)", "def index_parse_args(parser):\n \n parser.add_argument(\"--gcsa_index_cores\", type=int,\n help=\"number of threads during the gcsa indexing step\")\n parser.add_argument(\"--xg_index_cores\", type=int,\n help=\"number of threads during the xg indexing step\")\n parser.add_argument(\"--gbwt_index_cores\", type=int,\n help=\"number of threads during the gbwt indexing step\") \n\n parser.add_argument(\"--index_name\", type=str, default='index',\n help=\"name of index files. <name>.xg, <name>.gcsa etc.\")\n\n parser.add_argument(\"--gcsa_opts\", type=str,\n help=\"Options to pass to gcsa indexing.\")\n \n parser.add_argument(\"--minimizer_opts\", type=str,\n help=\"Options to pass to minimizer indexing.\")\n\n parser.add_argument(\"--vcf_phasing\", nargs='+', type=make_url, default=[],\n help=\"Import phasing information from VCF(s) into xg (or GBWT with --gbwt_index)\")\n parser.add_argument(\"--vcf_phasing_regions\", nargs='+', default=[],\n help=\"Hint the relevant chrom:start-end regions to the GBWT indexer, for subregion graphs\")\n parser.add_argument(\"--gbwt_input\", type=make_url,\n help=\"Use given GBWT for GCSA2 pruning\")\n parser.add_argument(\"--gbwt_prune\", action='store_true',\n help=\"Use gbwt for gcsa pruning\")\n parser.add_argument(\"--force_phasing\", type=lambda x:bool(util.strtobool(x)), default=None,\n help=\"If 'True', randomly phase unphased variants and discard unresolveable overlaps for GBWT\")", "def create_index():", "def index_writer_init(idx_dir=\"Wiki_index\"):\n try:\n assert type(idx_dir) is str\n except AssertionError:\n raise TypeError\n\n try:\n assert idx_dir != \"\"\n except AssertionError:\n raise ValueError\n\n # Creazione dello schema dei documenti da indicizzare\n schema: Schema = Schema(title=TEXT(stored=True),\n identifier=ID(stored=True, unique=True),\n content=TEXT(stored=True, analyzer=StemmingAnalyzer()))\n\n # Verifica dell'esistenza della cartella dell'indice\n if not path.exists(idx_dir):\n # In caso la cartella non esista viene creata\n mkdir(idx_dir)\n\n # Creazione dell'indice all'interno della cartella designata\n index = create_in(idx_dir, schema)\n\n # La funzione restituisce un oggetto in grado di inserire (scrivere) documenti all'interno dell'indice\n return index.writer()", "def create_index(index, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run([\n 'devpi', 'index', '--clientdir', clientdir, '-c', index])", "def paths(self, paths):\n\n self._paths = paths", "def path_entries(self):", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def setByPathAndIndex(self, keys, index, value):\n self.getByPath(keys[:-1])[keys[-1]][index] = value", "def __init__(self, paths):\n self.paths = paths", "def __init__(self):\n root_dir = 
os.path.dirname(os.path.abspath(__file__))\n self.base_dir = root_dir + \"/data/index/\" # base directory location for all indexes", "def index_files(self, input_dir, output_dir):\n self.lucene = Lucene(output_dir)\n self.lucene.open_writer()\n for path, dirs, _ in os.walk(input_dir):\n for dir in sorted(dirs):\n for _, _, files in os.walk(os.path.join(input_dir, dir)):\n for fn in sorted(files):\n print \"Indexing \", os.path.join(input_dir + dir, fn), \"...\"\n self.index_file(os.path.join(input_dir + dir, fn))\n # closes Lucene index\n self.lucene.close_writer()", "def mk_index_dir(self):\n try:\n os.makedirs(self.params[\"index_path\"])\n except FileExistsError:\n pass", "def test_path2():\n path = [ (np.pi/10, 0.3, 1)] * 20\n execute_path(path, True)", "def index(self, name, file, passages, index_name=\"default\"):\n raise NotImplementedError()", "def run_alt_path_extraction(job, context, inputGraphFileIDs, graph_names, index_name):\n \n assert(len(inputGraphFileIDs) == len(graph_names))\n \n if len(inputGraphFileIDs) > 1:\n # We have been given multiple chromosome graphs. \n \n RealtimeLogger.info(\"Breaking up alt path GAM computation for {}\".format(str(graph_names)))\n \n sub_jobs = []\n for i, (file_id, file_name) in enumerate(zip(inputGraphFileIDs, graph_names)):\n # For each input graph, make a child job to index it.\n sub_jobs.append(job.addChildJobFn(run_alt_path_extraction, context, [file_id], [file_name],\n index_name + '.{}'.format(i) if index_name else None,\n cores=context.config.chunk_cores,\n memory=context.config.chunk_mem,\n disk=context.config.chunk_disk))\n \n # Make a job to concatenate the indexes all together \n concat_job = sub_jobs[0].addFollowOnJobFn(run_concat_files, context, [job.rv() for job in sub_jobs],\n index_name + '_alts.gam' if index_name is not None else None,\n memory=context.config.chunk_mem,\n disk=context.config.chunk_disk)\n \n for i in range(1, len(sub_jobs)):\n # And make it wait for all of them\n sub_jobs[i].addFollowOn(concat_job)\n \n return concat_job.rv()\n \n else:\n # Base case: single graph\n \n start_time = timeit.default_timer()\n \n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Download the one graph\n graph_id = inputGraphFileIDs[0]\n graph_filename = graph_names[0]\n job.fileStore.readGlobalFile(graph_id, os.path.join(work_dir, graph_filename))\n\n # Where do we put the gam?\n gam_filename = os.path.join(work_dir, \"{}_alts.gam\".format(index_name if index_name is not None else \"part\"))\n\n cmd = ['vg', 'paths', '-v', graph_filename, '-Q', '_alt_', '-X']\n with open(gam_filename, 'wb') as gam_file:\n try:\n # Compute snarls to the correct file\n context.runner.call(job, cmd, work_dir=work_dir, outfile=gam_file)\n except:\n # Dump everything we need to replicate the indexing\n logging.error(\"Alt path gam extraction failed. Dumping files.\")\n context.write_output_file(job, os.path.join(work_dir, graph_filename))\n raise\n \n if index_name is not None:\n # Checkpoint index to output store\n gam_file_id = context.write_output_file(job, gam_filename)\n else:\n # Just save the index as an intermediate\n gam_file_id = context.write_intermediate_file(job, gam_filename)\n \n \n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished GAM extraction. 
Process took {} seconds.\".format(run_time))\n\n return gam_file_id", "def abspath(self, *args):\n return os.path.join(self._spool, *args)", "def get_index(self, *args, **dargs):\n pass", "def gravarArquivoIndices(indices):\n arq = open(\"arquivoIndices.txt\", \"w\")\n for i in indices.indices:\n linha = i.codigo + \",\" + str(i.indice) + \",\" + str(i.excluido) + \"\\n\"\n arq.write(linha)\n arq.close()\n return", "def index(args):\n for level in ['family', 'language', 'dialect']:\n if args.args[0] in [level, 'all']:\n make_index(level)", "def index_object(idxs=None):", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def locate(self, *args, **kwargs):\n paths, path_string = [], \"\"\n self._locate(*args, paths=paths, path_string=path_string, **kwargs)\n return paths", "def index(self, path):\n try:\n indices = [int(x) if x.isdigit() else x for x in split(r'[\\/\\[\\]]+', path[1:])]\n return reduce(lambda x, y: x[y], indices, self.document)\n except:\n return None", "def _index_fn(fn: str, index_mapping_dir: str) -> str:\n if index_mapping_dir:\n # Remove leading \"/\" and \"..\".\n while fn.startswith((\"/\", \"..\")):\n if fn.startswith(\"..\"):\n fn = fn.lstrip(\"..\")\n if fn.startswith(\"/\"):\n fn = fn.lstrip(\"/\")\n idx_fn = f\"{os.path.join(index_mapping_dir, fn)}.{__idx_suffix__}\"\n # Create parent directory if needed.\n os.makedirs(os.path.dirname(idx_fn), exist_ok=True)\n else:\n idx_fn = f\"{fn}.{__idx_suffix__}\"\n return idx_fn", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def input_dir(path):\n global datasets\n\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n raise IOError('Incorrect input_dir specified: no such directory')\n for dataset_name in datasets:\n dataset_path = os.path.join(path, '%s_set.hdf' % dataset_name)\n if not os.path.exists(dataset_path):\n raise IOError('Incorrect input_dir specified:'\n ' %s set file not found' % dataset_path)\n return path", "def _create_index_file(\n root_dir, location, image_files, video_files, dirs, force_no_processing=False):\n # Put together HTML as a list of the lines we'll want to include\n # Issue #2 exists to do this better than HTML in-code\n header_text = 'imageMe: {0} [{1} image(s)] [{2} video(s)]'.format(\n location, str(len(image_files)), str(len(video_files))\n )\n html = [\n '<!DOCTYPE html>',\n '<html>',\n ' <head>',\n ' <title>imageMe</title>'\n ' <style>',\n ' html, body {margin: 0; padding: 0;}',\n ' .table {align: center;}',\n ' .content {',\n ' padding: 3em;',\n ' padding-left: 4em;',\n ' padding-right: 4em;',\n ' }',\n ' .image {max-width: 100%; border-radius: 0.3em;}',\n ' td {width: ' + str(100.0 / args.column) + '%;}',\n ' </style>',\n ' </head>',\n ' <body>',\n ' <div class=\"content\">',\n ' <h2 class=\"header\">' + header_text + '</h2>'\n ]\n\n # Populate the present subdirectories - this includes '..' 
unless we're at\n # the top level\n directories = []\n if root_dir != location:\n directories = ['..']\n directories += dirs\n if len(directories) > 0:\n html.append('<hr>')\n # For each subdirectory, include a link to its index file\n for directory in directories:\n link = directory + '/' + args.index_file_name\n html += [\n ' <h3>',\n ' <a href=\"' + link + '\">' + directory + '</a>',\n ' </h3>'\n ]\n\n files = sorted(image_files + video_files)\n if args.separate_image_and_video:\n files = image_files + [None] + video_files\n\n # Populate the gallery table\n if files:\n # Counter to cycle down through table rows\n table_column_count = 1\n html += ['<hr>', '<table>']\n\n # For each file, potentially create a new <tr> and create a new <td>\n for file in files:\n if table_column_count == 1:\n html.append('<tr>')\n\n if file in video_files:\n html += [\n '<td>',\n ' <video controls preload width=\"100%\">',\n ' <source src=\"' + file + '\">',\n ' Your browser does not support HTML5 video.'\n ' </video>',\n '</td>'\n ]\n\n if file in image_files:\n img_src = _get_thumbnail_src_from_file(\n location, file, force_no_processing\n )\n link_target = _get_image_link_target_from_file(\n location, file, force_no_processing\n )\n html += [\n '<td>',\n ' <a href=\"' + link_target + '\">',\n ' <img class=\"image\" src=\"' + img_src + '\">',\n ' </a>',\n '</td>'\n ]\n\n if table_column_count == args.column or file == None:\n table_column_count = 0\n html.append('</tr>')\n\n table_column_count += 1\n\n if table_column_count != 1:\n html += ['</tr>']\n html += ['</table>']\n\n html += [\n ' </div>',\n ' </body>',\n '</html>'\n ]\n\n # Actually create the file, now we've put together the HTML content\n index_file_path = _get_index_file_path(location)\n print('Creating index file %s' % index_file_path)\n index_file = open(index_file_path, 'w')\n index_file.write('\\n'.join(html))\n index_file.close()\n\n # Return the path for cleaning up later\n return index_file_path", "def build_index(fasta_fp, index_fp):\n subprocess.call([\" \".join([\"bowtie2-build\", fasta_fp, index_fp])],\n shell=True)", "def set_directories(args):\n global READS_DIR\n READS_DIR = args.reads_dir\n if not os.path.isdir(READS_DIR):\n sys.exit(\"%s not a directory\" % READS_DIR)\n READS_DIR = os.path.abspath(READS_DIR) + \"/\"\n os.chdir(READS_DIR)", "def index_directory(directory,\n labels,\n formats,\n class_names=None,\n shuffle=True,\n seed=None,\n follow_links=False):\n if labels is None:\n # in the no-label case, index from the parent directory down.\n subdirs = ['']\n class_names = subdirs\n else:\n subdirs = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n subdirs.append(subdir)\n if not class_names:\n class_names = subdirs\n else:\n if set(class_names) != set(subdirs):\n raise ValueError(\n 'The `class_names` passed did not match the '\n 'names of the subdirectories of the target directory. 
'\n 'Expected: %s, but received: %s' %\n (subdirs, class_names))\n class_indices = dict(zip(class_names, range(len(class_names))))\n\n # Build an index of the files\n # in the different class subfolders.\n pool = multiprocessing.pool.ThreadPool()\n results = []\n filenames = []\n\n for dirpath in (os.path.join(directory, subdir) for subdir in subdirs):\n results.append(\n pool.apply_async(index_subdirectory,\n (dirpath, class_indices, follow_links, formats)))\n labels_list = []\n for res in results:\n partial_filenames, partial_labels = res.get()\n labels_list.append(partial_labels)\n filenames += partial_filenames\n if labels not in ('inferred', None):\n if len(labels) != len(filenames):\n raise ValueError('Expected the lengths of `labels` to match the number '\n 'of files in the target directory. len(labels) is %s '\n 'while we found %s files in %s.' % (\n len(labels), len(filenames), directory))\n else:\n i = 0\n labels = np.zeros((len(filenames),), dtype='int32')\n for partial_labels in labels_list:\n labels[i:i + len(partial_labels)] = partial_labels\n i += len(partial_labels)\n\n if labels is None:\n print('Found %d files.' % (len(filenames),))\n else:\n print('Found %d files belonging to %d classes.' %\n (len(filenames), len(class_names)))\n pool.close()\n pool.join()\n file_paths = [os.path.join(directory, fname) for fname in filenames]\n\n if shuffle:\n # Shuffle globally to erase macro-structure\n if seed is None:\n seed = np.random.randint(1e6)\n rng = np.random.RandomState(seed)\n rng.shuffle(file_paths)\n rng = np.random.RandomState(seed)\n rng.shuffle(labels)\n return file_paths, labels, class_names", "def join(self, path, *paths):", "def update_path():\n #TODO update path information\n pass", "def set_paths(self, paths):\n self._paths = paths\n self._paths_set = True", "def __createIndexFile(self, dimensions):\n target = os.path.join(self.workingDir, self.get( 'index_filename'))\n self.info(\"Creating index file {}\".format(target))\n text = \"\"\n for i in range(0,dimensions):\n text+=\"1 \"\n\n util.createScript(target, text)\n return target", "def run_xg_indexing(job, context, inputGraphFileIDs, graph_names, index_name,\n vcf_phasing_file_id = None, tbi_phasing_file_id = None,\n make_gbwt=False, gbwt_regions=[],\n intermediate=False, include_alt_paths=False):\n \n RealtimeLogger.info(\"Starting xg indexing...\")\n start_time = timeit.default_timer()\n \n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Scratch directory for indexing\n index_temp_dir = os.path.join(work_dir, 'index-temp')\n os.makedirs(index_temp_dir)\n \n RealtimeLogger.info(\"inputGraphFileIDs: {}\".format(str(inputGraphFileIDs)))\n RealtimeLogger.info(\"graph_names: {}\".format(str(graph_names)))\n # Our local copy of the graphs\n graph_filenames = []\n for i, graph_id in enumerate(inputGraphFileIDs):\n graph_filename = os.path.join(work_dir, graph_names[i])\n job.fileStore.readGlobalFile(graph_id, graph_filename)\n graph_filenames.append(os.path.basename(graph_filename))\n\n # If we have a separate GBWT it will go here\n gbwt_filename = os.path.join(work_dir, \"{}.gbwt\".format(index_name))\n # And if we ahve a separate thread db it will go here\n thread_db_filename = os.path.join(work_dir, \"{}.threads\".format(index_name))\n \n # Get the vcf file for loading phasing info\n if vcf_phasing_file_id:\n phasing_file = os.path.join(work_dir, 'phasing.{}.vcf.gz'.format(index_name))\n job.fileStore.readGlobalFile(vcf_phasing_file_id, phasing_file)\n 
job.fileStore.readGlobalFile(tbi_phasing_file_id, phasing_file + '.tbi')\n phasing_opts = ['-v', os.path.basename(phasing_file)]\n \n if make_gbwt:\n # Write the haplotype index to its own file\n phasing_opts += ['--gbwt-name', os.path.basename(gbwt_filename)]\n \n for region in gbwt_regions:\n phasing_opts += ['--region', region]\n\n if context.config.force_phasing:\n # We need to discard overlaps also to really get rid of haplotype breaks.\n phasing_opts += ['--force-phasing', '--discard-overlaps']\n else:\n phasing_opts = []\n \n # Where do we put the XG index?\n xg_filename = \"{}.xg\".format(index_name)\n\n # Now run the indexer.\n RealtimeLogger.info(\"XG Indexing {}\".format(str(graph_filenames)))\n\n command = ['vg', 'index', '--threads', str(job.cores), '--xg-name', os.path.basename(xg_filename)]\n command += phasing_opts + graph_filenames\n command += ['--temp-dir', os.path.join('.', os.path.basename(index_temp_dir))]\n\n if include_alt_paths:\n command += ['--xg-alts']\n \n try:\n context.runner.call(job, command, work_dir=work_dir)\n except:\n # Dump everything we need to replicate the index run\n logging.error(\"XG indexing failed. Dumping files.\")\n\n for graph_filename in graph_filenames:\n context.write_output_file(job, os.path.join(work_dir, graph_filename))\n if vcf_phasing_file_id:\n context.write_output_file(job, phasing_file)\n context.write_output_file(job, phasing_file + '.tbi')\n\n raise\n\n # Determine if we want to checkpoint index to output store\n write_function = context.write_intermediate_file if intermediate else context.write_output_file\n xg_file_id = write_function(job, os.path.join(work_dir, xg_filename))\n \n gbwt_file_id = None\n thread_db_file_id = None\n if make_gbwt and vcf_phasing_file_id:\n # Also save the GBWT if it was generated\n gbwt_file_id = write_function(job, gbwt_filename)\n \n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished XG index. 
Process took {} seconds.\".format(run_time))\n\n # TODO: convert to a dict\n return (xg_file_id, gbwt_file_id)", "def image_path_from_index(self, index):\n raise NotImplementedError", "def make_index(fasta_file, name=\"human.index\"):\n params = {\n \"mods-spec\": \"C+0,K+229.162932,3M+15.99492,3N+0.9840155848\",\n \"nterm-peptide-mods-spec\": \"X+229.162932\",\n \"nterm-protein-mods-spec\": \"1X+42.01056\",\n \"enzyme\": \"trypsin/p\",\n \"missed-cleavages\": str(MISSED_CLEAVAGES),\n \"output-dir\": \"index-out\",\n }\n\n if not os.path.isdir(name):\n search.tide_index(fasta_file, name, **params)\n\n return name", "def index_xml(directory, db):\n xml.index_directory(directory, db)", "def index(backend_name, ids):\n # Load the backend\n backend = get_backend(backend_name)\n # Handle each ID\n for id in ids:\n # See if the volume is already in there\n if config.index.archives(id=id):\n click.secho(f\"{id} already indexed\", fg=\"yellow\")\n continue\n # Fetch the volume's metadata and make an Archive object\n metadata = backend.archive_retrieve_meta(id)\n archive = Archive.from_json(metadata)\n # Index it\n config.index.add_archive(archive, backend_name)\n click.secho(f\"{archive.id} added, with {len(archive.files)} files\")", "def create_web_output_paths() -> None:\n create_path_and_index(\"\")\n create_path_and_index(\"photos/\")\n create_path_and_index(\"video/\")\n create_path_and_index(\"references/\")\n create_path_and_index(\"names/\")\n create_path_and_index(\"art/\")\n create_path_and_index(\"morphology/\")\n create_path_and_index(\"maps/\")\n create_path_and_index(\"images/\")\n create_path_and_index(\"images/flag-icon-css/\")\n create_path_and_index(\"images/flag-icon-css/css/\")\n create_path_and_index(\"images/flag-icon-css/flags/\")\n create_path_and_index(\"images/flag-icon-css/flags/4x3/\")\n create_path_and_index(\"locations/\")\n create_path_and_index(\"locations/keys/\")\n create_path_and_index(\"js/\")\n create_path_and_index(\"sizes/\")\n create_path_and_index(\"handedness/\")", "def index_args():\n return {}", "def get_paths(args):\n log, rest = get_log_path(args)\n out, _ = get_out_path(args)\n temp, _ = get_temp_path(args)\n return log, out, temp, rest", "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path,'query',\n index)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "def index(self,\n path_in: str,\n path_out: str,\n # path_terms: str\n ) -> Tuple[Dict[str, int], Dict[int, str]]:\n self._docs_processed = 0\n self._start_time = time.time()\n\n # terms = set()\n # with open(path_terms, 'r', encoding='utf8') as fin:\n # for line in fin:\n # terms.add(line.strip('\\n'))\n\n word_to_idx = {}\n idx_to_word = {}\n i = 0\n corpus_idx = []\n for doc in get_docs(path_in):\n doc_idx = []\n for sent in doc:\n for word in sent:\n if word not in word_to_idx:\n word_to_idx[word] = i\n idx_to_word[i] = word\n i += 1\n idx_sent = [word_to_idx[word] for word in sent]\n doc_idx.append(idx_sent)\n corpus_idx.append(doc_idx)\n # doc_idx = []\n self._docs_processed += 1\n self._update_cmd_counter()\n\n if self._docs_processed % self._file_write_threshhold == 0:\n self._update_cmd_time_info()\n self.write_corpus(corpus_idx, path_out)\n corpus_idx = []\n\n self._update_cmd_time_info(end=True)\n self.write_corpus(corpus_idx, path_out)\n self._already_written_to_file = False\n return word_to_idx, idx_to_word", "def ids_to_index(self, ids):\n index = (ids[0]*self._div + ids[1])*self.batch_per_file 
+ids[2]\n return(index)", "def _load_image_set_index(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n image_index = [x.strip().split()[0] for x in f.readlines()]\n # \n return image_index", "def test_outpath_multi(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths]\n\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def run_gcsa_indexing(job, context, prune_ids, graph_names, index_name, mapping_id):\n \n RealtimeLogger.info(\"Starting gcsa indexing...\")\n start_time = timeit.default_timer() \n\n # Define work directory for docker calls\n work_dir = job.fileStore.getLocalTempDir()\n\n # Scratch directory for indexing\n index_temp_dir = os.path.join(work_dir, 'index-temp')\n os.makedirs(index_temp_dir)\n\n # Track disk used for files, so we can sensibly limit disk used for GCSA scratch\n disk_used = 0\n\n # Download all the pruned graphs. \n prune_filenames = []\n \n for graph_i, prune_id in enumerate(prune_ids):\n prune_filename = os.path.join(work_dir, remove_ext(os.path.basename(graph_names[graph_i]), '.vg') + '.prune.vg')\n job.fileStore.readGlobalFile(prune_id, prune_filename)\n prune_filenames.append(prune_filename)\n disk_used += prune_id.size\n\n # Download the mapping_id\n mapping_filename = None\n if mapping_id:\n mapping_filename = os.path.join(work_dir, 'node_mapping')\n job.fileStore.readGlobalFile(mapping_id, mapping_filename)\n disk_used += mapping_id.size\n\n # Where do we put the GCSA2 index?\n gcsa_filename = \"{}.gcsa\".format(index_name)\n\n command = ['vg', 'index', '-g', os.path.basename(gcsa_filename)] + context.config.gcsa_opts\n command += ['--threads', str(job.cores)]\n command += ['--temp-dir', os.path.join('.', os.path.basename(index_temp_dir))]\n # TODO: can/should we guess the size of the output file and subtract that here too?\n command += ['--size-limit', str((job.disk - disk_used) // (1024**3))]\n \n if mapping_id:\n command += ['--mapping', os.path.basename(mapping_filename)]\n\n for prune_filename in prune_filenames:\n command += [os.path.basename(prune_filename)]\n\n try:\n context.runner.call(job, command, work_dir=work_dir)\n except:\n # Dump everything we need to replicate the index run\n logging.error(\"GCSA indexing failed. Dumping files.\")\n for prune_filename in prune_filenames:\n context.write_output_file(job, prune_filename)\n if mapping_id:\n context.write_output_file(job, mapping_filename)\n raise\n\n # Checkpoint index to output store\n gcsa_file_id = context.write_output_file(job, os.path.join(work_dir, gcsa_filename))\n lcp_file_id = context.write_output_file(job, os.path.join(work_dir, gcsa_filename) + \".lcp\")\n\n end_time = timeit.default_timer()\n run_time = end_time - start_time\n RealtimeLogger.info(\"Finished GCSA index. 
Process took {} seconds.\".format(run_time))\n\n return gcsa_file_id, lcp_file_id", "def faiss_index(vectors, ids=None):\n index = faiss.IndexFlatL2(vectors.shape[1])\n if ids:\n index = faiss.IndexIDMap(index)\n index.add_with_ids(vectors, np.array([i for i in ids]))\n else:\n index.add(vectors)\n\n return index", "def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"", "def create_hypothetical_river_paths_map(riv_dirs,lsmask=None,use_f2py_func=True,\n use_f2py_sparse_iterator=False,nlat=360,nlong=720,\n sparse_fraction=0.5,use_new_method=False):\n\n riv_dirs = np.insert(riv_dirs,obj=0,values=np.zeros(nlong), axis=0)\n #nlat+1 because the array is now already nlat+1 elements wide so you want to place\n #the new row after the last row\n riv_dirs = np.insert(riv_dirs,obj=nlat+1,values=np.zeros(nlong), axis=0)\n if lsmask is not None:\n lsmask = np.insert(lsmask,obj=0,values=np.ones(nlong,dtype=bool), axis=0)\n #nlat+1 because the array is now already nlat+1 elements wide so you want to place\n #the new row after the last row\n lsmask = np.insert(lsmask,obj=nlat+1,values=np.ones(nlong,dtype=bool), axis=0)\n riv_dirs = np.ma.array(riv_dirs,mask=lsmask,copy=True,dtype=int).filled(0)\n else:\n riv_dirs = np.array(riv_dirs,copy=True,dtype=int)\n paths_map = np.zeros((nlat+2,nlong),dtype=np.int32,order='F')\n if use_f2py_func and use_new_method:\n additional_fortran_filenames = [\"algorithms/accumulate_flow_mod.o\",\n \"base/coords_mod.o\",\n \"algorithms/flow_accumulation_algorithm_mod.o\",\n \"base/convert_rdirs_to_indices.o\",\n \"base/doubly_linked_list_mod.o\",\n \"base/doubly_linked_list_link_mod.o\",\n \"base/subfield_mod.o\",\n \"base/unstructured_grid_mod.o\",\n \"base/precision_mod.o\"]\n additional_fortran_filepaths = [path.join(fortran_project_object_path,filename) for filename in\\\n additional_fortran_filenames]\n f2py_mngr = f2py_mg.f2py_manager(path.join(fortran_project_source_path,\n \"drivers\",\n \"accumulate_flow_driver_mod.f90\"),\n func_name=\"accumulate_flow_latlon_f2py_wrapper\",\n additional_fortran_files=additional_fortran_filepaths,\n include_path=fortran_project_include_path)\n paths_map = f2py_mngr.\\\n run_current_function_or_subroutine(np.asfortranarray(riv_dirs),\n *riv_dirs.shape)\n #Make a minor postprocessing correction\n paths_map[np.logical_and(np.logical_or(riv_dirs == 5,\n riv_dirs == 0),\n paths_map == 0)] = 1\n else:\n if use_f2py_func:\n f2py_kernel = f2py_mg.f2py_manager(path.join(fortran_source_path,\n 'mod_iterate_paths_map.f90'),\n func_name='iterate_paths_map')\n iterate_paths_map_function = f2py_kernel.run_current_function_or_subroutine\n else:\n iterate_paths_map_function = iterate_paths_map\n while iterate_paths_map_function(riv_dirs,paths_map,nlat,nlong):\n remaining_points = paths_map.size - np.count_nonzero(paths_map)\n if use_f2py_sparse_iterator and remaining_points/float(paths_map.size) < sparse_fraction:\n f2py_sparse_iterator = f2py_mg.f2py_manager(path.join(fortran_source_path,\n 'mod_iterate_paths_map.f90'),\n func_name='sparse_iterator')\n f2py_sparse_iterator.run_current_function_or_subroutine(riv_dirs,paths_map,nlat,nlong)\n break\n return paths_map[1:-1,:]", "def index_search(files, index, terms):\n\n\n termlist = set()\n\n for i in range(len(terms)):\n for j in range(len(terms[i].split(\" \"))):\n\n termlist.add(terms[i].split(\" \")[j])\n\n indexlist = [index[w] for w in termlist]\n\n intersect = list(set.intersection(*indexlist))\n\n return [files[x] for x in intersect]", "def 
build_single_file_index(cls, index_path, d):\n index = json.load(open(index_path))\n info_list = cls.list_from_index_path(index_path)\n\n sub_d = d\n for entry in info_list:\n if entry[0] not in sub_d:\n sub_d[entry[0]] = {}\n if entry[1] not in sub_d[entry[0]]:\n sub_d[entry[0]][entry[1]] = {}\n sub_d = sub_d[entry[0]][entry[1]]\n\n current_dir = os.path.dirname(index_path)\n rel_dirname = os.path.relpath(current_dir, paths.db_root)\n if 'files' in index:\n for name, file in list(index['files'].items()):\n sub_d[name] = os.path.join(rel_dirname, file)\n if 'info' in index:\n sub_d.update(index['info'])", "def split_input_dirs(self, paths):\n\n for path in paths:\n yield path", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def pathfor( name, **matchdict ) :", "def select_index(index, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run(['devpi', 'use', '--clientdir', clientdir, index])", "def main(rdirs_filename,output_filename,grid_type,**grid_kwargs):\n\n rdirs = iodriver.load_field(rdirs_filename,\n iodriver.get_file_extension(rdirs_filename),\n \"Generic\", grid_type=grid_type,**grid_kwargs)\n nlat,nlong = rdirs.get_grid().get_grid_dimensions()\n paths_map = field.Field(create_hypothetical_river_paths_map(riv_dirs=rdirs.get_data(),\n lsmask=None,\n use_f2py_func=True,\n use_f2py_sparse_iterator=True,\n nlat=nlat,\n nlong=nlong),\n grid=grid_type,\n **grid_kwargs)\n iodriver.write_field(output_filename,paths_map,\n iodriver.get_file_extension(output_filename))", "def test_add_to_index(koan, assert_index_includes_added_file):\n koan.shell('')\n koan.shell('')\n koan.shell('')", "def create_indexes(create_func):\n\tfor set_name, index_path, index_name in zip(SET_NAMES, INDEX_PATHS, INDEX_NAMES):\n\t\tcreate_func(set_name, index_path, index_name)", "def pathMap(self):\n pass", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def get_dataloaders_with_index(path=\"../../data\", batch_size=64, num_labeled=250,\n lbl_idxs=None, unlbl_idxs=None, valid_idxs=None, which_dataset='cifar10', validation=True):\n\n # 
Define transform to normalize data\n normalize = transforms.Normalize(\n mean=[0.4914, 0.4822, 0.4465],\n std=[0.2023, 0.1994, 0.2010],\n )\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n if which_dataset == 'cifar10':\n train_set = CustomCIFAR10(root=path, train=True, transform=transform)\n test_set = CustomCIFAR10(root=path, train=False, transform=transform)\n elif which_dataset == 'svhn':\n train_set = datasets.SVHN(root=path, split='train', download=True, transform=transform)\n test_set = datasets.SVHN(root=path, split='test', download=True, transform=transform)\n else:\n raise Exception('Not supported yet')\n\n\n # Split indexes between labeled, unlabeled and validation\n if which_dataset == 'cifar10':\n training_labels = train_set.targets\n elif which_dataset == 'svhn':\n training_labels = train_set.labels\n else :\n training_labels = train_set.targets\n\n if validation:\n train_labeled_idxs, train_unlabeled_idxs, val_idxs = labeled_unlabeled_val_split(training_labels, int(num_labeled / 10))\n else:\n train_labeled_idxs, train_unlabeled_idxs = labeled_unlabeled_split(training_labels, int(num_labeled / 10))\n val_idxs = []\n\n # If indexes are provided, use them\n if lbl_idxs is not None:\n train_labeled_idxs = lbl_idxs\n train_unlabeled_idxs = unlbl_idxs\n val_idxs = valid_idxs\n\n # Define samplers using indexes\n train_labeled_sampler = SubsetRandomSampler(train_labeled_idxs)\n train_unlabeled_sampler = SubsetRandomSampler(train_unlabeled_idxs)\n val_sampler = SubsetRandomSampler(val_idxs)\n\n # Create data loaders\n train_labeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_labeled_sampler, num_workers=0)\n train_unlabeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_unlabeled_sampler, num_workers=0)\n val_loader = DataLoader(train_set, batch_size=batch_size, sampler=val_sampler, num_workers=0)\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)\n\n if not validation:\n val_loader = test_loader\n\n return train_labeled_loader, train_unlabeled_loader, val_loader, test_loader, train_labeled_idxs, train_unlabeled_idxs, val_idxs", "def createModuleIndex(metadataPaths):\n merger = Modulemd.ModuleIndexMerger.new()\n for path in metadataPaths:\n i = Modulemd.ModuleIndex.new()\n i.update_from_file(path, True)\n merger.associate_index(i, 0)\n return merger.resolve()", "def create_bam_file_index(infile, outfile):\n statement = 'samtools index %(infile)s %(outfile)s'\n P.run(statement,\n job_queue = P.PARAMS['queue'],\n job_memory = P.PARAMS['memory'])", "def simple_index():\n examples = [\n benchmark.Example(\n inputs=[\n [12, 34, 56, 78],\n -2,\n ],\n output=56,\n ),\n ]\n constants = []\n description = 'Index into a tensor'\n target_program = 'in1[in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_index')", "def index_siteroot(context):\n portal = getSite()\n portal.reindexObject()" ]
[ "0.6537887", "0.63956136", "0.6359709", "0.60461843", "0.58392173", "0.56591266", "0.5648433", "0.5629673", "0.56082153", "0.5570931", "0.5479381", "0.5462276", "0.5429903", "0.54241526", "0.5421316", "0.5394815", "0.5362421", "0.53275514", "0.5321562", "0.5312183", "0.5305858", "0.5297067", "0.5278812", "0.527369", "0.52294123", "0.5229323", "0.5227441", "0.52228075", "0.5213938", "0.52099156", "0.5206594", "0.520439", "0.51701194", "0.51542586", "0.51299876", "0.5127532", "0.51233464", "0.5116692", "0.5116327", "0.51086956", "0.5105839", "0.5093137", "0.50900704", "0.50894356", "0.50879174", "0.5080142", "0.5069008", "0.5064543", "0.5049861", "0.50430435", "0.5042067", "0.50199795", "0.501911", "0.5008093", "0.5008093", "0.5005845", "0.50020635", "0.49963188", "0.49921423", "0.4988158", "0.49842325", "0.498168", "0.4977084", "0.4976882", "0.49547967", "0.49535435", "0.49528468", "0.4943621", "0.4943416", "0.49424198", "0.49415055", "0.49399784", "0.4934942", "0.49251923", "0.49231493", "0.49067506", "0.48841485", "0.48729172", "0.48722395", "0.48696357", "0.48634204", "0.4858279", "0.48569384", "0.48567262", "0.4852493", "0.48513478", "0.48495162", "0.48434794", "0.48397633", "0.48389435", "0.48385924", "0.48369303", "0.48334625", "0.482412", "0.48220947", "0.48048678", "0.48027146", "0.47983277", "0.4795044", "0.4792742" ]
0.57987577
5
maps reads (bowtie to rRNA for legacy?) to extract ambiguous and uniquely mapped reads
def map_reads(SRA): #1. bowtie to rRNA print("Bowtie alignement on contaminant RNA...") cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam' output = subprocess.run(cmd_bowtie, shell=True) # 2. STAR to ref genome print("STAR alignement to yeast genome...") cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_' output = subprocess.run(cmd_STAR, shell=True) # 3. Samtools keep uniquely mapped reads and sort print("Samtools to keep uniquely mapped reads and sort...") cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam' output = subprocess.run(cmd_samtools1, shell=True) cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' output = subprocess.run(cmd_samtools2, shell=True) cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' output = subprocess.run(cmd_samtools3, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def map_reads_2genes(self, reads_file):\n start1 = 
time()\n read_starts = self.__get_reads_pos(reads_file)\n start2 = time()\n times = 0\n for ref_gene in self.ref_genes:\n times += 1\n if times % 500 == 0:\n print 'calculated %d genes read count ...' % times\n if len(read_starts[ref_gene.chrom]) == 0:\n continue\n starts = read_starts[ref_gene.chrom]\n for es, ed in zip(ref_gene.exon_starts, ref_gene.exon_ends):\n # rd = starts[(starts > es) & (starts < ed)].size\n rd = cal_read_count(es, ed, starts)\n ref_gene.read_count += rd\n\n print 'start calculate rpkm ...'\n mapped_read_count = self.mapped_read_count\n for ref_gene in self.ref_genes:\n # calculate RPKM\n ref_gene.read_density = \\\n ref_gene.read_count * 1000 * 1000 * 1000. / (ref_gene.mRNA_length * mapped_read_count)\n print 'got reads time: %f' % (time() - start1)\n print 'map reads time: %f' % (time() - start2)", "def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by_species.items():\n for replicon_id in replicon_ids:\n self._read_ids = set()\n # First, collect the ids of the aligned reads of\n # this replicon\n for alignment in bam.fetch(reference=replicon_id):\n self._read_ids.add(alignment.qname)\n # Then compare them to the alignments of each\n # replicon of the other organism(s)\n for (\n comp_org,\n comp_replicon_ids,\n ) in references_by_species.items():\n # Only compare replicons of different species\n if org == comp_org:\n continue\n for comp_replicon_id in comp_replicon_ids:\n comparison = sorted([replicon_id, comp_replicon_id])\n # Check if comparison of the two replicons\n # has been done already\n if comparison in done_replicon_comparison:\n continue\n done_replicon_comparison.append(comparison)\n # Compare all read ids of the comparison\n # replicon to the query replicon read ids\n for alignment in bam.fetch(\n reference=comp_replicon_id\n ):\n if alignment.qname in self._read_ids:\n crossmapped_reads.add(alignment.qname)\n no_of_crossmapped_reads = len(crossmapped_reads)\n return crossmapped_reads", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def _read_miraligner(fn):\n reads = defaultdict(realign)\n with open(fn) as in_handle:\n in_handle.readline()\n for line in in_handle:\n cols = line.strip().split(\"\\t\")\n iso = isomir()\n query_name, seq = cols[1], cols[0]\n chrom, reference_start = cols[-2], cols[3]\n iso.mirna = cols[3]\n subs, add, iso.t5, iso.t3 = cols[6:10]\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso.align = line\n iso.start = 
reference_start\n iso.subs, iso.add = _parse_mut(subs), add\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n reads[query_name].set_precursor(chrom, iso)\n return reads", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def map_to_mirbase(fastqs, bam_file, sample_id):\n read_groups = ['@RG\\\\tID:{rgid}\\\\tSM:{lb}\\\\tLB:{lb}'\\\n\t\t\t.format(rgid=sample_id+\"_\"+lane_id, lb=sample_id) for lane_id in ['L001', 'L002', 'L003', 'L004']]\n map_reads(fastqs, mirbase_reference, bam_file, read_groups, mapper='bowtie')", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = 
self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def test_extract_read_to_sample_mapping(self):\r\n\r\n labels = [\r\n 'S160_1 E86FECS01DW5V4 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0',\r\n 'S160_2 E86FECS01DW5V5 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0']\r\n\r\n expected = {'E86FECS01DW5V4': 'S160_1',\r\n 'E86FECS01DW5V5': 'S160_2'}\r\n\r\n self.assertEqual(extract_read_to_sample_mapping(labels),\r\n expected)", "def load_data_reps(fasta, bams, regions, features, strains, strains_unique, maxReads=10000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n strain2idx = {s: idx for idx, s in enumerate(strains_unique)}\n region2data = {}\n for ri, (ref, pos, strand) in enumerate(regions, 1):\n if type(strand)==float: strand=\"+\" # sometimes strand is missing, assume +\n start, end = pos-1, pos\n sys.stderr.write(\" %s / %s %s:%s-%s \\r\"%(ri, len(regions), ref, start, end))\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, maxReads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, _strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if _strand==strand:\n sdata = [[], []] #np.hstack(c) for c in calls]\n for c, s in zip(calls, strains): sdata[strain2idx[s]].append(np.hstack(c))\n # merge replicates\n region2data[(ref, pos, strand)] = (mer, [np.vstack(sd) for sd in sdata])\n return region2data", "def test_combine_mappings(self):\r\n\r\n self.tmp_dir = mkdtemp(dir=\"./\", suffix=\"/\")\r\n\r\n combine_mappings(\r\n fasta,\r\n denoiser_mapping,\r\n denoised_seqs,\r\n otu_picker_map,\r\n self.tmp_dir)\r\n\r\n observed_otu_map = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_otu_map.txt\")))\r\n\r\n expected_otu_map = \"\"\"1:\\tS1_1\\tS1_2\\tS2_4\\tS2_5\r\n2:\\tS2_3\\tS1_6\r\n\"\"\"\r\n self.assertEqual(observed_otu_map, expected_otu_map)\r\n\r\n observed_fasta = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_all.fasta\")))\r\n expected_fasta = \"\"\">S1_1 Read1\r\nAAA\r\n>S1_2 Read2\r\nTTT\r\n>S2_3 Read3\r\nGGG\r\n\"\"\"\r\n self.assertEqual(observed_fasta, expected_fasta)", "def look_for_read_in_sim(read, sim_info):\n\t\n\tsim_ints = {}\n\t\n\n\t# look through rows of sim info for matches\n\tfor sim_row in sim_info:\n\t\t\n\t\t# look in chimeric\n\t\tif read['merged']:\n\t\t\t\n\t\t\t# if read was merged, we just want to look for either read 1 or 2 annotated as chimeric\n\t\t\tfor annotated_read in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\t\t\t\n\t\t\tfor annotated_read in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\t\n\t\telse:\n\t\t\t# if read wasn't merged, check for this specific read number\n\t\t\tif 
f\"{read['qname']}/{read['num']}\" in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\n\t\t# look in discordant\n\t\tif read['qname'] in sim_row['left_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_left_discord\"] = sim_row\n\t\t\t\n\t\tif read['qname'] in sim_row['right_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_right_discord\"] = sim_row\n\t\t\t\n\treturn sim_ints", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n 
continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def map_reads(self, qc_dic):\n if self.aligner == \"hisat2\":\n build([hisat2.HisatMapW(fastq_dic=qc_dic, num_cpus=self.num_cpus,\n indexfile=self.hisat_index, workdir=self.workdir,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler)\n elif self.aligner in [\"STAR\", \"star\"]:\n build([star.map_starW(fastq_dic=qc_dic, num_cpus=self.num_cpus,\n stardb_dir=self.stardb_dir, workdir=self.workdir)],\n local_scheduler=self.local_scheduler)", "def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. 
Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())", "def extract_read_to_sample_mapping(labels):\r\n sample_id_mapping = {}\r\n\r\n re = compile(r'(\\S+) (\\S+)')\r\n for label in labels:\r\n tmatch = search(re, label)\r\n sample_id = tmatch.group(1)\r\n flowgram_id = tmatch.group(2)\r\n sample_id_mapping[flowgram_id] = sample_id\r\n\r\n return sample_id_mapping", "def caricaReadsEsIn(fileInput):\n\n\tidx_gene \t= 4 \n\tidx_chrom \t= 0\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 6\n\n\tdictReadsEsIn = {}\n\n\tlines = [x.strip('\\n').split('\\t') for x in open(fileInput)]\n\t\n\tfor riga in lines:\n\t\tgeneName \t= riga[idx_gene]\n\t\tchrom\t\t= riga[idx_chrom]\n\t\tstart\t\t= riga[idx_start]\n\t\tend\t\t\t= riga[idx_end]\n\t\treads\t\t= riga[idx_reads]\n\n\t\tif not geneName in dictReadsEsIn:\n\t\t\tdictReadsEsIn[geneName] = {}\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\t# Il primo campo indica se il cromosoma ha almeno..\n\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..una regione con reads\n\t\telif chrom not in dictReadsEsIn[geneName]:\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\n\t\telse:\n\t\t\tdictReadsEsIn[geneName][chrom][idx_start].append(start)\n\t\t\tdictReadsEsIn[geneName][chrom][idx_end].append(end)\n\t\t\tdictReadsEsIn[geneName][chrom][3].append(reads)\n\n\t\ti = len(dictReadsEsIn[geneName][chrom][3])\n\t\tif int(dictReadsEsIn[geneName][chrom][3][i-1]) != 0:\n\t\t\tdictReadsEsIn[geneName][chrom][0] = True\t\t\t\t\t\t\t# Indica se c'e' almeno una regione esonica/intronica\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# che mappa delle reads\n\n\t# Si eliminano i cromosomi che non hanno mappato reads ne' su introni\n\t# ne' su esoni (primo value del dizionario = FALSE)\n\t#\n\tgeneKeys = dictReadsEsIn.keys()\n\tfor geneName in geneKeys:\n\t\tchromKeys = dictReadsEsIn[geneName].keys()\n\t\tfor chrom in chromKeys:\n\t\t\tif not dictReadsEsIn[geneName][chrom][0]:\n\t\t\t\tdel dictReadsEsIn[geneName][chrom]\n\t\t\t\t# Si eliminano i geni che non hanno piu' cromosomi\n\t\t\t\t#\n\t\t\t\tif not dictReadsEsIn[geneName]:\n\t\t\t\t\tdel dictReadsEsIn[geneName]\n\t\t\t\t\tprint 'Il gene %s non presenta cromosomi con reads mappanti.\\n' % geneName,\n\n\treturn dictReadsEsIn", "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 
5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)", "def read_bowtie_output(self, filename):\n\t\tself.filenames.append(filename)\n\t\tmatches, read_count, phreds = {}, {}, {}\n\t\tf = open(filename)\n\t\tfor line in f:\n\t\t\traw = line.strip().split()\n\t\t\tif len(raw) == 5:\n\t\t\t\tid, strand, ref_seq_id, offset, seq = raw\n\t\t\t\tqual = [BOWTIE_PHRED_OFFSET] * len(seq) # pretend perfect quality\n\t\t\telse:\n\t\t\t\tid, strand, ref_seq_id, offset, seq, qual = raw[:6]\n\t\t\t\tqual = [ord(x) - BOWTIE_PHRED_OFFSET for x in qual]\n\t\t\tif seq in read_count:\n\t\t\t\tread_count[seq] += 1\n\t\t\t\tphreds[seq] += qual\n\t\t\telse:\n\t\t\t\tread_count[seq] = 1\n\t\t\t\tphreds[seq] = np.array(qual)\n\t\t\t\tmatches[id] = BowTieMatch(id, strand, ref_seq_id, int(offset), Seq(seq), None, None)\n\t\tprint >> sys.stderr, \"removing low quality reads with score < {0}\".format(MIN_PHRED_SCORE)\n\t\tremove_low_quality_for_matched(matches, read_count, phreds, MIN_PHRED_SCORE, None)\n\t\tfor id, m in matches.iteritems():\n\t\t\tgapped_pos = self.refmap.ungapped_to_gapped(m.ref_seq_id, m.offset)\n\t\t\tif gapped_pos not in self.M:\n\t\t\t\tself.M[gapped_pos] = []\n\t\t\tread = Read(id, seq=m.read.tostring(), ref_seq_id=m.ref_seq_id, offset=m.offset, \\\n\t\t\t\t\tcopy=read_count[m.read.tostring()])\n\t\t\tself.M[gapped_pos].append(read)", "def test_read_mapping_file_multiple(reference_multi):\n content, reference = reference_multi\n from_names = list(reference.keys())\n to_names = []\n block_names = []\n\n for k in reference:\n to_names.extend(reference[k].keys())\n for to in reference[k]:\n block_names.extend(reference[k][to].keys())\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n {(0, 'X1'): [(0, 'A')], (0, 'X2'): [(0, 'B')], (0, 'X3'): [(0, 'D')]},\n {(0, 'A'): {(0, 'X1'): 1.0}, (0, 'B'): {(0, 'X2'): 1.0}, (0, 'C'): {(0, 'X2'): 1.0}, (0, 'D'): {(0, 'X3'): 1.0}},\n [])\n mappings = vermouth.map_input.read_backmapping_file(content, force_fields)\n compare_old_new_mappings(mappings, reference)", "def remove_cds_and_remap_reads(self, cds_aln):\n super(GreedySolver, self).remove_cds_and_remap_reads(cds_aln)\n # Dictionary where key is read_id and value is cds alignment to which it maps.\n # If it does not map to any cds alignment then value is None.\n new_read_mappings = {}\n\n for aln_reg in cds_aln.aligned_regions.values():\n if aln_reg.active:\n # Find alternative cds alignment with highest coverage\n best_alt_cds_aln = None\n for alt_cds_aln in self._cds_aln_container.read2cds[aln_reg.read_id]:\n if best_alt_cds_aln == None or self._get_coverage(alt_cds_aln) > self._get_coverage(best_alt_cds_aln): \n best_alt_cds_aln = alt_cds_aln\n # Activate it in best alternative cds alignment (if there is one)\n if (best_alt_cds_aln != None):\n best_alt_cds_aln.aligned_regions[aln_reg.read_id].active = True\n # Add mapping to output dictionary\n new_read_mappings[aln_reg.read_id] = best_alt_cds_aln\n\n # Delete original cds alignment\n del self._cds_aln_container.cds_repository[cds_aln.cds]\n # Remove original cds alignment from read2cds\n for cds_alns in self._cds_aln_container.read2cds.values():\n if cds_aln in cds_alns: cds_alns.remove(cds_aln)\n\n # Force recalculation of coverage for updated cds alignments by forgeting coverage\n for updated_cds_aln in set(filter(lambda x: x != None, new_read_mappings.values())):\n del self._coverages[updated_cds_aln]\n\n return new_read_mappings", "def corrected_records(handle):\n\n seen = 
coll.defaultdict(set)\n for record in SeqIO.parse(handle, \"fasta\"):\n\n if not str(record.seq):\n continue\n\n # These are probably protein, so skip them\n if record.id.startswith(\"XM_\") or record.id.startswith(\"NM_\"):\n continue\n\n # Change given ids into a probably unique id\n given = record.id.replace(\",\", \"\")\n match = re.search(r\"gene RGD:(\\d+),\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must state gene id: %s\", record.description)\n gene = match.group(1)\n\n match = re.search(\"locus: (.+)$\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must have a locus\")\n location = match.group(1)\n\n record.id = \"{given}-{gene}-{location}\".format(\n given=given,\n gene=gene,\n location=location,\n )\n\n # Prevent writing duplicate entries\n if str(record.seq) in seen[record.id]:\n continue\n\n seen[record.id].add(str(record.seq))\n yield record", "def map_RE(self, index):\n if index is None:\n self.logger.error(\"The bowtie genome index must be specified to \"\n \"map restriction enzyme sites\")\n return None\n self.logger.info(\"Mapping restriction enyzme recognition sites\")\n # Start bowtie as a subprocess\n mapping = subprocess.Popen(\n self.arguments + [index, '-'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # Send the raw sequence of the DpnII recognition site\n mapping.stdin.write(b'GATC')\n mapping.stdin.close()\n bed = {}\n total = 0\n # Retrieve the alignments from bowtie\n with mapping.stdout as f:\n for line in f:\n line = line.decode('UTF-8').split('\\t')\n chrom, start = line[2], int(line[3])\n stop = start + 4\n if chrom not in bed:\n bed[chrom] = []\n bed[chrom].append((start, stop))\n total += 1\n # Log mapping results\n with mapping.stderr as f:\n for line in f:\n if line[0] == '#':\n continue\n self.logger.debug(line.decode('UTF-8').rstrip('\\n'))\n # Sort chromosome list by name/number\n chroms = numpy.array(list(bed))\n chrints = []\n for i in range(chroms.shape[0]):\n try:\n chrints.append((\n str(int(chroms[i].lstrip('chr'))).rjust(2, '0'),\n chroms[i]))\n except ValueError:\n chrints.append((chroms[i], chroms[i]))\n chrints.sort()\n chroms = []\n for i in range(len(chrints)):\n chroms.append(chrints[i][1])\n self.chroms = numpy.array(chroms)\n self.chr_indices = numpy.zeros(self.chroms.shape[0] + 1,\n dtype=numpy.int32)\n if self.focus is None:\n self.logger.info(\"Defaulting to a fragment-focused analysis\")\n self.focus = 'fragments'\n if self.focus == 'fragments':\n N = total - self.chroms.shape[0]\n else:\n N = total\n # Arrange data into single array with indexed chromosomes\n self.data = numpy.zeros(N, dtype=numpy.dtype([\n ('chr', numpy.int32), ('coords', numpy.int32, (2,)),\n ('treatment', numpy.int32), ('control', numpy.int32),\n ('score', numpy.float64), ('alignable', numpy.bool)]))\n self.data['alignable'].fill(True)\n for i in range(self.chroms.shape[0]):\n chrom = self.chroms[i]\n bed[chrom] = numpy.array(bed[chrom])\n bed[chrom] = bed[chrom][numpy.argsort(bed[chrom][:, 0]), :]\n start = self.chr_indices[i]\n if self.focus == 'fragments':\n self.chr_indices[i + 1] = start + bed[chrom].shape[0] - 1\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, 0] = bed[chrom][:-1, 1]\n self.data['coords'][start:stop, 1] = bed[chrom][1:, 0]\n else:\n self.chr_indices[i + 1] = start + bed[chrom].shape[0]\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, :] = bed[chrom]\n self.data['chr'][start:stop] = i", "def map_STAR(args):\n for type in 
['joined', 'merged']:\n for strand in ['watson', 'crick']:\n if strand == 'watson':\n n = 1\n else:\n n = 3\n STAR_index_dir = os.path.join(args.output_dir,'STAR_%s_%s'%(type, strand))\n cmd = \"STAR --runThreadN %s --genomeDir %s\"%(args.threads, STAR_index_dir)\n\n if type == 'merged':\n cmd += \" --readFilesIn %s\" % vars(args)['%s_%s' % (strand, type)]\n else:\n #TODO: define custom parameters for PE reads\n cmd += \" --readFilesIn %s \" % vars(args)['%s_%s_r1' % (strand, type)]\n cmd += \" %s\" % vars(args)['%s_%s_r2' % (strand, type)]\n\n cmd += \" --outSAMattributes NM MD AS --outSAMtype SAM\"\n cmd += \" --outFileNamePrefix %s\" % (os.path.join(args.output_dir,'%s_%s'%(strand, type)))\n cmd += \" --outReadsUnmapped Fastx\" #output of unmapped reads for inspection\n cmd += \" --scoreGapATAC -2 --scoreGapNoncan -2\"\n #outFilterScoreMinOverLread : float: sam as outFilterMatchNmin, but normalized to the read length (sum of mates’ lengths for paired-end reads)\n #outFilterMatchNminOverLread: float: same as outFilterScoreMin, but normalized to read length (sum of mates’ lengths for paired-end reads)\n\n # –outFilterMultimapNmax 1 int: maximum number of loci the read is allowed to map to. Alignments (all of\n # them) will be output only if the read maps to no more loci than this value.\n cmd += \" --outFilterMismatchNoverLmax 0.95\"\n # TODO: implement --alignEndsType endtoend mapping after joined reads are merged\n cmd += \"--outFilterMatchNminOverLread 0.9 --scoreGap -4 \" \\\n \" --alignEndsType EndToEnd\" \\\n \" --alignSoftClipAtReferenceEnds No\" \\\n \" --outSAMorder PairedKeepInputOrder\" \\\n \" --outFilterMultimapNmax 1\" \\\n \" --scoreInsOpen -1\" \\\n #make sure we have a bam file sorted by name\n if args.extraflags:\n cmd += ' %s' % args.extraflags\n log = \"run STAR for % strand on %s reads\"%(strand, type)\n run_subprocess([cmd],args, log)\n log = \"write final log of STAR to normal log\"\n cmd = \"cat %s \" % os.path.join(args.output_dir, '%s_%s' % (strand, type) + 'Log.final.out')\n run_subprocess([cmd], args, log)\n return args", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor 
read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def run_multimapping(SRA):\n\n if not os.path.exists(\"TMP/ambiguous_reads/\"):\n os.mkdir(\"TMP/ambiguous_reads/\")\n\n cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n \n # Keep only multi-mapping reads:\n cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'\n output = subprocess.run(cmd_filter, shell=True)\n\n cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'\n output = subprocess.run(cmd_samtools2, shell=True)", "def get_aui_str_mapping(input_file=None):\n input_file = os.path.join(DATA_DIR, \"umls\", \"MRCONSO.RRF\") if input_file is None else input_file\n mapping = {}\n with open(input_file, 'r') as f:\n for line in f:\n line_array = line.split(\"|\")\n if line_array[MRCONSO_SAB_INDEX] == 'MSH' and line_array[MRCONSO_SDUI_INDEX].strip() != \"\":\n mapping[line_array[MRCONSO_AUI_INDEX]] = line_array[MRCONSO_STR_INDEX]\n return mapping", "def read_activity_mappings_both(self):\n with open('oca.translate', \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 3)\n self.amappings[x[0]] = x[1]\n self.amappings2[x[0]] = x[2]", "def _update_read_allele_dictionary(self, read_id, pos, allele, allele_type, base_quality):\n 
ref_alignment_start, ref_alignment_stop, mapping_quality, is_reverse = self.read_info[read_id]\n # filter candidates based on read qualities\n if mapping_quality < MIN_MAP_QUALITY_FOR_CANDIDATE:\n return\n if base_quality < MIN_BASE_QUALITY_FOR_CANDIDATE:\n return\n\n if pos not in self.read_allele_dictionary:\n self.read_allele_dictionary[pos] = {}\n if (allele, type) not in self.read_allele_dictionary[pos]:\n self.read_allele_dictionary[pos][(allele, allele_type)] = 0\n\n self.read_allele_dictionary[pos][(allele, allele_type)] += 1", "def inferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping,\n sequences_mapping, protein_mapping, verbose=False, throwOnMismatch=False, fileType=None):\n import csv, os\n\n if fileType == \"simple\":\n return simpleInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"traml\":\n return tramlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"sqmass\":\n return sqlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n\n nomatch_found = set([])\n for file_nr, f in enumerate(aligned_pg_files):\n header_dict = {}\n if f.endswith('.gz'):\n import gzip \n filehandler = gzip.open(f,'rb')\n else:\n filehandler = open(f)\n reader = csv.reader(filehandler, delimiter=\"\\t\")\n header = next(reader)\n for i,n in enumerate(header):\n header_dict[n] = i\n\n if not \"align_origfilename\" in header_dict or not \"align_runid\" in header_dict:\n\n # Check whether we have a single mzML file and a single result\n # file. If so, simply map these to each other.\n if len(rawdata_files) == 1 and len(aligned_pg_files) == 1:\n mapping[\"0_0\"] = rawdata_files\n return\n\n print (header_dict)\n raise Exception(\"need column header align_origfilename and align_runid\")\n\n for this_row in reader:\n\n if len(this_row) == 0: \n continue\n\n # Get the transition mapping ... \n mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping)\n\n # 1. Get the original filename (find a non-NA entry) and the corresponding run id\n aligned_fname, aligned_id = getAlignedFilename(this_row, header_dict)\n\n if aligned_id is None or aligned_id in mapping:\n continue \n\n # 2. Go through all chromatogram input files and try to find\n # one that matches the one from align_origfilename\n for rfile in rawdata_files:\n\n # 2.1 remove common file endings from the raw data\n rfile_base = os.path.basename(rfile)\n for ending in [\".sqMass\", \".filter\", \".mzML\", \".chrom\"]:\n rfile_base = rfile_base.split(ending)[0]\n\n # 2.3 Check if we have a match\n if aligned_fname == rfile_base:\n if verbose: \n print(\"- Found match:\", os.path.basename(rfile), \"->\", os.path.basename(this_row[ header_dict[\"align_origfilename\"] ]))\n mapping[aligned_id] = [rfile]\n\n if not aligned_id in mapping:\n if True:\n nomatch_found.update( [aligned_fname] )\n if throwOnMismatch:\n raise Exception(\"Mismatch, alignment filename could not be matched to input chromatogram\")\n\n if verbose:\n print(\"- No match found for :\", list(nomatch_found), \"in any of\", \\\n [os.path.basename(rfile) for rfile in rawdata_files])\n print(\"- This may be a bad sign if you expected a match here. 
You might have \" +\\\n \"to either rename your files to have matching filenames \" +\\\n \"or provide an input yaml file describing the matching in detail.\")", "def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", "def find_fast5s_from_ids_readdb(readdb, read_ids, read_dirs, recursive=False):\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n if name.split(\"_\")[0] in read_ids:\n yield name, fast5", "def categorize_reads(reads='/home/arleg/ig_construction/alignment/overlapped',\n genes='/home/arleg/ig_construction/alignment/test_genes.fa',\n examples_true=True,\n ceil=101,\n score_function=pairwise2.align.localms,\n *args,\n **kwargs):\n # Initialize containers for output\n mapped = np.zeros(ceil, int)\n unmapped = np.zeros(ceil, int)\n\n # Read sequences of genes and reads\n genes = get_sequences(genes)\n reads = read_sequences(reads)\n\n # Align all genes vs all reads and score alignment.\n # Than allocate this read to mapped or unmapped for all thresholds depending on comparison of score and threshold\n for gene in genes:\n for read in reads:\n score = score_function(read, gene, *args, **kwargs)\n for threshold in range(ceil):\n if score >= threshold:\n mapped[threshold] += 1\n else:\n unmapped[threshold] += 1\n\n # If passed to function reads was really mapped than mapped is a true positives and unmapped is a false negatives,\n # otherwise unmapped is a true negatives and mapped 
is a false positives\n if examples_true:\n return mapped, unmapped\n return unmapped, mapped", "def seed_and_extend(read, k, h, index, genome):\n\n list_mapping_read = [] # List containing the positions tested to map the read on the genome\n #(will be used to not try to align a read twice at the same position)\n\n # Variables which will be returned\n position_mapping = len(genome) # Optimal position of mapping for the read\n nb_mismatch = int(h) + 1 # Number of mismatch in this mapping\n list_mismatch = [] # List of mismatch positions on the genome\n\n for kmer_index in range(len(read)-int(k)+1):\n kmer = read[kmer_index:kmer_index + int(k)]\n # For each kmer, tries to find the optimal position of mapping\n # for the read with this kmer as seed.\n position_mapping_kmer = len(genome)\n nb_mismatch_kmer = int(h) + 1\n list_mismatch_kmer = []\n\n list_occurences = sorted(index.get_occurences(kmer))\n\n if not list_occurences:\n continue\n\n for occurences in list_occurences:\n\n nb_mismatch_occu = 0 # For each occurence of the kmer,\n # count the number of mismatch during alignment\n\n list_mismatch_occu = [] # List of mismatch seen during alignment\n # of read with this occurence of the kmer\n\n index_char_genome = occurences - kmer_index # Index where to map in the genome\n index_char_read = 0 # Index of the character to compare\n\n if index_char_genome in list_mapping_read: # If position already tested,\n #do not test it a second time.\n continue\n else:\n list_mapping_read.append(index_char_genome) # Add this position to the list\n # so it won't be tested a second time for this read\n\n while nb_mismatch_occu <= int(h) \\\n and index_char_read < len(read) \\\n and index_char_genome < len(genome):\n if genome[index_char_genome] != read[index_char_read]:\n nb_mismatch_occu += 1\n list_mismatch_occu.append(index_char_genome)\n\n index_char_genome += 1\n index_char_read += 1\n\n\n # If the mapping of the read with this occurence of the read\n # is better than the previous one (less mismatch) : optimal values for kmer stored\n if nb_mismatch_occu < nb_mismatch_kmer:\n nb_mismatch_kmer = nb_mismatch_occu\n list_mismatch_kmer = list_mismatch_occu\n position_mapping_kmer = occurences - kmer_index\n\n # If the best mapping found for this kmer is better than the mapping\n # found with the previous kmer : optimal values for read stored\n if nb_mismatch_kmer < nb_mismatch \\\n or nb_mismatch_kmer == nb_mismatch \\\n and position_mapping_kmer < position_mapping:\n nb_mismatch = nb_mismatch_kmer\n list_mismatch = list_mismatch_kmer\n position_mapping = position_mapping_kmer\n\n return position_mapping, nb_mismatch, list_mismatch", "def test_read_mapping_file(case):\n reference = collections.defaultdict(lambda: collections.defaultdict(dict))\n for from_ff, to_ff in itertools.product(case.from_ff, case.to_ff):\n reference[from_ff][to_ff][case.name] = (\n case.mapping, case.weights, case.extra\n )\n\n ffs = case_to_dummy_ffs(case.from_ff + case.to_ff, [case.name], case.mapping,\n case.weights, case.extra)\n\n reference = vermouth.map_input._default_to_dict(reference)\n\n mappings = vermouth.map_input.read_backmapping_file(\n ['[ molecule ]'] + case.string.split('\\n'),\n ffs\n )\n compare_old_new_mappings(mappings, reference)", "def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = 
entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0", "def count_single_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def _annotate(reads, mirbase_ref, precursors):\n for r in reads:\n for p in reads[r].precursors:\n start = reads[r].precursors[p].start + 1 # convert to 1base\n end = start + len(reads[r].sequence)\n for mature in mirbase_ref[p]:\n mi = mirbase_ref[p][mature]\n is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])\n logger.debug((\"{r} {p} {start} {is_iso} {mature} {mi} {mature_s}\").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))\n if is_iso:\n reads[r].precursors[p].mirna = mature\n break\n return reads", "def matchmaker(samfile, semaphore=None):\n #reader = DictReader(samfile)\n labels = ['qname', 'flag', 'rname', 'pos', 'mapq', 'cigar', 'rnext', 'pnext',\n 'tlen', 'seq', 'qual']\n cached_rows = {}\n for line in samfile:\n if line.startswith('@'):\n continue # skip header line\n if 'HCV' not in line:\n continue # skip reads that mapped to another reference\n\n items = line.strip('\\n').split('\\t')\n row = dict(zip(labels, items[:11]))\n qname = row['qname']\n old_row = cached_rows.pop(qname, None)\n if old_row is None:\n cached_rows[qname] = row\n else:\n if semaphore is not None:\n semaphore.acquire()\n # current row should be the second read of the pair\n yield old_row, row", "def get_read_properties(line, merged_reads):\n\tparts = line.split('\\t')\n\n\tif int(parts[1]) & 64 != 0:\n\t\tread_num = \"1\"\n\telif int(parts[1]) & 128 != 0:\n\t\tread_num = \"2\"\n\telse:\n\t\traise ValueError(f\"read {read.qname} is neither read1 nor read2, but reads must be paired\")\n\t\n\tif parts[0] in merged_reads:\n\t\tmerged = True\n\telse:\n\t\tmerged = False\n\t\n\treturn {\n\t\t'qname' : parts[0],\t\n\t\t'num' : read_num,\n\t\t'merged' : merged\n\t}", "def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier", "def __init__(self, reads, fasta_handler, chromosome_name, region_start_position, region_end_position):\n self.region_start_position = region_start_position\n self.region_end_position = region_end_position\n self.chromosome_name = chromosome_name\n self.fasta_handler = fasta_handler\n self.reads = reads\n\n # the store which reads are creating candidates in that position\n self.coverage = defaultdict(int)\n self.rms_mq = defaultdict(int)\n self.mismatch_count = 
defaultdict(int)\n self.match_count = defaultdict(int)\n\n # the base and the insert dictionary for finding alleles\n self.positional_allele_dictionary = {}\n self.read_allele_dictionary = {}\n self.reference_dictionary = {}\n\n # few new dictionaries for image creation\n self.base_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.insert_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.delete_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.read_info = defaultdict(tuple)\n self.insert_length_info = defaultdict(int)\n self.delete_length_info = defaultdict(int)\n self.positional_read_info = defaultdict(list)\n\n # for image generation\n self.image_row_for_reads = defaultdict(tuple)\n self.image_row_for_ref = defaultdict(list)\n self.positional_info_index_to_position = defaultdict(tuple)\n self.positional_info_position_to_index = defaultdict(tuple)\n self.allele_dictionary = defaultdict(lambda: defaultdict(list))\n self.read_id_by_position = defaultdict(list)", "def read_activity_mappings(self):\n with open('act.translate', \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 3)\n self.amappings[x[0]] = x[1]", "def produce_rnaToProtein_refseqID_dict (inPath, outPath):\n idMap = {}\n with open(inPath, 'r') as f:\n next(f)\n for line in f:\n tax_id, gene_id, symbol, rsg, lrg, rna, t, protein, p, category = line.strip().split('\\t')\n if (len(rna) > 0) and (len(protein) > 0):\n idMap[rna] = protein\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 
2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads", "def count_allbest_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def reference_to_signal_partial_mapping(rb_map_string, reference_location, read_location, contig_name,\n ref_start, bas_start):\n\n a, b = basecall_to_reference_mapping(rb_map_string, ref_start, bas_start)\n f = h5py.File(read_location, 'r')\n grp = np.array(f.get('/Analyses/Basecall_1D_000/BaseCalled_template/Events'))\n bts = base_to_signal_mapping(grp)\n norm_sig = normalized_signal(grp)\n vectors_for_nn = np.array([], dtype=np.int64).reshape(0, cs.NN_VECTOR_LENGTH)\n\n for i in b:\n rs = i[0]\n re = i[1]\n bs = i[2]\n # R=B cast sekvencie\n ref = refrence_sequence_from_interval(reference_location, contig_name, rs, re)\n left_border = int(cs.LENGTH/2 - 2)\n right_border = 
int(cs.LENGTH/2 + 2)\n ref1 = np.concatenate(create_one_hot(ref))\n\n for x in range(0, len(ref)-cs.LENGTH, 5):\n start = bts[bs+x+left_border]\n end = bts[bs+x+right_border]\n number_of_signals = end - start + 1\n\n if number_of_signals < cs.SIGNAL_LENGTH:\n d = int((cs.SIGNAL_LENGTH - number_of_signals) / 2)\n signal_relevant_start = bs+x+left_border - d\n signal_relevant_end = bs+x + left_border + number_of_signals + d - 1 \\\n if number_of_signals + 2*d == cs.SIGNAL_LENGTH else \\\n bs + x + left_border + number_of_signals + d\n else:\n continue\n\n signal_relevant = []\n [signal_relevant.append(x) for x in norm_sig[signal_relevant_start:signal_relevant_end+1]]\n id_sig, std = ideal_signal_for_sequence(ref[x:x+cs.LENGTH])\n help_con = np.concatenate((ref1[4*x:4*(x+cs.LENGTH)], np.array(signal_relevant)), axis=0)\n help_con = np.concatenate((help_con, id_sig), axis=0)\n help_con = np.concatenate((help_con, [std]), axis=0)\n\n if len(help_con) != cs.NN_VECTOR_LENGTH:\n break\n vectors_for_nn = np.append(vectors_for_nn, help_con[None, :], axis=0)\n\n return vectors_for_nn", "def load_data_stoichometry(fasta, bams, regions, features, samples, fracs, \n maxReads=1000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n sam = pysam.AlignmentFile(bams[0])\n region2data = {}\n sample2idx = {s: i for i, s in enumerate(samples)}; print(sample2idx)\n for ri, (ref, pos, mt) in enumerate(regions, 1):\n sys.stderr.write(\" %s / %s %s:%s \\r\"%(ri, len(regions), ref, pos))\n start, end = pos-1, pos\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, maxReads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if strand==\"+\":\n sample2data = [np.hstack(c) for c in calls]\n # get min number of reads\n max_reads = int(min(map(len, sample2data))/3)#; print(ref, pos, mt, max_reads, [s.shape for s in sample2data])\n # first get 2 fully unmodified and 1 fully modified sample - those reads won't be used later on\n data_frac = [sample2data[sample2idx[mt]][max_reads:2*max_reads], # this will be used as 0 sample\n sample2data[sample2idx[mt]][-max_reads:], sample2data[sample2idx[\"wt\"]][-max_reads:], # those two will be training set\n ] \n # the get samples with given fractions of modified reads\n data_frac += [get_data_mix(sample2data[sample2idx[mt]], \n sample2data[sample2idx[\"wt\"]], frac, max_reads) \n for frac in fracs]\n region2data[(ref, pos)] = (mer, data_frac)\n return region2data", "def sam_parsed(sam_file):\n\n sam_file= open(sam_file)\n\n sam_dic = {}\n read_frame_dic ={}\n count = 0\n counter_1 = 0\n counter_2 = 0\n #.sam file parsed - crucial information was retrited (scaffold information)\n # start - the starting position of the locus_sequence\n #reading_frame - locus in the correct sense [0] or CR [16]\n #sequence_locus - locus sequence information\n\n for line in sam_file:\n\n if line.startswith(\"@\"):\n pass\n\n else:\n line_information = line.strip().split()\n scaffold = line_information[2]\n loci = line_information[0]\n mapping_beginning = line_information[3]\n read_frame = line_information [1]\n locus_sequence = line_information [9]\n cigar = line_information [5]\n if \"D\" in cigar or \"I\" in cigar:\n count += 1\n if \"D\" in cigar and \"I\" in cigar:\n counter_2 +=1\n a = count - counter_2\n if scaffold != \"*\":\n 
sam_dic[loci] = {\"scaffold\": scaffold,\n \"start\": int(mapping_beginning),\n \"reading_frame\": read_frame,\n \"sequence_locus\": locus_sequence,\n \"cigar\": cigar}\n counter_1 +=1\n print (\"Number of loci mappead on Cg: {}\".format(len(sam_dic)))\n\n print (\"Step 1 - Parse the .sam file -- Done\")\n\n #The sam_dic return a dictionary where the key is the locus(read) and the\n #value has the scaffold information, the position of the gene beginin,\n #the correct read frame of the gene, and finally the sequence of locus, in\n #the same reading frame of the Cg\n\n # \n # print (\"Number of locus with insertion or deletion \" + str(count))\n # print (\"Number of locus with insertion and deletion \" + str(counter_2))\n # print (\"Number of locus with problems \" + str(a))\n return sam_dic", "def parse_read_name_map_file(read_map, directories, recursive=False):\n if read_map.endswith(\"readdb\"):\n name_index = 0\n path_index = 1\n else:\n name_index = 1\n path_index = 0\n for dir_path in directories:\n assert os.path.isdir(dir_path), \"Path provided does not exist or is not a directory: {}\".format(dir_path)\n with open(read_map, 'r') as fh:\n for line in fh:\n split_line = line.split()\n if len(split_line) == 2:\n for dir_path in directories:\n if recursive:\n directories2 = get_all_sub_directories(dir_path)\n for dir_path2 in directories2:\n full_path = os.path.join(dir_path2, split_line[path_index])\n if os.path.exists(full_path):\n yield split_line[name_index], os.path.abspath(full_path)\n else:\n full_path = os.path.join(dir_path, split_line[path_index])\n if os.path.exists(full_path):\n yield split_line[name_index], os.path.abspath(full_path)", "def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == 
len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy", "def combine_genome_sequences(consensus_file):\n combined_genomes = {}\n with open(consensus_file) as f:\n for line in f:\n sequence = json.loads(line)\n uuid = sequence.pop(\"sample_identifier\")\n reference_organism = sequence.pop(\"organism\").lower()\n if combined_genomes.get(uuid):\n if combined_genomes[uuid].get(reference_organism):\n combined_genomes[uuid][reference_organism][\"masked_consensus\"].append(sequence)\n else:\n combined_genomes[uuid].update({reference_organism: {\"masked_consensus\": [sequence]}})\n else:\n combined_genomes.update({uuid: {reference_organism: {\"masked_consensus\": [sequence]}}})\n f.close()\n return combined_genomes", "def check_map(infile, disable_primer_check, barcode_type=\"golay_12\",\r\n added_demultiplex_field=None, has_barcodes=True):\r\n\r\n if barcode_type == \"variable_length\":\r\n var_len_barcodes = True\r\n else:\r\n var_len_barcodes = False\r\n\r\n if barcode_type == \"0\":\r\n has_barcodes = False\r\n\r\n # hds, id_map, dsp, run_description, errors, warnings\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(infile, has_barcodes=has_barcodes,\r\n disable_primer_check=disable_primer_check,\r\n added_demultiplex_field=added_demultiplex_field,\r\n variable_len_barcodes=var_len_barcodes)\r\n\r\n if errors:\r\n raise ValueError('Errors were found with mapping file, ' +\r\n 'please run validate_mapping_file.py to ' +\r\n 'identify problems.')\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n barcode_to_sample_id = {}\r\n\r\n primer_seqs_lens = {}\r\n all_primers = {}\r\n\r\n for sample_id, sample in id_map.items():\r\n if added_demultiplex_field:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper() + \",\" +\r\n sample[added_demultiplex_field]] = sample_id\r\n else:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id\r\n if not disable_primer_check:\r\n raw_primers = sample['LinkerPrimerSequence'].upper().split(',')\r\n\r\n if len(raw_primers[0].strip()) == 0:\r\n raise ValueError('No primers detected, please use the ' +\r\n '-p parameter to disable primer detection.')\r\n expanded_primers = expand_degeneracies(raw_primers)\r\n curr_bc_primers = {}\r\n for primer in expanded_primers:\r\n curr_bc_primers[primer] = len(primer)\r\n all_primers[primer] = len(primer)\r\n primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers\r\n\r\n return hds, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 
'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def get_aui_sdui_mapping(input_file=None):\n input_file = os.path.join(DATA_DIR, \"umls\", \"MRCONSO.RRF\") if input_file is None else input_file\n mapping = {}\n with open(input_file, 'r') as f:\n for line in f:\n line_array = line.split(\"|\")\n if line_array[MRCONSO_SAB_INDEX] == 'MSH' and line_array[MRCONSO_SDUI_INDEX].strip() != \"\":\n mapping[line_array[MRCONSO_AUI_INDEX]] = line_array[MRCONSO_SDUI_INDEX]\n return mapping", "def load_data(fasta, bams, regions, features, max_reads=1000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n sam = pysam.AlignmentFile(bams[0])\n region2data = {}\n for ri, (ref, pos, _) in enumerate(regions, 1):\n sys.stderr.write(\" %s / %s %s:%s \\r\"%(ri, len(regions), ref, pos))\n start, end = pos-1, pos\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, max_reads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if strand==\"+\":\n region2data[(ref, pos)] = (mer, [np.hstack(c) for c in calls])\n return region2data", "def readMappedData(options,phase):\n whole_mapped_data={}\n mapped_data_per_size_per_register={}\n alignment_filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(alignment_filename,\"r\")\n for line in fhr:\n try:\n read_id, strand, chromosome, coordinate, sequence, quality, mapped_times = line.strip().split()\n except ValueError:\n print(line)\n continue\n try:\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(sequence)\n except ValueError:\n print(line)\n continue\n if strand==\"-\":\n coordinate+=2\n if chromosome not in whole_mapped_data:\n whole_mapped_data[chromosome]={}\n if coordinate not in whole_mapped_data[chromosome]: \n whole_mapped_data[chromosome][coordinate]=0\n whole_mapped_data[chromosome][coordinate]+=1\n \n if phase!=length:\n continue\n if chromosome not in mapped_data_per_size_per_register:\n mapped_data_per_size_per_register[chromosome]={}\n register=coordinate % length\n if register not in mapped_data_per_size_per_register[chromosome]:\n mapped_data_per_size_per_register[chromosome][register]={}\n if coordinate not in mapped_data_per_size_per_register[chromosome][register]:\n mapped_data_per_size_per_register[chromosome][register][coordinate]=0\n mapped_data_per_size_per_register[chromosome][register][coordinate]+=1\n if mapped_data_per_size_per_register[chromosome][register][coordinate]>2:\n print(\"Trouble with alignments\",length,chromosome,register,coordinate)\n \n return whole_mapped_data,mapped_data_per_size_per_register", "def premrna_desc(gff3, fasta):\n seqs = {}\n 
for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0", "def mapSmallRNAReadsToGenomeUsingBowtie1(options):\n # Generate the bowtie index if one is not provided\n if options.bowtie_index==None:\n cmd=\"lib/bowtie/bowtie-build\"\n cmd+=\" --threads \"+options.CPU+\" \"\n cmd+=options.genome+\" \"\n cmd+=options.output_directory+\"/bowtie1_index\"\n os.system(cmd)\n bowtie1_index=options.output_directory+\"/bowtie1_index\"\n else:\n bowtie1_index=options.bowtie_index\n \n if os.path.exists(bowtie1_index+\".1.ebwtl\")==False:\n large_index=0\n else:\n large_index=1\n \n cmd=\"lib/bowtie/bowtie \"\n if large_index==1:\n cmd+=\" --large-index \"\n cmd+=\" -f -m \"\n cmd+=str(options.map_limit)\n cmd+=\" -v 0 -a -p \"+options.CPU+\" \"\n cmd+=bowtie1_index+\" \"\n cmd+=options.consolidated_filename+\" \"\n cmd+=\" \"+options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt \"\n cmd+=\" 2> \"+options.output_directory+\"/\"+options.input_filename+\"_bowtie1.alignment \"\n os.system(cmd)", "def _do_mapping(self):\n pass", "def remap(bamfn, threads, bwaref):\n sai1fn = bamfn + \".1.sai\"\n sai2fn = bamfn + \".2.sai\"\n samfn = bamfn + \".sam\"\n refidx = bwaref + \".fai\"\n\n sai1args = ['bwa', 'aln', bwaref, '-q', '5', '-l', '32', '-k', '3', '-t', str(threads), '-o', '1', '-f', sai1fn, '-b1', bamfn]\n sai2args = ['bwa', 'aln', bwaref, '-q', '5', '-l', '32', '-k', '3', '-t', str(threads), '-o', '1', '-f', sai2fn, '-b2', bamfn]\n samargs = ['bwa', 'sampe', '-P', '-f', samfn, bwaref, sai1fn, sai2fn, bamfn, bamfn]\n bamargs = ['samtools', 'view', '-bt', refidx, '-o', bamfn, samfn] \n\n print \"mapping 1st end, cmd: \" + \" \".join(sai1args)\n subprocess.call(sai1args)\n print \"mapping 2nd end, cmd: \" + \" \".join(sai2args)\n subprocess.call(sai2args)\n print \"pairing ends, building .sam, cmd: \" + \" \".join(samargs)\n subprocess.call(samargs)\n print \"sam --> bam, cmd: \" + \" \".join(bamargs)\n 
subprocess.call(bamargs)\n\n sortbase = bamfn + \".sort\"\n sortfn = sortbase + \".bam\"\n sortargs = ['samtools','sort','-m','10000000000',bamfn,sortbase]\n print \"sorting, cmd: \" + \" \".join(sortargs)\n subprocess.call(sortargs)\n os.rename(sortfn,bamfn)\n\n indexargs = ['samtools','index',bamfn]\n print \"indexing, cmd: \" + \" \".join(indexargs)\n subprocess.call(indexargs)\n\n # cleanup\n os.remove(sai1fn)\n os.remove(sai2fn)\n os.remove(samfn)", "def GenomeReader(GenomeFile):\n GenomeScaffolds = {}\n key = []\n with open(GenomeFile, 'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(\">\"):\n NamedSeq = line.replace('>', '')\n key.append(NamedSeq)\n GenomeScaffolds[NamedSeq] = \"\"\n else:\n GenomeScaffolds[NamedSeq] += line\n return GenomeScaffolds # Returns a Dictionary object", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def process_reads(self, ref, reads, ref_name, ref_offset):\n # A list of candidate positions mapping to their number of supporting reads.\n candidates = defaultdict(int)\n\n for read in reads:\n for ref_pos in self.process_read(ref, read, ref_offset):\n candidates[ref_pos] += 1\n return self.windows(candidates, ref_name, ref_offset)", "def get_chromosome_reads(bam):\n stats = bam.get_index_statistics()\n mapped_reads = {}\n for stat in stats:\n mapped_reads[stat[0]] = [stat[1], stat[2], stat[3]]\n if stat[2] != 0:\n warnings.warn(\"Unmapped reads found in chromosome \" + stat[0])\n\n return mapped_reads", "def get_reverse_primers(id_map):\r\n\r\n rev_primers = {}\r\n for n in id_map.items():\r\n # Generate a dictionary with Barcode:reverse primer\r\n # Convert to reverse complement of the primer so its in the\r\n # proper orientation with the input fasta sequences\r\n rev_primers[n[1]['BarcodeSequence']] =\\\r\n [str(DNASequence(curr_rev_primer).rc()) for curr_rev_primer in\r\n (n[1]['ReversePrimer']).split(',')]\r\n\r\n return rev_primers", "def find(self, read, aa=None):\n aa = aa or ['C']\n\n for i, base in enumerate(read.sequence):\n if base in aa:\n yield Landmark(self.NAME, self.SYMBOL, i, 1)", "def code_mapper(file, idx):\n with open('./I94_SAS_Labels_Descriptions.SAS') as f:\n f_content = f.read()\n f_content = f_content.replace('\\t', '')\n f_content2 = f_content[f_content.index(idx):]\n f_content2 = 
f_content2[:f_content2.index(';')].split('\\n')\n f_content2 = [i.replace(\"'\", \"\") for i in f_content2]\n dic = [i.split('=') for i in f_content2[1:]]\n dic = dict([i[0].strip(), i[1].strip()] for i in dic if len(i) == 2)\n return dic", "def map_secondary_2_primary_ANs(ids_2_map, Secondary_2_Primary_IDs_dict=None, read_from_flat_files=False):\n if Secondary_2_Primary_IDs_dict is None:\n ### don't read this from flat files (VERY slow) if there is a DB and low_memory then use DB\n Secondary_2_Primary_IDs_dict = get_Secondary_2_Primary_IDs_dict_from_sec(ids_2_map, read_from_flat_files)\n Secondary_2_Primary_IDs_dict_userquery = {}\n for id_ in ids_2_map:\n try:\n prim = Secondary_2_Primary_IDs_dict[id_]\n except KeyError:\n prim = False\n if prim:\n Secondary_2_Primary_IDs_dict_userquery[id_] = prim\n return Secondary_2_Primary_IDs_dict_userquery", "def build_inverse_barcode_map(seqs):\r\n inverse_map = {}\r\n map_count = defaultdict(int)\r\n for (label, seq) in seqs:\r\n (map_id, seq_id) = label.split()[:2]\r\n map_id = map_id.split(\"_\")[0]\r\n inverse_map[seq_id] = map_id\r\n map_count[map_id] += 1\r\n\r\n return (inverse_map, map_count)", "def map():", "def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff < three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments", "def get_mapper(p_assemblyreport, id_from=None, id_to='sn'):\n\n # find correct colum for convertion\n id2col = {'sn':0,\n 'gb':5,\n 'rs':6,\n 'au':7,\n 'uc':9}\n to_col = id2col[id_to]\n\n # format specified by user\n if id_from:\n from_col = id2col[id_from]\n # guess format\n else:\n l_id_from = [id for id in id2col if id!= id_from]\n\n\n\n d_from2to = {}\n with open(p_assemblyreport)as f:\n for line in f:\n if line.startswith('#'):\n continue\n\n sp = line.split('\\t')\n\n\n try:\n id_to = sp[to_col]\n except:\n id_to = 'NA'\n\n # user specified the id_from\n if id_from:\n cur_id_from = sp[from_col]\n d_from2to[cur_id_from] = [id_to, id_from]\n\n # guessing mode\n else:\n for ite_id_from in l_id_from:\n cur_from_col = id2col[ite_id_from]\n\n\n try:\n cur_id_from = sp[cur_from_col]\n except:\n cur_id_from = 'NA'\n\n d_from2to[cur_id_from] = [id_to, ite_id_from]\n\n\n\n\n return d_from2to", "def TranslateRNA(rna):\n rna = rna.lower().replace('\\n', '').replace(' ', '')\n \n ### codon table ###\n \n bases = [ 'u', 'c', 'a', 'g']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n aminoacids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n codon_table = dict(zip(codons, aminoacids))\n \n ### codon lookup ### \n \n pos = 0\n protein = ''\n while pos < len(rna)-2:\n codon = rna[pos:pos+3]\n for key in codon_table:\n if codon == key:\n if codon_table[key] != '*':\n protein = protein + codon_table[key]\n pos +=3\n else:\n pos +=3\n break \n return (protein)", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def test_read_mapping_directory(ref_mapping_directory):\n dirpath, ref_mappings = ref_mapping_directory\n 
from_names = list(ref_mappings.keys())\n to_names = []\n block_names = []\n mapping = {}\n weights = {}\n\n\n for k in ref_mappings:\n to_names.extend(ref_mappings[k].keys())\n for to in ref_mappings[k]:\n block_names.extend(ref_mappings[k][to].keys())\n for block_name in ref_mappings[k][to]:\n m, w, _ = ref_mappings[k][to][block_name]\n mapping.update(m)\n weights.update(w)\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n mapping, weights, [])\n\n\n mappings = vermouth.map_input.read_mapping_directory(dirpath, force_fields)\n compare_old_new_mappings(mappings, ref_mappings)", "def read_fasta_to_dictionary(genome_file):\n filename = genome_file\n dct = {}\n\n id_name = \"\"\n sequence = \"\"\n first_pass = 1\n\n read_fh = open(filename, 'r')\n for i, line in enumerate(read_fh):\n line = line.rstrip()\n if re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r',', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n\n elif re.search(r'^>(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r'(\\d+)_', \"\", id_name)\n id_name = re.sub(r'.*\\|', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n else:\n sequence += line\n dct[id_name] = sequence\n\n return dct", "def process(self,\n read: reads_pb2.Read) -> Iterable[Tuple[str, reads_pb2.Read]]:\n pacbio_molecule_name = preprocess_utils.get_pacbio_molecule_name(\n read.fragment_name)\n if pacbio_molecule_name is not None:\n yield pacbio_molecule_name, read\n else:\n raise ValueError(str(read))", "def test_get_representatives(self):\r\n\r\n result = \"\"\">1: 5\r\nABABABA\r\n>3: 1\r\nBABA\r\n>4: 1\r\nABABAA\r\n>8: 2\r\nBABBA\r\n\"\"\"\r\n seqs = self.data.iteritems\r\n mapping = self.mapping\r\n test_result = list(get_representatives(mapping, seqs()))\r\n test_result_as_fasta = \"\".join(\r\n map(lambda a: a.to_fasta(), test_result))\r\n\r\n self.assertEqual(test_result_as_fasta, result)\r\n\r\n # another example\r\n mapping = {'1': ('a', 'b', 'c'),\r\n '2': ('d', 'e', 'f')}\r\n seqs = [('1', \"ACGT\"), ('2', \"TAGC\"), ('a', \"TTTTT\")]\r\n\r\n observed = list(get_representatives(mapping, seqs))\r\n expected = [BiologicalSequence(\"ACGT\", id=\"1\"),\r\n BiologicalSequence(\"TAGC\", id='2')]\r\n self.assertEqual(observed, expected)", "def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" in header:\r\n rev_primer_ix = header.index(\"ReversePrimer\")\r\n else:\r\n raise IndexError((\"Mapping file is missing ReversePrimer field.\"))\r\n\r\n iupac = {'A': 'A', 'T': 'T', 'G': 'G', 'C': 'C', 'R': '[AG]', 'Y': '[CT]',\r\n 'S': '[GC]', 'W': '[AT]', 'K': '[GT]', 'M': '[AC]', 'B': '[CGT]',\r\n 'D': '[AGT]', 'H': '[ACT]', 'V': '[ACG]', 'N': '[ACGT]'}\r\n\r\n raw_forward_primers = set([])\r\n raw_forward_rc_primers = set([])\r\n raw_reverse_primers = set([])\r\n raw_reverse_rc_primers = set([])\r\n\r\n for line in mapping_data:\r\n # Split on commas to handle pool of primers\r\n raw_forward_primers.update([upper(primer).strip() for\r\n primer in 
line[primer_ix].split(',')])\r\n raw_forward_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_forward_primers])\r\n raw_reverse_primers.update([upper(primer).strip() for\r\n primer in line[rev_primer_ix].split(',')])\r\n raw_reverse_rc_primers.update([str(DNA(primer).rc()) for\r\n primer in raw_reverse_primers])\r\n\r\n if not raw_forward_primers:\r\n raise ValueError((\"No forward primers detected in mapping file.\"))\r\n if not raw_reverse_primers:\r\n raise ValueError((\"No reverse primers detected in mapping file.\"))\r\n\r\n # Finding the forward primers, or rc of reverse primers indicates forward\r\n # read. Finding the reverse primer, or rc of the forward primers, indicates\r\n # the reverse read, so these sets are merged.\r\n raw_forward_primers.update(raw_reverse_rc_primers)\r\n raw_reverse_primers.update(raw_forward_rc_primers)\r\n\r\n forward_primers = []\r\n reverse_primers = []\r\n for curr_primer in raw_forward_primers:\r\n forward_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n for curr_primer in raw_reverse_primers:\r\n reverse_primers.append(compile(''.join([iupac[symbol] for\r\n symbol in curr_primer])))\r\n\r\n return forward_primers, reverse_primers", "def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading", "def read_uc(input, tax_separator, tax_sense):\n with open(input, 'r') as uc_file:\n dict_taxs = {}\n for line in uc_file:\n line = line.strip().split()\n is_hit = line[0] == 'H' # check if line is for a hit or for a no hit ('H' vs 'N', respectively)\n if is_hit:\n read_id = line[8] # take read id, located in 9th column of the file\n if tax_sense == 'asc':\n taxonomy = line[9].split(tax_separator) # take taxonomy column and split it\n elif tax_sense == 'desc':\n taxonomy = line[9].split(tax_separator)[::-1] # take taxonomy and reverse order\n try:\n dict_taxs[read_id]['hits'] += 1 # sum hits for each sequence\n dict_taxs[read_id]['taxonomy'].append(taxonomy) # add taxonomy to taxonomy dict\n except KeyError: # fires when a read_id is read for the first time\n percentage_identity = line[3] # take percentage_identity to the database\n cigar_alignment = line[7]\n dict_taxs[read_id] = {'hits': 1,\n 'taxonomy': [taxonomy],\n 'perc_id': percentage_identity,\n 'alignment': cigar_alignment}\n return dict_taxs", "def process(\n self,\n name_and_reads: Tuple[str, List[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, 
List[reads_pb2.Read]]]:\n name, reads = name_and_reads[0], list(name_and_reads[1])\n reads_copy = copy.deepcopy(reads)\n # Indent sequence strings by starting position.\n for read in reads_copy:\n indent = dc_constants.GAP_OR_PAD * read.alignment.position.position\n read.aligned_sequence = indent + read.aligned_sequence\n indented_cigar_str = indent + struct_utils.get_string_field(\n read.info, 'expanded_cigar')[0]\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n indented_cigar_str)\n yield name, reads_copy", "def get_mapping_details(mapping_fp,\r\n suppress_barcode_checks=False,\r\n suppress_primer_checks=False):\r\n\r\n mapping_f = open(mapping_fp, \"U\")\r\n\r\n # Only using the id_map and the errors from parsing the mapping file.\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(mapping_f)\r\n\r\n mapping_f.close()\r\n\r\n # Should raise errors for barcodes or primers unless suppressed, and\r\n # should raise errors for headers or duplicate SampleIDs in any case.\r\n loc_bcs = \",1\"\r\n loc_primers = \",2\"\r\n if errors:\r\n for curr_error in errors:\r\n # Halt when header has error\r\n if curr_error.startswith(\"Found header field\"):\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n elif curr_error.endswith(loc_bcs):\r\n # Halt for barcode errors unless suppressed\r\n if suppress_barcode_checks:\r\n continue\r\n else:\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n elif curr_error.endswith(loc_primers):\r\n # Halt for primer errors unless suppressed\r\n if suppress_primer_checks:\r\n continue\r\n else:\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n # Raise error on duplicate sample IDs\r\n elif curr_error.startswith(\"Duplicate SampleID\"):\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n\r\n # create dict of dicts with SampleID:{each header:mapping data}\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n sample_ids = id_map.keys()\r\n\r\n barcode_seqs = []\r\n raw_linkerprimer_seqs = []\r\n\r\n for curr_id in id_map:\r\n if not suppress_barcode_checks:\r\n barcode_seqs.append(id_map[curr_id]['BarcodeSequence'])\r\n if not suppress_primer_checks:\r\n raw_linkerprimer_seqs.append(\r\n id_map[curr_id]['LinkerPrimerSequence'])\r\n\r\n # remove duplicates\r\n raw_linkerprimer_seqs = set(raw_linkerprimer_seqs)\r\n\r\n linker_primer_seqs = expand_degeneracies(raw_linkerprimer_seqs)\r\n\r\n return set(sample_ids), set(barcode_seqs), set(linker_primer_seqs)", "def test_parse_denoiser_mapping(self):\r\n actual = parse_denoiser_mapping(self.denoiser_mapping1)\r\n expected = {'Read1': ['Read1', 'Read4', 'Read5 some comment'],\r\n 'Read2': ['Read2'],\r\n 'Read3': ['Read3', 'Read6']}\r\n self.assertDictEqual(actual, expected)", "def _parse_alignment( alignment ):\n log.info(\"Parsing subread locations from alignment data\")\n locations = {}\n for entry in BlasrReader( alignment ):\n if entry.tstrand == '1':\n start = int(entry.tlength) - int(entry.tend)\n end = int(entry.tlength) - int(entry.tstart)\n else:\n start = int(entry.tstart)\n end = int(entry.tend)\n locations[entry.qname] = (start, end)\n return 
locations", "def parse_denoiser_mapping(denoiser_map):\r\n result = {}\r\n for line in denoiser_map:\r\n line = line.strip().split('\\t')\r\n denoised_id = line[0].rstrip(':')\r\n original_ids = [denoised_id] + line[1:]\r\n if denoised_id in result:\r\n # just a healthy dose of paranoia\r\n raise ValueError(\"Duplicated identifiers in denoiser mapping file: \"\r\n \"are you sure you merged the correct files?\")\r\n else:\r\n result[denoised_id] = original_ids\r\n return result", "def count_mapped_bases(bam):\n\n for read in open_bam(bam):\n if not read.is_secondary:\n count = Counter(read.query_alignment_sequence)\n yield(count)", "def map_primary_2_secondary_ANs(ids_2_map, Primary_2_Secondary_IDs_dict=None, read_from_flat_files=False, ENSPs_only=False, no_ENSPs=False):\n if Primary_2_Secondary_IDs_dict is None:\n ### don't read this from flat files (VERY slow) if there is a DB and low_memory then use DB\n Primary_2_Secondary_IDs_dict = get_Primary_2_Secondary_IDs_dict_from_prim(ids_2_map, read_from_flat_files)\n Primary_2_Secondary_IDs_dict_userquery = {}\n for id_ in ids_2_map:\n try:\n sec = Primary_2_Secondary_IDs_dict[id_]\n except KeyError:\n sec = False\n if sec: # sec is a list\n if ENSPs_only:\n for sec_id in sec:\n try:\n if int(sec_id.split(\".\")[0]) > 1:\n Primary_2_Secondary_IDs_dict_userquery[id_] = sec_id\n except:\n pass\n elif no_ENSPs:\n for sec_id in sec:\n try:\n if not int(sec_id.split(\".\")[0]) > 1:\n Primary_2_Secondary_IDs_dict_userquery[id_] = sec_id\n except:\n pass\n else: # take all IDs\n Primary_2_Secondary_IDs_dict_userquery[id_] = sec\n return Primary_2_Secondary_IDs_dict_userquery", "def add_read_to_vec_using_ref(self, read):\n\t\ti = read.offset\n\t\tfor p in self.refmap.gap_map[read.ref_seq_id][read.offset:(read.offset+len(read.seq))]:\n\t\t\ts = self.refmap.fasta[read.ref_seq_id].seq[i]\n\t\t\tif s=='U': s='T'\n\t\t\tif s not in ('A','T','C','G'): s='N'\n\t\t\tDF.add_to_vec(self, nt=s, positions=[p], counts=[read.copy])\n\t\t\ti += 1", "def test_check_map(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])", "def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict", "def read_mapping_file(map_file):\n new_name_old_name = {}\n with open(map_file, 'r') as z:\n for line in z:\n clean = line.strip().split()\n\n new_name_old_name[clean[1]] = clean[0]\n\n return new_name_old_name" ]
[ "0.6826506", "0.66215014", "0.6525083", "0.6418822", "0.6416063", "0.631023", "0.6223504", "0.6182403", "0.60843307", "0.6026228", "0.6004919", "0.5979466", "0.5960708", "0.59569067", "0.59227425", "0.59207463", "0.5912195", "0.58296555", "0.5819795", "0.5811446", "0.5797608", "0.5784261", "0.5753471", "0.5741667", "0.5721999", "0.5694153", "0.56585646", "0.5657997", "0.5639698", "0.5630461", "0.562285", "0.5621822", "0.5612821", "0.56114244", "0.559594", "0.5590355", "0.5526533", "0.552227", "0.5518587", "0.5491718", "0.54787993", "0.54579705", "0.54544294", "0.5445419", "0.5437579", "0.5435407", "0.54271317", "0.5399586", "0.53961056", "0.53939515", "0.53757334", "0.5368846", "0.53687036", "0.5367005", "0.53485465", "0.5307391", "0.5299321", "0.52961624", "0.5287918", "0.52842575", "0.5279658", "0.52751845", "0.52673095", "0.52655745", "0.5259729", "0.5241627", "0.52407223", "0.5239915", "0.5233735", "0.5233636", "0.52192354", "0.52173465", "0.5210763", "0.5203864", "0.51943237", "0.51925856", "0.51886743", "0.5187459", "0.51826036", "0.51805097", "0.5177142", "0.5167765", "0.51672107", "0.5163151", "0.51391", "0.51350904", "0.51327986", "0.5116324", "0.509392", "0.5082382", "0.50822663", "0.50762016", "0.50587606", "0.5056264", "0.50543106", "0.50520056", "0.5046092", "0.50400794", "0.5039004", "0.5036501" ]
0.6700885
1
wrapper to run scikitribo from the same pipeline requires local install of modified scikitribo toolbox requires local install of all dependencies of scikitribo environment (see conda environment file)
def run_scikit_ribo(SRA, genome_fasta, genome_gtf): # 3. Scikit-ribo index print("Building scikit-ribo index") if not os.path.exists(SCIKIT_DIR): os.mkdir(SCIKIT_DIR) cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-build.py' + ' ' + '-g' + ' ' + genome_gtf + ' ' + '-f' + ' ' + genome_fasta + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + SCIKIT_DIR output = subprocess.run(cmd_scikit, shell=True) print("scikit-ribo-run.py...") cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-run.py' + ' ' + '-i' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + '-f' + ' ' + SCIKIT_DIR + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + 'TMP/scikit_'+SRA output = subprocess.run(cmd_scikit, shell=True) print("plot_ribo_density_dict.py...") cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'plot_ribo_density_dict_noCDT.py' + ' ' + '-i' + ' ' + TMP_DIR+'scikit_'+SRA+'/riboseq_input.txt' + ' ' + '-g' + ' ' + 'all' + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA #+'_profiles' output = subprocess.run(cmd_scikit, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidia --shm-size=1gb\"\n # ).add_env(TORCH_HOME=\"/tmp\")\n \n crisis_container = Container(\n 'galaxy_container',\n Container.SINGULARITY,\n image = str(Path(\".\").parent.resolve() / \"containers/crisis-computing_latest.sif\"),\n image_site = \"local\",\n mounts=[\"${DONUT_USER_HOME}:${DONUT_USER_HOME}\"]\n ).add_env(TORCH_HOME=\"/tmp\")\n\n\n # preprocessing scripts\n preprocess_images = Transformation(\n \"preprocess_images\",\n site = \"local\",\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_images.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n preprocess_tweets = Transformation(\n \"preprocess_tweets\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_tweets.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n \n # HPO, training and inference scripts for ResNet-50\n hpo_train_resnet = Transformation(\n \"hpo_train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_resnet = Transformation(\n \"train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n resnet_inference = Transformation(\n \"resnet_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/resnet_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # HPO, training and inference scripts for Bi-LSTM\n\n hpo_train_bilstm = Transformation(\n \"hpo_train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n #.add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_bilstm = Transformation(\n \"train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n \n bilstm_inference = Transformation(\n \"bilstm_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/bilstm_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G 
-m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # late fusion script\n late_fusion = Transformation(\n \"late_fusion\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/late_fusion.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n\n tc.add_containers(crisis_container)\n tc.add_transformations(preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion)\n tc.write()\n\n return preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion", "def bootstrap():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements()", "def ci(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)\n run_yapf(session, True)\n run_all_linters(session)\n run_pytest_units(session)\n run_pytest_integrations(session)", "def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)", "def bootstrap():\n local('virtualenv fabric_factory/ve')", "def bootstrap_aws():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements_aws()", "def main():\n\n # Force scripts to not use graphical output\n env = dict()\n env.update(os.environ)\n\n if \"DISPLAY\" not in os.environ:\n # No DISPLAY, set suitable default matplotlib backend as pyplot is used\n env[\"MPLBACKEND\"] = \"Agg\"\n\n if \"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\" not in os.environ:\n env[\"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"] = str(multiprocessing.cpu_count())\n\n # Prevent user site packages from interfering with SCT dependencies (See issue #3067)\n env[\"PYTHONNOUSERSITE\"] = \"True\"\n\n command = os.path.basename(sys.argv[0])\n pkg_dir = os.path.dirname(sct.__file__)\n\n script = os.path.join(pkg_dir, \"scripts\", \"{}.py\".format(command))\n assert os.path.exists(script)\n\n cmd = [sys.executable, script] + sys.argv[1:]\n\n mpi_flags = os.environ.get(\"SCT_MPI_MODE\", None)\n if mpi_flags is not None:\n if mpi_flags == \"yes\": # compat\n mpi_flags = \"-n 1\"\n cmd = [\"mpiexec\"] + mpi_flags.split() + cmd\n\n os.execvpe(cmd[0], cmd[0:], env)", "def superconductor(local_dir, cpus, gpus, num_parallel, num_samples, oracle):\n\n # Final Version\n\n from design_baselines.autofocused_cbas import autofocused_cbas\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n 
tune.run(autofocused_cbas, config={\n \"logging_dir\": \"data\",\n \"normalize_ys\": True,\n \"normalize_xs\": True,\n \"task\": f\"Superconductor-{oracle}-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"bootstraps\": 5,\n \"val_size\": 200,\n \"ensemble_batch_size\": 100,\n \"vae_batch_size\": 100,\n \"embedding_size\": 256,\n \"hidden_size\": 256,\n \"num_layers\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"ensemble_lr\": 0.0003,\n \"ensemble_epochs\": 100,\n \"latent_size\": 32,\n \"vae_lr\": 0.0003,\n \"vae_beta\": 1.0,\n \"offline_epochs\": 200,\n \"online_batches\": 10,\n \"online_epochs\": 10,\n \"autofocus_epochs\": 10,\n \"iterations\": 20,\n \"percentile\": 80.0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def main():\n get_obofoundry(force_download=True)", "def slurm(ctx, alloc, nodes, memory, walltime, feature, conda_env, module,\n stdout_path, verbose):\n\n name = ctx.obj['NAME']\n tech = ctx.obj['TECH']\n points = ctx.obj['POINTS']\n sam_files = ctx.obj['SAM_FILES']\n res_file = ctx.obj['RES_FILE']\n sites_per_worker = ctx.obj['SITES_PER_WORKER']\n dirout, fout = os.path.split(ctx.obj['OUT_FPATH'])\n logdir = ctx.obj['LOGDIR']\n output_request = ctx.obj['OUTPUT_REQUEST']\n site_data = ctx.obj['SITE_DATA']\n max_workers = ctx.obj['MAX_WORKERS']\n mem_util_lim = ctx.obj['MEM_UTIL_LIM']\n timeout = ctx.obj['TIMEOUT']\n curtailment = ctx.obj['CURTAILMENT']\n gid_map = ctx.obj['GID_MAP']\n verbose = any([verbose, ctx.obj['VERBOSE']])\n\n slurm_manager = ctx.obj.get('SLURM_MANAGER', None)\n if slurm_manager is None:\n slurm_manager = SLURM()\n ctx.obj['SLURM_MANAGER'] = slurm_manager\n\n pc = get_node_pc(points, sam_files, tech, res_file, nodes)\n\n for i, split in enumerate(pc):\n node_name, fout_node = get_node_name_fout(name, fout, i, pc,\n hpc='slurm')\n\n node_fpath = os.path.join(dirout, fout_node)\n cmd = get_node_cmd(node_name, tech, sam_files, res_file, node_fpath,\n points=points,\n points_range=split.split_range,\n sites_per_worker=sites_per_worker,\n max_workers=max_workers,\n logdir=logdir,\n output_request=output_request,\n site_data=site_data,\n mem_util_lim=mem_util_lim,\n timeout=timeout,\n curtailment=curtailment,\n gid_map=gid_map,\n verbose=verbose)\n\n status = Status.retrieve_job_status(dirout, 'generation', node_name,\n hardware='eagle',\n subprocess_manager=slurm_manager)\n\n if status == 'successful':\n msg = ('Job \"{}\" is successful in status json found in \"{}\", '\n 'not re-running.'\n .format(node_name, dirout))\n elif 'fail' not in str(status).lower() and status is not None:\n msg = ('Job \"{}\" was found with status \"{}\", not resubmitting'\n .format(node_name, status))\n else:\n logger.info('Running reV generation on SLURM with node name \"{}\" '\n 'for {} (points range: {}).'\n .format(node_name, pc, split.split_range))\n # create and submit the SLURM job\n out = slurm_manager.sbatch(cmd,\n alloc=alloc,\n memory=memory,\n walltime=walltime,\n feature=feature,\n name=node_name,\n stdout_path=stdout_path,\n conda_env=conda_env,\n module=module)[0]\n if out:\n msg = ('Kicked off reV generation job \"{}\" (SLURM jobid #{}).'\n .format(node_name, out))\n # add job to reV status file.\n Status.add_job(\n dirout, 'generation', node_name, replace=True,\n job_attrs={'job_id': out, 'hardware': 'eagle',\n 'fout': fout_node, 'dirout': dirout})\n\n click.echo(msg)\n logger.info(msg)", 
"def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setup(app):\n wheel = ensure_wheel()\n subprocess.check_call([\n \"jupyter\", \"lite\", \"build\", f\"--LiteBuildConfig.federated_extensions={wheel}\",\n ], cwd=DEMO)", "def bootstrap(environment: Environment):\n pass", "def main(argv):\n parser = argparse.ArgumentParser(description=\"\"\"Bootstrap CI Scripts\"\"\")\n parser.add_argument(\"-d\", \"--directory\",\n type=str,\n required=True,\n help=(\"\"\"Directory to store language runtimes, \"\"\"\n \"\"\"scripts and other script details in\"\"\"))\n parser.add_argument(\"-s\", \"--script\",\n type=str,\n help=\"\"\"Script to pass control to\"\"\")\n parser.add_argument(\"-e\", \"--eval-output\",\n type=str,\n choices=[\n \"bash\",\n \"powershell\"\n ],\n help=\"\"\"Evaluate output in shell\"\"\")\n parser.add_argument(\"-p\", \"--print-to\",\n type=str,\n help=\"\"\"Where to print output script to\"\"\")\n parser.add_argument(\"-r\", \"--scripts-directory\",\n type=str,\n help=(\"\"\"Directory where scripts are already \"\"\"\n \"\"\"stored in\"\"\"))\n parser.add_argument(\"--keep-scripts\",\n action=\"store_true\",\n help=\"\"\"Don't remove stale scripts.\"\"\")\n args, remainder = parser.parse_known_args(argv)\n\n print_script_to, print_messages_to = _determine_outputs(args.print_to)\n\n with closing(print_script_to):\n parent_shell = construct_parent_shell(args.eval_output,\n print_script_to)\n container = ContainerDir(parent_shell,\n stale_check=_stale_check_url(args),\n **(vars(args)))\n util = container.fetch_and_import(\"util.py\")\n # suppress(unused-attribute)\n util.PRINT_MESSAGES_TO = print_messages_to\n bootstrap_script = container.script_path(\"bootstrap.py\").fs_path\n bootstrap_script_components = bootstrap_script.split(os.path.sep)\n scripts_path = os.path.sep.join(bootstrap_script_components[:-2])\n\n # Overwrite CONTAINER_DIR in the output script, but not\n # for our 
own invocation, we'll need the parent instance\n # if we're actually in a test\n parent_shell.overwrite_environment_variable(\"CONTAINER_DIR\",\n container.path())\n _set_ci_environment_variables(parent_shell)\n\n _define_script_command(\"polysquare_run\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n None)\n _define_script_command(\"polysquare_cleanup\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n \"clean.py\")\n\n # Done, pass control to the script we're to run\n container.fetch_and_import(args.script).run(container,\n util,\n parent_shell,\n argv=remainder)\n\n # Print a final new line so that active messages don't get\n # truncated.\n util.print_message(\"\\n\")\n\n if container.return_code() != 0:\n parent_shell.exit(container.return_code())\n\n return container.return_code()", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def sktime_custom_env(tmp_path):\n conda_env = tmp_path.joinpath(\"conda_env.yml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n return conda_env", "def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n create_local_settings()", "def installRequiredPackages(self, force=False):\n # Need to install if forced or any packages cannot be imported\n needToInstall = force\n if not needToInstall:\n try:\n import jupyter\n import jupyterlab\n import ipywidgets\n import pandas\n import ipyevents\n import ipycanvas\n except:\n needToInstall = True\n\n if needToInstall:\n # Install required packages\n import os\n if os.name != 'nt':\n # PIL may be corrupted on linux, reinstall from pillow\n slicer.util.pip_install('--upgrade pillow --force-reinstall')\n\n 
slicer.util.pip_install(\"jupyter jupyterlab ipywidgets pandas ipyevents ipycanvas --no-warn-script-location\")\n\n # Install Slicer Jupyter kernel\n # Create Slicer kernel\n slicer.modules.jupyterkernel.updateKernelSpec()\n # Install Slicer kernel\n import jupyter_client\n jupyter_client.kernelspec.KernelSpecManager().install_kernel_spec(slicer.modules.jupyterkernel.kernelSpecPath(), user=True, replace=True)", "def superconductor(local_dir, cpus, gpus, num_parallel, num_samples, oracle):\n\n # Final Version\n\n from design_baselines.mins import mins\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n tune.run(mins, config={\n \"logging_dir\": \"data\",\n \"task\": f\"Superconductor-{oracle}-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"val_size\": 200,\n \"offline\": True,\n \"normalize_ys\": True,\n \"normalize_xs\": True,\n \"base_temp\": 0.1,\n \"noise_std\": 0.0,\n \"method\": \"wasserstein\",\n \"use_conv\": False,\n \"gan_batch_size\": 128,\n \"hidden_size\": 1024,\n \"num_layers\": 1,\n \"bootstraps\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"oracle_lr\": 0.001,\n \"oracle_batch_size\": 128,\n \"oracle_epochs\": 100,\n \"latent_size\": 32,\n \"critic_frequency\": 10,\n \"flip_frac\": 0,\n \"fake_pair_frac\": 0.,\n \"penalty_weight\": 10.,\n \"generator_lr\": 2e-4,\n \"generator_beta_1\": 0.0,\n \"generator_beta_2\": 0.9,\n \"discriminator_lr\": 2e-4,\n \"discriminator_beta_1\": 0.0,\n \"discriminator_beta_2\": 0.9,\n \"initial_epochs\": 200,\n \"epochs_per_iteration\": 0,\n \"iterations\": 0,\n \"exploration_samples\": 0,\n \"exploration_rate\": 0.,\n \"thompson_samples\": 0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def _run_env(self):\n raise NotImplementedError()", "def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. 
fabric_factory/ve/bin/activate; pip install -r requirements.txt')", "def test_srnaseq_bowtie(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"test_srnaseq\"),\n os.path.join(data_dir, \"run_info-srnaseq_bowtie.yaml\")]\n subprocess.check_call(cl)", "def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def get_estimator(arguments):\n \n numerical_indices = [1, 2, 4, 5,6,7,8,9,10,11,12,13,14]\n categorical_indices = [0]\n original_indices = list(set(range(59))-set(numerical_indices)-set(categorical_indices))\n \n p1 = make_pipeline(my_module.PositionalSelector(categorical_indices),OneHotEncoder())\n p2 = make_pipeline(my_module.PositionalSelector(numerical_indices),StandardScaler())\n p3 = make_pipeline(my_module.PositionalSelector(original_indices))\n \n feats = FeatureUnion([('categoricals', p1),\n ('numericals', p2),\n ('originals', p3),])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n pipeline = Pipeline([('pre', feats),\n ('estimator', linear_model.LogisticRegression(penalty=\"l2\",\n tol=arguments.tol,\n C = arguments.C,\n solver='lbfgs',\n max_iter=10000))])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n #classifier = linear_model.LogisticRegression(\n # penalty=\"l2\",\n # tol=arguments.tol,\n # C = arguments.C,\n # solver='lbfgs',\n # max_iter=1000\n #)\n \n return pipeline", "def workflow(base_dir, # base tool path\n use_cache=1, # whether to skip already executed runs (in cache) or not (1/0)\n ignore_git=0): # whether to ignore git version or not (1/0)\n\n # get some needed variables from config file\n runs = int(config['general']['runs'])\n workers = int(config['general']['workers'])\n\n batch_size = int(config['mtje']['batch_size'])\n epochs = int(config['mtje']['epochs'])\n use_malicious_labels = int(config['mtje']['use_malicious_labels'])\n use_count_labels = int(config['mtje']['use_count_labels'])\n gen_type = config['mtje']['gen_type']\n similarity_measure = 
config['mtje']['similarity_measure'].lower()\n net_type = 'mtje'\n\n training_n_samples = int(config['sorel20mDataset']['training_n_samples'])\n validation_n_samples = int(config['sorel20mDataset']['validation_n_samples'])\n test_n_samples = int(config['sorel20mDataset']['test_n_samples'])\n\n min_n_anchor_samples = int(config['freshDataset']['min_n_anchor_samples'])\n max_n_anchor_samples = int(config['freshDataset']['max_n_anchor_samples'])\n fresh_n_queries = int(config['freshDataset']['n_queries'])\n n_evaluations = int(config['freshDataset']['n_evaluations'])\n\n f_c_epochs = int(config['familyClassifier']['epochs'])\n f_c_train_split_proportion = int(config['familyClassifier']['train_split_proportion'])\n f_c_valid_split_proportion = int(config['familyClassifier']['valid_split_proportion'])\n f_c_test_split_proportion = int(config['familyClassifier']['test_split_proportion'])\n f_c_batch_size = int(config['familyClassifier']['batch_size'])\n\n c_l_epochs = int(config['contrastiveLearning']['epochs'])\n c_l_train_split_proportion = int(config['contrastiveLearning']['train_split_proportion'])\n c_l_valid_split_proportion = int(config['contrastiveLearning']['valid_split_proportion'])\n c_l_test_split_proportion = int(config['contrastiveLearning']['test_split_proportion'])\n c_l_batch_size = int(config['contrastiveLearning']['batch_size'])\n c_l_rank_size = int(config['contrastiveLearning']['rank_size'])\n c_l_knn_k_min = int(config['contrastiveLearning']['knn_k_min'])\n c_l_knn_k_max = int(config['contrastiveLearning']['knn_k_max'])\n\n # initialize Hash object\n ch = Hash()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items('sorel20mDataset'))))\n # get config file sha256 digest\n dataset_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items(net_type))))\n # get config file sha256 digest\n config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('freshDataset'))))\n # get config file sha256 digest\n fresh_dataset_config_sha = ch.get_b64()\n\n # create copy of the current config hash digest\n ch_copy = ch.copy()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('familyClassifier'))))\n # get config file sha256 digest\n family_class_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch_copy.update(json.dumps(dict(config.items('contrastiveLearning'))))\n # get config file sha256 digest\n contr_learn_config_sha = ch_copy.get_b64()\n\n # instantiate key-n_samples dict\n n_samples_dict = {'train': training_n_samples,\n 'validation': validation_n_samples,\n 'test': test_n_samples}\n\n # Note: The entrypoint names are defined in MLproject. 
The artifact directories\n # are documented by each step's .py file.\n\n # start mlflow run\n with mlflow.start_run() as active_run:\n # get code git commit version\n git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)\n\n # log config file\n mlflow.log_text(json.dumps({s: dict(config.items(s)) for s in config.sections()}), 'config.txt')\n\n # set dataset destination dir\n dataset_dir = os.path.join(base_dir, 'dataset')\n # set dataset base path (directory containing 'meta.db')\n dataset_base_path = os.path.join(dataset_dir, '09-DEC-2020', 'processed-data')\n # set pre-processed dataset base path (directory containing .dat files)\n pre_processed_dataset_dir = os.path.join(dataset_dir, '09-DEC-2020', 'pre-processed_dataset')\n # set fresh dataset base path (directory containing .dat files)\n fresh_dataset_dir = os.path.join(dataset_dir, 'fresh_dataset')\n\n # if pre-processed dataset files for this run parameters are not present, generate them\n if not preproc_check_files(destination_dir=pre_processed_dataset_dir,\n n_samples_dict=n_samples_dict):\n logger.info(\"Pre-processed dataset not found.\")\n\n # if the original Sorel20M dataset is not present, download it\n if not download_check_files(dataset_dir):\n logger.info(\"Dataset not found.\")\n\n # run dataset downloader\n download_dataset_run = run(\"download_dataset\", {\n 'destination_dir': dataset_dir\n }, config_sha=dataset_config_sha)\n\n # pre-process dataset\n preprocess_dataset_run = run(\"preprocess_dataset\", {\n 'ds_path': dataset_base_path,\n 'destination_dir': pre_processed_dataset_dir,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'test_n_samples': test_n_samples,\n 'batch_size': batch_size,\n 'remove_missing_features': str(os.path.join(dataset_base_path, \"shas_missing_ember_features.json\"))\n }, config_sha=dataset_config_sha)\n\n # if the fresh dataset is not present, generate it\n if not fresh_check_files(fresh_dataset_dir):\n logger.info(\"Fresh dataset not found.\")\n\n # generate fresh dataset\n build_fresh_dataset_run = run(\"build_fresh_dataset\", {\n 'dataset_dest_dir': fresh_dataset_dir\n }, config_sha=fresh_dataset_config_sha)\n\n # initialize results files dicts\n results_files = {}\n c_l_results_files = {}\n\n # instantiate common (between consecutive training runs) training parameters\n common_training_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'epochs': epochs,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'use_malicious_labels': use_malicious_labels,\n 'use_count_labels': use_count_labels,\n 'workers': workers\n }\n\n # instantiate common (between consecutive training runs) evaluation parameters\n common_evaluation_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'test_n_samples': test_n_samples,\n 'evaluate_malware': use_malicious_labels,\n 'evaluate_count': use_count_labels\n }\n\n # for each training run\n for training_run_id in range(runs):\n logger.info(\"initiating training run n. 
{}\".format(str(training_run_id)))\n\n # -- Model Training and Evaluation Steps -------------------------------------------------------------------\n # set training parameters\n training_params = common_training_params\n training_params.update({'training_run': training_run_id})\n\n # train network (get or run) on Sorel20M dataset\n training_run = get_or_run(\"train_network\",\n training_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n resume=True,\n config_sha=config_sha)\n\n # get model checkpoints path\n checkpoint_path = parse.unquote(parse.urlparse(os.path.join(training_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n checkpoint_file = os.path.join(checkpoint_path, \"epoch_{}.pt\".format(epochs))\n\n # set evaluation parameters\n evaluation_params = common_evaluation_params\n evaluation_params.update({'checkpoint_file': checkpoint_file})\n\n # evaluate model against Sorel20M dataset\n evaluation_run = get_or_run(\"evaluate_network\",\n evaluation_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n config_sha=config_sha)\n\n # get model evaluation results path\n results_path = parse.unquote(parse.urlparse(os.path.join(evaluation_run.info.artifact_uri,\n \"model_results\")).path)\n\n # set model evaluation results filename\n results_file = os.path.join(results_path, \"results.csv\")\n\n # add file path to results_files dictionary (used for plotting mean results)\n results_files[\"run_id_\" + str(training_run_id)] = results_file\n\n # compute (and plot) all tagging results\n all_tagging_results_run = get_or_run(\"compute_all_run_results\", {\n 'results_file': results_file,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Model Evaluation using Fresh Dataset Steps ------------------------------------------------------------\n # evaluate model against fresh dataset\n fresh_evaluation_run = get_or_run(\"evaluate_fresh\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'min_n_anchor_samples': min_n_anchor_samples,\n 'max_n_anchor_samples': max_n_anchor_samples,\n 'n_query_samples': fresh_n_queries,\n 'n_evaluations': n_evaluations\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n\n # get model evaluation results path\n fresh_results_path = parse.unquote(parse.urlparse(os.path.join(fresh_evaluation_run.info.artifact_uri,\n \"fresh_prediction_results\")).path)\n\n # set model evaluation results filename\n fresh_results_file = os.path.join(fresh_results_path, \"fresh_prediction_results.json\")\n\n # compute (and plot) all family prediction results (on fresh dataset)\n all_tagging_results_run = get_or_run(\"compute_all_run_fresh_results\", {\n 'results_file': fresh_results_file\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Family Classifier Steps -------------------------------------------------------------------------------\n # create family classifier from previously trained 
network and train it on fresh dataset\n f_c_train_run = get_or_run(\"train_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': f_c_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model checkpoints path\n f_c_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(f_c_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n f_c_checkpoint_file = os.path.join(f_c_checkpoint_path, \"epoch_{}.pt\".format(f_c_epochs))\n\n # evaluate model against fresh dataset\n f_c_eval_run = get_or_run(\"evaluate_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': f_c_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model evaluation results path\n f_c_results_path = parse.unquote(parse.urlparse(os.path.join(f_c_eval_run.info.artifact_uri,\n \"family_class_results\")).path)\n\n # set model evaluation results filename\n f_c_results_file = os.path.join(f_c_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n f_c_compute_results_run = get_or_run(\"compute_all_family_class_results\", {\n 'results_file': f_c_results_file,\n 'fresh_ds_path': fresh_dataset_dir\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Contrastive Learning Steps ----------------------------------------------------------------------------\n # create family classifier from previously trained network and train it on fresh dataset\n c_l_train_run = get_or_run(\"train_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': c_l_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model checkpoints path\n c_l_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(c_l_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n c_l_checkpoint_file = os.path.join(c_l_checkpoint_path, \"epoch_{}.pt\".format(c_l_epochs))\n\n # evaluate model against fresh dataset\n c_l_eval_run = get_or_run(\"evaluate_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': c_l_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size,\n 'rank_size': c_l_rank_size,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': 
c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_results_path = parse.unquote(parse.urlparse(os.path.join(c_l_eval_run.info.artifact_uri,\n \"contrastive_learning_results\")).path)\n\n # set model evaluation results filename\n c_l_results_file = os.path.join(c_l_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n c_l_compute_results_run = get_or_run(\"compute_contrastive_learning_results\", {\n 'results_file': c_l_results_file,\n 'fresh_ds_path': fresh_dataset_dir,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_scores_dir_path = parse.unquote(parse.urlparse(os.path.join(c_l_compute_results_run.info.artifact_uri,\n \"contrastive_learning_scores\")).path)\n\n # add dir path to c_l_results_files dictionary (used for plotting mean score trends)\n c_l_results_files[\"run_id_\" + str(training_run_id)] = c_l_scores_dir_path\n # ----------------------------------------------------------------------------------------------------------\n\n # create temp dir name using the value from config_sha (sha of some parts of the config file).\n # -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n tempdir = os.path.join(base_dir, 'tmp_{}'.format(config_sha))\n # create temp dir\n os.makedirs(tempdir, exist_ok=True)\n\n # create contrastive learning temp dir name using the value from config_sha (sha of some parts of the config\n # file). -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. 
If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n c_l_tempdir = os.path.join(base_dir, 'tmp_{}'.format(contr_learn_config_sha))\n # create temp dir\n os.makedirs(c_l_tempdir, exist_ok=True)\n\n # set run-to-filename file path\n run_to_filename = os.path.join(tempdir, \"results.json\")\n\n # create and open the results.json file in write mode\n with open(run_to_filename, \"w\") as output_file:\n # save results_files dictionary as a json file\n json.dump(results_files, output_file)\n\n mlflow.log_artifact(run_to_filename, \"run_to_filename\")\n\n # set run-to-filename file path\n c_l_run_to_filename = os.path.join(c_l_tempdir, \"c_l_results.json\")\n\n # create and open the c_l_results.json file in write mode\n with open(c_l_run_to_filename, \"w\") as output_file:\n # save c_l_results_files dictionary as a json file\n json.dump(c_l_results_files, output_file)\n\n mlflow.log_artifact(c_l_run_to_filename, \"run_to_filename\")\n\n # if there is more than 1 run, compute also per-tag mean results\n if runs > 1:\n # plot all roc distributions\n per_tag_plot_runs = get_or_run(\"plot_all_roc_distributions\", {\n 'run_to_filename_json': run_to_filename,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n\n # plot all model mean scores trends\n plot_all_scores_trends = get_or_run(\"plot_all_contrastive_scores_trends\", {\n 'run_to_filename_json': c_l_run_to_filename,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # remove temp files and temporary directory\n os.remove(run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(tempdir)\n\n # remove contrastive learning temp files and temporary directory\n os.remove(c_l_run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(c_l_tempdir)", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n 
resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = 
File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = 
File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return", "def quickstart():\n if not os.path.exists(\"./fabric_factory/ve\"):\n bootstrap()\n else:\n print \"No need to create virtualenv, 've' already exists\"\n install_requirements()\n project_linkage()", "def test_result_reproducibility(monkeypatch):\n script = os.path.abspath(\"examples/scikitlearn-iris/main.py\")\n monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))\n config = \"orion_config.yaml\"\n\n orion.core.cli.main(\n [\"hunt\", \"--config\", config, \"python\", script, \"orion~choices([0.1])\"]\n )\n\n experiment = create_experiment(name=\"scikit-iris-tutorial\")\n assert experiment.stats is not None\n assert experiment.stats.best_evaluation == 0.6666666666666667", "def install_requirements():\n run('source %(env_path)s/bin/activate; pip install -U -r %(repo_path)s/requirements.txt' % env)", "def kitchen_prepare(ctx, windows=is_windows, kernel_release=None, ci=False, packages=\"\"):\n build_tags = [NPM_TAG]\n if not windows:\n build_tags.append(BPF_TAG)\n\n target_packages = go_package_dirs(TEST_PACKAGES_LIST, build_tags)\n\n # Clean up previous build\n if os.path.exists(KITCHEN_ARTIFACT_DIR) and (packages == \"\" or clean_build(ctx)):\n shutil.rmtree(KITCHEN_ARTIFACT_DIR)\n elif packages != \"\":\n packages = [full_pkg_path(name) for name in packages.split(\",\")]\n # make sure valid packages were provided.\n for pkg in packages:\n if pkg not in target_packages:\n raise Exit(f\"Unknown target packages {pkg} specified\")\n\n target_packages = packages\n\n if os.path.exists(BUILD_COMMIT):\n os.remove(BUILD_COMMIT)\n\n os.makedirs(KITCHEN_ARTIFACT_DIR, exist_ok=True)\n\n # clean target_packages only\n for pkg_dir in target_packages:\n test_dir = pkg_dir.lstrip(os.getcwd())\n if os.path.exists(os.path.join(KITCHEN_ARTIFACT_DIR, test_dir)):\n shutil.rmtree(os.path.join(KITCHEN_ARTIFACT_DIR, test_dir))\n\n # This will compile one 'testsuite' file per package by running `go test -c -o output_path`.\n # These artifacts will be \"vendored\" inside a chef recipe like the following:\n # 
test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/network/testsuite\n # test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/network/netlink/testsuite\n # test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/ebpf/testsuite\n # test/kitchen/site-cookbooks/dd-system-probe-check/files/default/tests/pkg/ebpf/bytecode/testsuite\n for i, pkg in enumerate(target_packages):\n target_path = os.path.join(KITCHEN_ARTIFACT_DIR, re.sub(\"^.*datadog-agent.\", \"\", pkg))\n target_bin = \"testsuite\"\n if windows:\n target_bin = \"testsuite.exe\"\n\n test(\n ctx,\n packages=pkg,\n skip_object_files=(i != 0),\n skip_linters=True,\n bundle_ebpf=False,\n output_path=os.path.join(target_path, target_bin),\n kernel_release=kernel_release,\n )\n\n # copy ancillary data, if applicable\n for extra in [\"testdata\", \"build\"]:\n extra_path = os.path.join(pkg, extra)\n if os.path.isdir(extra_path):\n shutil.copytree(extra_path, os.path.join(target_path, extra))\n\n if pkg.endswith(\"java\"):\n shutil.copy(os.path.join(pkg, \"agent-usm.jar\"), os.path.join(target_path, \"agent-usm.jar\"))\n\n for gobin in [\"gotls_client\", \"fmapper\", \"prefetch_file\"]:\n src_file_path = os.path.join(pkg, f\"{gobin}.go\")\n if not windows and os.path.isdir(pkg) and os.path.isfile(src_file_path):\n binary_path = os.path.join(target_path, gobin)\n with chdir(pkg):\n ctx.run(f\"go build -o {binary_path} -ldflags=\\\"-extldflags '-static'\\\" {gobin}.go\")\n\n gopath = os.getenv(\"GOPATH\")\n copy_files = [\n \"/opt/datadog-agent/embedded/bin/clang-bpf\",\n \"/opt/datadog-agent/embedded/bin/llc-bpf\",\n f\"{gopath}/bin/gotestsum\",\n ]\n\n files_dir = os.path.join(KITCHEN_ARTIFACT_DIR, \"..\")\n for cf in copy_files:\n if os.path.exists(cf):\n shutil.copy(cf, files_dir)\n\n if not ci:\n kitchen_prepare_btfs(ctx, files_dir)\n\n ctx.run(f\"go build -o {files_dir}/test2json -ldflags=\\\"-s -w\\\" cmd/test2json\", env={\"CGO_ENABLED\": \"0\"})\n ctx.run(f\"echo $(git rev-parse HEAD) > {BUILD_COMMIT}\")", "def __setup(self):\n\n build_environment = []\n\n # The download URL has the format contains vMAJOR.MINOR in the\n # path and the tarball contains MAJOR.MINOR.REVISION, so pull\n # apart the full version to get the MAJOR and MINOR components.\n match = re.match(r'(?P<major>\\d+)\\.(?P<minor>\\d+)', self.version)\n major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],\n match.groupdict()['minor'])\n tarball = 'openmpi-{}.tar.bz2'.format(self.version)\n url = '{0}/{1}/downloads/{2}'.format(self.baseurl, major_minor,\n tarball)\n\n # CUDA\n if self.cuda:\n if self.__toolchain.CUDA_HOME:\n self.configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n self.configure_opts.append('--with-cuda')\n else:\n self.configure_opts.append('--without-cuda')\n\n # InfiniBand\n if self.infiniband:\n self.configure_opts.append('--with-verbs')\n else:\n self.configure_opts.append('--without-verbs')\n\n # UCX\n if self.__ucx:\n if isinstance(self.__ucx, string_types):\n # Use specified path\n self.configure_opts.append('--with-ucx={}'.format(self.__ucx))\n else:\n self.configure_opts.append('--with-ucx')\n\n # If UCX was built with CUDA support, it is linked with\n # libcuda.so.1, which is not available during the\n # build stage. 
Assume that if OpenMPI is built with\n # CUDA support, then UCX was as well...\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if self.__toolchain.CUDA_HOME:\n cuda_home = self.__toolchain.CUDA_HOME\n self.__commands.append('ln -s {0} {1}'.format(\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n if not self.__toolchain.LD_LIBRARY_PATH:\n build_environment.append('LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\"'.format(os.path.join(cuda_home, 'lib64', 'stubs')))\n\n if self.directory:\n # Use source from local build context\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd, self.directory),\n toolchain=self.__toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=os.path.join(self.__wd, tarball), directory=self.__wd))\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version)),\n environment=build_environment,\n toolchain=self.__toolchain))\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = os.path.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.__environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, tarball),\n os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version))]))", "def develop():\n# Install package in development mode\n sh('python setup.py develop')", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': 
AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def miktex_install_deps():\n raise NotImplementedError", "def bootstrap():\n validate_configurator_version()\n\n # put new mkinitcpio.conf in place\n run(\"mv /etc/mkinitcpio.conf.pacnew /etc/mkinitcpio.conf\")\n sed(\"/etc/mkinitcpio.conf\",\n 'MODULES=\"\"',\n 'MODULES=\"xen-blkfront xen-fbfront xen-kbdfront xen-netfront xen-pcifront xenbus_probe_frontend xenfs\"') # nopep8\n sed(\"/etc/mkinitcpio.conf\",\n 'HOOKS=\"base udev autodetect modconf block filesystems keyboard fsck',\n 'HOOKS=\"base udev block filesystems shutdown autodetect\"')\n\n # upgrade pacakges\n run(\"pacman --noconfirm -Syu\")\n\n # put new pacman.conf in place\n run(\"mv /etc/pacman.conf.pacnew /etc/pacman.conf\")\n\n # install essential packages\n run(\"pacman --noconfirm -S base-devel\")\n run(\"pacman --noconfirm -S curl git rsync\")\n\n # create a user, named 'aur', to safely install AUR packages under fakeroot\n # uid and gid values auto increment from 1000\n # to prevent conficts set the 'aur' user's gid and uid to 902\n run(\"groupadd -g 902 aur && useradd -m -u 902 -g 902 -G wheel aur\")\n\n # allow users in the wheel group to sudo without a password\n uncomment(\"/etc/sudoers\", \"wheel.*NOPASSWD\")\n\n # install yaourt and upgrade non-pacman rackspace installed packages\n sudo(\"rm -rf /home/aur/.builds && mkdir /home/aur/.builds/\", user=\"aur\")\n with cd(\"/home/aur/.builds/\"):\n sudo(\"bash <(curl aur.sh) -si --noconfirm package-query yaourt\", user=\"aur\")\n sudo(\"yaourt --noconfirm -S xe-guest-utilities\", user=\"aur\")\n\n # allow fabric to sftp with contrib.files.put\n # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed # nopep8\n # change before reboot because then the sshd config will be reloaded\n # sed(\"/etc/ssh/sshd_config\", \"Subsystem sftp /usr/lib/openssh/sftp-server\",\n # \"Subsystem sftp internal-sftp\")\n\n # systemd\n sed(\"/boot/grub/menu.lst\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0\",\n \"kernel /boot/vmlinuz-linux root=/dev/xvda1 ro console=hvc0 init=/usr/lib/systemd/systemd\")\n reboot()\n if not contains(\"/proc/1/comm\", \"systemd\"):\n abort(\"systemd is not installed properly\")\n server = [s for s in env.bootmachine_servers if s.public_ip == env.host][0]\n run(\"hostnamectl set-hostname {0}\".format(server.name))\n run(\"mv /etc/locale.gen.pacnew /etc/locale.gen.conf\")\n uncomment(\"/etc/locale.gen\", \"en_US.UTF-8 UTF-8\")\n uncomment(\"/etc/locale.gen\", \"en_US ISO-8859-1\")\n run(\"locale-gen\")\n run(\"localectl set-locale LANG='en_US.utf8'\")\n run(\"timedatectl set-timezone US/Central\")", "def train_entry_point():", "def bootstrap():\n _require_environment()\n\n adduser()\n install_python()\n install_git()\n install_apache()\n install_mysql()\n setup_project()", "def main(\n input_dir: Path = typer.Argument(..., exists=True),\n output_dir: Path = typer.Argument(...),\n beth_train_tar_name: str = 
\"i2b2_Beth_Train_Release.tar.gz\",\n partners_train_tar_name: str = \"i2b2_Partners_Train_Release.tar.gz\",\n test_zip_name: str = \"Task_1C.zip\",\n merge_docs: bool = True,\n):\n # Unpack compressed data files\n msg.info(\"Extracting raw data.\")\n beth_train_tar_path = input_dir / beth_train_tar_name\n partners_train_tar_path = input_dir / partners_train_tar_name\n test_zip_path = input_dir / test_zip_name\n\n for path in [beth_train_tar_path, partners_train_tar_path]:\n if path.name.endswith(\"tar.gz\"):\n msg.text(f\"Extracting {path}\")\n tar = tarfile.open(path, \"r:gz\")\n tar.extractall(path.parent)\n tar.close()\n\n shutil.unpack_archive(test_zip_path, input_dir / test_zip_name.replace(\".zip\", \"\"))\n\n # preprocess data\n msg.info(\"Converting to spaCy Doc objects.\")\n beth_train_docs = docs_from_many_clinical_records(\n input_dir / \"Beth_Train\", merge_docs=merge_docs\n )\n partners_train_docs = docs_from_many_clinical_records(\n input_dir / \"Partners_Train\", merge_docs=merge_docs\n )\n train_docs = beth_train_docs + partners_train_docs\n\n beth_test_docs = docs_from_many_clinical_records(\n input_dir / \"Task_1C/i2b2_Test/i2b2_Beth_Test\", merge_docs=merge_docs\n )\n partners_test_docs = docs_from_many_clinical_records(\n input_dir / \"Task_1C/i2b2_Test/i2b2_Partners_Test\", merge_docs=merge_docs\n )\n test_docs = beth_test_docs + partners_test_docs\n\n random.shuffle(train_docs)\n split_idx = int(len(train_docs) * 0.8)\n train_docs, dev_docs = train_docs[:split_idx], train_docs[split_idx:]\n\n msg.good(f\"Num Train Docs: {len(train_docs)}\")\n msg.good(f\"Num Dev Docs: {len(dev_docs)}\")\n msg.good(f\"Num Test Docs: {len(test_docs)}\")\n\n with msg.loading(f\"Saving docs to: {output_dir}...\"):\n DocBin(docs=train_docs).to_disk(output_dir / \"train.spacy\")\n DocBin(docs=dev_docs).to_disk(output_dir / \"dev.spacy\")\n DocBin(docs=test_docs).to_disk(output_dir / \"test.spacy\")\n msg.good(\"Done.\")", "def build(ctx):\n ctx.run(\"vsce package\", replace_env=False)", "def _setup_test_infra(world_rank, world_size):\n os.environ['RANK'] = str(world_rank)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29500'\n\n set_cuda_device_id(world_rank)\n\n dist.init_process_group(backend='nccl', world_size=world_size, rank=world_rank)", "def session_system_dataproc(session, py):\n session.interpreter = 'python{}'.format(py)\n session.virtualenv_dirname = 'system-dataproc-{}'.format(py)\n\n session.install('pytest', 'pytest-cov', 'mock', 'numpy')\n\n try:\n import pyspark\n except:\n raise RuntimeError(\"Please install pyspark and spark clusters to run \"\n \"tests\")\n\n # setups environment to be able to see Spark cluster\n session.env = {'PYTHONPATH': (':./'\n ':/usr/local/spark/python'\n ':/usr/local/spark/python/lib/py4j-0.10.4-src.zip')}\n\n session.run(\n 'py.test',\n 'tests/system/dataproc/',\n '--cov=.',\n '--cov-config=.coveragerc',\n '--cov-report=html')", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are 
needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def build_experiment():\n\n # ====== Argument creation ======\n model_dir = FLAGS.model_dir.format(\n cell=FLAGS.cell,\n user=getpass.getuser(),\n trial=FLAGS.trial,\n )\n\n # ====== Jobs and runtime creation ======\n\n # Job: worker\n requirements = xm.Requirements(gpu_types=[xm.GpuType.V100],)\n runtime_worker = xm.Borg(\n cell=FLAGS.cell,\n priority=FLAGS.priority,\n requirements=requirements,\n )\n exec_worker = xm.BuildTarget(\n '//experimental/users/gjt/his:mnist',\n name='worker',\n args=dict(\n gfs_user=FLAGS.gfs_user,\n logdir=model_dir,\n mode='train',\n ),\n platform=xm.Platform.GPU,\n runtime=runtime_worker,\n )\n\n # Job: eval\n runtime_eval = xm.Borg(\n cell=FLAGS.cell,\n priority=FLAGS.priority,\n )\n exec_eval = xm.BuildTarget(\n '//experimental/users/gjt/his:mnist',\n name='eval',\n args=dict(\n gfs_user=FLAGS.gfs_user,\n logdir=model_dir,\n mode='eval',\n split='train,valid,test',\n num_iwae_samples='1,1,1000',\n ),\n platform=xm.Platform.GPU, # Do we need GPU for eval?\n runtime=runtime_eval,\n )\n\n # ====== Executable experiment creation ======\n list_executables = []\n list_executables.append(xm_helper.build_single_job(exec_worker))\n list_executables.append(xm_helper.build_single_job(exec_eval))\n\n experiment = xm.ParallelExecutable(list_executables, name='his_service')\n\n # Build experiments\n hyper_parameters = {}\n\n # SNIS vs LARS\n hyper_parameters['snis_vs_lars'] = hyper.product([\n hyper.chainit([\n hyper.product([\n hyper.fixed('proposal', 'gaussian', length=1),\n hyper.fixed('model', 'bernoulli_vae', length=1),\n ]),\n hyper.product([\n hyper.fixed('proposal', 'bernoulli_vae', length=1),\n hyper.fixed('model', 'nis', length=1),\n ]),\n hyper.product([\n hyper.fixed('proposal', 'nis', length=1),\n hyper.fixed('model', 'bernoulli_vae', length=1),\n ]),\n ]),\n hyper.sweep('run', hyper.discrete([0])),\n hyper.fixed('dataset', 'static_mnist', length=1),\n 
hyper.fixed('reparameterize_proposal', False, length=1),\n hyper.fixed('anneal_kl_step', 100000, length=1),\n ])\n\n # Continuous comparisons: HIS, NIS, VAE\n hyper_parameters['continuous'] = hyper.product([\n hyper.chainit([\n hyper.product([\n hyper.fixed('proposal', 'gaussian', length=1),\n hyper.fixed('model', 'gaussian_vae', length=1),\n ]),\n hyper.product([\n hyper.fixed('proposal', 'gaussian_vae', length=1),\n hyper.fixed('model', 'nis', length=1),\n ]),\n hyper.product([\n hyper.fixed('proposal', 'gaussian', length=1),\n hyper.fixed('model', 'hisvae', length=1),\n hyper.sweep('his_T', hyper.discrete([5, 10, 15])),\n ]),\n ]),\n hyper.sweep('run', hyper.discrete([0])),\n hyper.fixed('dataset', 'jittered_mnist', length=1),\n hyper.fixed('reparameterize_proposal', True, length=1),\n hyper.fixed('squash', True, length=1),\n ])\n\n hyper_parameters['celeba'] = hyper.product([\n hyper.chainit([\n hyper.product([\n hyper.fixed('proposal', 'gaussian', length=1),\n hyper.fixed('model', 'conv_gaussian_vae', length=1),\n ]),\n ]),\n hyper.sweep('run', hyper.discrete([0])),\n hyper.fixed('dataset', 'jittered_celeba', length=1),\n hyper.fixed('reparameterize_proposal', True, length=1),\n hyper.fixed('squash', True, length=1),\n hyper.fixed('latent_dim', 16, length=1),\n hyper.fixed('batch_size', 36, length=1),\n ])\n\n experiment = xm.ParameterSweep(experiment, hyper_parameters[FLAGS.exp_type])\n experiment = xm.WithTensorBoard(experiment, model_dir)\n\n return experiment", "def conda_install_requirements(venv):\n # Upload the requirements file.\n put(utils.files('requirements', 'base.txt'), utils.home('base.txt'))\n put(utils.files('requirements', 'prod.txt'), utils.home('prod.txt'))\n\n # Activate the virtual environment.\n activate = '{0}/bin/activate'.format(utils.home('apps', 'miniconda'))\n\n with prefix('source {activate} {venv}'.format(venv=venv, activate=activate)):\n run('pip install -r {0}'.format(utils.home('prod.txt')))\n\n # Remove the uploaded files.\n with cd(utils.home()):\n run('rm {0}'.format(utils.home('base.txt')))\n run('rm {0}'.format(utils.home('prod.txt')))", "def bootstrap(help_triggered):\n\n # If the user is asking for help, let them know that the whole download-and-build\n # process has to happen before anything is printed out.\n if help_triggered:\n print(\"info: Downloading and building bootstrap before processing --help\")\n print(\" command. 
See src/bootstrap/README.md for help with common\")\n print(\" commands.\")\n\n parser = argparse.ArgumentParser(description='Build rust')\n parser.add_argument('--config')\n parser.add_argument('--build')\n parser.add_argument('--src')\n parser.add_argument('--clean', action='store_true')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n\n args = [a for a in sys.argv if a != '-h' and a != '--help']\n args, _ = parser.parse_known_args(args)\n\n # Configure initial bootstrap\n build = RustBuild()\n build.rust_root = args.src or os.path.abspath(os.path.join(__file__, '../../..'))\n build.verbose = args.verbose\n build.clean = args.clean\n\n try:\n with open(args.config or 'config.toml') as config:\n build.config_toml = config.read()\n except (OSError, IOError):\n pass\n\n config_verbose = build.get_toml('verbose', 'build')\n if config_verbose is not None:\n build.verbose = max(build.verbose, int(config_verbose))\n\n build.use_vendored_sources = build.get_toml('vendor', 'build') == 'true'\n\n build.use_locked_deps = build.get_toml('locked-deps', 'build') == 'true'\n\n build.check_vendored_status()\n\n data = stage0_data(build.rust_root)\n build.date = data['date']\n build.rustc_channel = data['rustc']\n build.cargo_channel = data['cargo']\n\n if 'dev' in data:\n build.set_dev_environment()\n else:\n build.set_normal_environment()\n\n build.update_submodules()\n\n # Fetch/build the bootstrap\n build.build = args.build or build.build_triple()\n build.download_stage0()\n sys.stdout.flush()\n build.ensure_vendored()\n build.build_bootstrap()\n sys.stdout.flush()\n\n # Run the bootstrap\n args = [build.bootstrap_binary()]\n args.extend(sys.argv[1:])\n env = os.environ.copy()\n env[\"BUILD\"] = build.build\n env[\"SRC\"] = build.rust_root\n env[\"BOOTSTRAP_PARENT_ID\"] = str(os.getpid())\n env[\"BOOTSTRAP_PYTHON\"] = sys.executable\n env[\"BUILD_DIR\"] = build.build_dir\n env[\"RUSTC_BOOTSTRAP\"] = '1'\n env[\"CARGO\"] = build.cargo()\n env[\"RUSTC\"] = build.rustc()\n run(args, env=env, verbose=build.verbose)", "def install():\n\n if (Path.cwd() / \"src\" / \"environment.yml\").is_file():\n call([\"conda\", \"install\", \"--file\", \"src/environment.yml\", \"--yes\"])\n\n pip_command = [\"install\", \"-U\", \"-r\", \"src/requirements.txt\"]\n\n if os.name == \"posix\":\n python_call(\"pip\", pip_command)\n else:\n command = [sys.executable, \"-m\", \"pip\"] + pip_command\n subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)", "def setup(self):\n osr_split_path = os.path.join(\n self.data_root, \"imagenet_osr_splits_winter21.pkl\"\n )\n if not os.path.exists(osr_split_path):\n os.makedirs(self.data_root, exist_ok=True)\n osr_split = requests.get(self.OSR_URL)\n open(osr_split_path, \"wb\").write(osr_split.content)\n else:\n with open(osr_split_path, \"rb\") as f:\n osr_split = pickle.load(f)\n # Ensure data is downloaded\n assert_data_downloaded(\n osr_split, shifthappens.config.imagenet21k_preprocessed_validation_path\n )\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = _get_imagenet_ssb_subset(\n imagenet21k_root=shifthappens.config.imagenet21k_preprocessed_validation_path,\n osr_split=osr_split,\n test_transform=test_transform,\n subset_type=self.subset_type,\n )\n\n self.dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def main(_config, _run):\n sacred.commands.print_config(_run)\n 
dump_config_and_makefile()\n prepare_and_train()", "def conda_dependencies(self):\n raise NotImplementedError", "def required():\n pip = path(\"bin/pip\")\n if not pip.exists():\n sh('%s install -E tg2env -r normal-reqs.txt --extra-index-url=http://www.turbogears.org/2.0/downloads/current/index' % pip)\n call_pavement('pavement.py', 'develop')", "def restore(c):\n c.run('pip install -r tests/requirements.txt')", "def setup(c):\n c.run('nox --envdir .')", "def complete_env() -> Python:\n return Python([\n 'click==0.0.1',\n 'googleapis-common-protos==0.0.1',\n 'numpy==0.0.1',\n 'pandas==0.0.1',\n 'Pillow==0.0.1',\n 'requests==0.0.1',\n 'scikit-learn==0.0.1',\n 'torch==0.0.1',\n 'urllib3==0.0.1',\n 'PyYAML==0.0.1',\n ]) # `verta` and `cloudpickle` included by default", "def runSlicer(slicer_executable, arguments=[], verbose=True, **kwargs):\r\n args = ['--no-splash']\r\n args.extend(arguments)\r\n return run(slicer_executable, args, verbose, **kwargs)", "def bootstrap(): # pragma: no cover, exercised via test_bootstrap() functional test\n pspec = PackageSpec(CFG, \"%s==%s\" % (PICKLEY, __version__))\n grand_parent = runez.parent_folder(runez.parent_folder(__file__))\n if grand_parent and grand_parent.endswith(\".whl\"):\n # We are indeed running from pex\n setup_audit_log()\n python = CFG.find_python(\"/usr/bin/python3\") # Prefer system py3, for stability\n if not python or python.problem:\n python = pspec.python\n\n LOG.debug(\"Bootstrapping pickley %s with %s (re-installing as venv instead of pex package)\" % (pspec.version, python))\n target = pspec.install_path\n venv = PythonVenv(target, python, pspec.index)\n venv.pip_install(\"wheel\")\n with runez.TempFolder():\n venv.run_python(\"-mwheel\", \"pack\", grand_parent)\n names = os.listdir(\".\")\n assert len(names) == 1\n venv.pip_install(names[0])\n\n delivery = DeliveryMethod.delivery_method_by_name(pspec.settings.delivery)\n return delivery.install(pspec, venv, {PICKLEY: \"bootstrapped\"})\n\n else:\n manifest = pspec.get_manifest()\n if not manifest:\n # We're not running from pex, but we need to re-install pickley with latest version, so it gets a manifest etc\n return perform_install(pspec, is_upgrade=False, quiet=False)", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def __init__(self, package, version='', repo=''):\n PackageRequirement.__init__(self, 'conda', package, version, repo)", "def check_sru_requirement(abort=False):\n\n # Check 1.\n try:\n if platform.system() == 'Windows':\n subprocess.check_output('pip freeze | findstr cupy', shell=True)\n subprocess.check_output('pip freeze | findstr pynvrtc',\n shell=True)\n else: # Unix-like systems\n subprocess.check_output('pip freeze | grep -w cupy', shell=True)\n subprocess.check_output('pip freeze | grep -w pynvrtc',\n shell=True)\n except subprocess.CalledProcessError:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires 'cupy' and 'pynvrtc' \"\n \"python packages installed.\")\n\n # Check 2.\n if torch.cuda.is_available() is False:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires pytorch built with cuda.\")\n\n # Check 3.\n pattern = re.compile(\".*cuda/lib.*\")\n ld_path = os.getenv('LD_LIBRARY_PATH', \"\")\n if 
re.match(pattern, ld_path) is None:\n if not abort:\n return False\n raise AssertionError(\"Using SRU requires setting cuda lib path, e.g. \"\n \"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.\")\n\n return True", "def __init__(__self__, *,\n aad_profile: Optional[pulumi.Input['AADProfileArgs']] = None,\n addon_profiles: Optional[pulumi.Input[Mapping[str, pulumi.Input['AddonProfilesArgs']]]] = None,\n agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['NamedAgentPoolProfileArgs']]]] = None,\n cloud_provider_profile: Optional[pulumi.Input['CloudProviderProfileArgs']] = None,\n control_plane: Optional[pulumi.Input['ControlPlaneProfileArgs']] = None,\n enable_rbac: Optional[pulumi.Input[bool]] = None,\n features: Optional[pulumi.Input['ProvisionedClustersCommonPropertiesFeaturesArgs']] = None,\n http_proxy_config: Optional[pulumi.Input['HttpProxyConfigArgs']] = None,\n kubernetes_version: Optional[pulumi.Input[str]] = None,\n linux_profile: Optional[pulumi.Input['LinuxProfilePropertiesArgs']] = None,\n network_profile: Optional[pulumi.Input['NetworkProfileArgs']] = None,\n node_resource_group: Optional[pulumi.Input[str]] = None,\n windows_profile: Optional[pulumi.Input['WindowsProfileArgs']] = None):\n if aad_profile is not None:\n pulumi.set(__self__, \"aad_profile\", aad_profile)\n if addon_profiles is not None:\n pulumi.set(__self__, \"addon_profiles\", addon_profiles)\n if agent_pool_profiles is not None:\n pulumi.set(__self__, \"agent_pool_profiles\", agent_pool_profiles)\n if cloud_provider_profile is not None:\n pulumi.set(__self__, \"cloud_provider_profile\", cloud_provider_profile)\n if control_plane is not None:\n pulumi.set(__self__, \"control_plane\", control_plane)\n if enable_rbac is not None:\n pulumi.set(__self__, \"enable_rbac\", enable_rbac)\n if features is not None:\n pulumi.set(__self__, \"features\", features)\n if http_proxy_config is not None:\n pulumi.set(__self__, \"http_proxy_config\", http_proxy_config)\n if kubernetes_version is not None:\n pulumi.set(__self__, \"kubernetes_version\", kubernetes_version)\n if linux_profile is not None:\n pulumi.set(__self__, \"linux_profile\", linux_profile)\n if network_profile is not None:\n pulumi.set(__self__, \"network_profile\", network_profile)\n if node_resource_group is not None:\n pulumi.set(__self__, \"node_resource_group\", node_resource_group)\n if windows_profile is not None:\n pulumi.set(__self__, \"windows_profile\", windows_profile)", "def test_get_software_bundles(self):\n pass", "def update_requirements():\n\n require('code_root', provided_by=env.environments)\n requirements = os.path.join(env.code_root, 'requirements')\n sdists = os.path.join(requirements, 'sdists')\n base_cmd = ['pip install']\n base_cmd += ['-q -E %(virtualenv_root)s' % env]\n base_cmd += ['--no-index --find-links=file://%s' % sdists]\n # install GDAL by hand, before anything else that might depend on it\n cmd = base_cmd + ['--no-install \"GDAL==1.6.1\"']\n sudo(' '.join(cmd), user=env.deploy_user)\n # this directory won't exist if GDAL was already installed\n if files.exists('%(virtualenv_root)s/build/GDAL' % env):\n sudo('rm -f %(virtualenv_root)s/build/GDAL/setup.cfg' % env, user=env.deploy_user)\n with cd('%(virtualenv_root)s/build/GDAL' % env):\n sudo('%(virtualenv_root)s/bin/python setup.py build_ext '\n '--gdal-config=gdal-config '\n '--library-dirs=/usr/lib '\n '--libraries=gdal1.6.0 '\n '--include-dirs=/usr/include/gdal '\n 'install' % env, user=env.deploy_user)\n # force reinstallation of OpenBlock every time\n with 
settings(warn_only=True):\n sudo('pip uninstall -y -E %(virtualenv_root)s ebpub ebdata obadmin' % env)\n for file_name in ['ebpub.txt', 'ebdata.txt', 'obadmin.txt', 'openrural.txt']:\n apps = os.path.join(requirements, file_name)\n cmd = base_cmd + ['--requirement %s' % apps]\n sudo(' '.join(cmd), user=env.deploy_user)", "def dataflow():\n print 'Building',TRAINER_NAME,'package.'\n subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])\n subprocess.check_call(['gsutil', '-q', 'cp',\n os.path.join('dist', TRAINER_NAME),\n TRAINER_URI])\n opts = None\n if args.cloud:\n options = {\n 'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),\n 'temp_location': os.path.join(args.output_dir, 'tmp'),\n 'job_name': ('cloud-ml-sample-iris' + '-'\n + datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n 'project': args.project_id,\n # Dataflow needs a copy of the version of the cloud ml sdk that\n # is being used.\n 'extra_packages': [ml.sdk_location, TRAINER_URI],\n 'teardown_policy': 'TEARDOWN_ALWAYS',\n 'no_save_main_session': True\n }\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n else:\n # For local runs, the trainer must be installed as a module.\n subprocess.check_call(['pip', 'install', '--upgrade', '--force-reinstall',\n '--user', os.path.join('dist', TRAINER_NAME)])\n\n p = beam.Pipeline(get_pipeline_name(), options=opts)\n\n # Every function below writes its ouput to a file. The inputs to these\n # functions are also optional; if they are missing, the input values are read\n # from a file. Therefore if running this script multiple times, some steps can\n # be removed to prevent recomputing values.\n metadata, train_features, eval_features, predict_features = preprocess(p)\n\n trained_model, results = train(p, train_features, eval_features, metadata)\n\n evaluations = evaluate(p, trained_model, eval_features)\n\n confusion_matrix, precision_recall, logloss = (\n model_analysis(p, evaluations, metadata))\n\n if args.cloud:\n deployed = deploy_model(p, args.deploy_model_name,\n args.deploy_model_version, trained_model)\n # Use our deployed model to run a batch prediction.\n output_uri = os.path.join(args.output_dir, 'batch_prediction_results')\n deployed | \"Batch Predict\" >> ml.Predict([args.predict_data], output_uri,\n region='us-central1',\n data_format='TEXT')\n\n print 'Deploying %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)\n\n p.run()\n\n if args.cloud:\n print 'Deployed %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)", "def main():\n # This have specific paths to prevent abitrary binaries from being\n # executed. 
The \"gsi\"* utilities are configured to use either grid proxies\n # or ssh, automatically.\n remoteLoginCmd = \"/usr/bin/gsissh\"\n remoteCopyCmd = \"/usr/bin/gsiscp\"\n\n UNKNOWN_PLATFORM_EXIT_CODE = 10\n MISSING_PBS_CONFIG_EXIT_CODE = 20\n\n p = AllocatorParser(sys.argv[0])\n platform = p.getPlatform()\n\n creator = Allocator(platform, p.getArgs(), \"$HOME/.lsst/condor-info.py\")\n\n platformPkgDir = lsst.utils.getPackageDir(\"ctrl_platform_\"+platform)\n configName = os.path.join(platformPkgDir, \"etc\", \"config\", \"pbsConfig.py\")\n execConfigName = os.path.join(platformPkgDir, \"etc\", \"config\", \"execConfig.py\")\n\n creator.load(execConfigName)\n\n creator.loadPbs(configName)\n\n verbose = creator.isVerbose()\n \n pbsName = os.path.join(platformPkgDir, \"etc\", \"templates\", \"generic.pbs.template\")\n generatedPbsFile = creator.createPbsFile(pbsName)\n\n condorFile = os.path.join(platformPkgDir, \"etc\", \"templates\", \"glidein_condor_config.template\")\n generatedCondorConfigFile = creator.createCondorConfigFile(condorFile)\n\n scratchDirParam = creator.getScratchDirectory()\n template = Template(scratchDirParam)\n scratchDir = template.substitute(USER_HOME=creator.getUserHome())\n userName = creator.getUserName()\n \n hostName = creator.getHostName()\n\n utilityPath = creator.getUtilityPath()\n\n #\n # execute copy of PBS file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedPbsFile, userName, hostName, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute copy of Condor config file to XSEDE node\n #\n cmd = \"%s %s %s@%s:%s/%s\" % (remoteCopyCmd, generatedCondorConfigFile, userName, hostName, scratchDir, os.path.basename(generatedCondorConfigFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteCopyCmd, hostName)\n sys.exit(exitCode)\n\n #\n # execute qsub command on XSEDE node to perform Condor glide-in\n #\n cmd = \"%s %s@%s %s/qsub %s/%s\" % (remoteLoginCmd, userName, hostName, utilityPath, scratchDir, os.path.basename(generatedPbsFile))\n if verbose:\n print cmd\n exitCode = runCommand(cmd, verbose)\n if exitCode != 0:\n print \"error running %s to %s.\" % (remoteLoginCmd, hostName)\n sys.exit(exitCode)\n\n nodes = creator.getNodes()\n slots = creator.getSlots()\n wallClock = creator.getWallClock()\n nodeString = \"\"\n if int(nodes) > 1:\n nodeString = \"s\"\n print \"%s node%s will be allocated on %s with %s slots per node and maximum time limit of %s\" % (nodes, nodeString, platform, slots, wallClock)\n print \"Node set name:\"\n print creator.getNodeSetName()\n sys.exit(0)", "def _resolve_kernel_deps(self):\n for nb in self.notebooks:\n for i, dep in enumerate(nb.kernel_sources):\n if dep.endswith('.ipynb'):\n referent = self.get_notebook(dep)\n nb.kernel_sources[i] = referent.slug", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def init_bel():\n # Pipeline before CCA\n X_pre_processing = Pipeline(\n [\n (\"scaler\", StandardScaler(with_mean=False)),\n (\"pca\", PCA()),\n ]\n )\n Y_pre_processing = Pipeline(\n [\n (\"scaler\", StandardScaler(with_mean=False)),\n (\"pca\", PCA()),\n ]\n )\n\n # Canonical Correlation Analysis\n # Number of CCA components is chosen as the min number of PC\n n_pc_pred, n_pc_targ = (\n 50,\n 30,\n )\n cca = CCA(n_components=min(n_pc_targ, n_pc_pred), 
max_iter=500 * 20, tol=1e-6)\n\n # Pipeline after CCA\n X_post_processing = Pipeline(\n [(\"normalizer\", PowerTransformer(method=\"yeo-johnson\", standardize=True))]\n )\n Y_post_processing = Pipeline(\n [(\"normalizer\", PowerTransformer(method=\"yeo-johnson\", standardize=True))]\n )\n\n # Initiate BEL object\n bel_model = BEL(\n X_pre_processing=X_pre_processing,\n X_post_processing=X_post_processing,\n Y_pre_processing=Y_pre_processing,\n Y_post_processing=Y_post_processing,\n cca=cca,\n )\n\n # Set PC cut\n bel_model.X_n_pc = n_pc_pred\n bel_model.Y_n_pc = n_pc_targ\n\n return bel_model", "def setup_ooxcb():\n import ooxcb.contrib.icccm\n import ooxcb.contrib.ewmh\n\n ooxcb.contrib.icccm.mixin()\n ooxcb.contrib.ewmh.mixin()", "def launch_analysis_v2():\n\n # add explicit instructions for user\n\n os.system(\"pip install -r requirements.txt\")\n os.chdir(f'{os.getcwd()}/gui')\n\n # explicit version checking\n if os.system(\"node -v\") != 0:\n print(\"Please install node before proceeding.\")\n exit(-1)\n\n if os.system(\"npm install\") != 0:\n print(\"Could not install npm packages. \")\n\n os.system(\"npm run start-backend &\")\n os.system(\"npm start\")", "def main(args):\n\n # load dataset\n with open(args.infile, 'rb') as fin:\n x_train, y_train, x_test, y_test = pickle.load(fin)\n\n y_train = y_train.astype('int64')\n y_test = y_test.astype('int64')\n\n random_index = list(range(len(x_train)))\n random.shuffle(random_index)\n x_train = np.array(x_train[random_index])\n y_train = np.array(y_train[random_index])\n\n # y_train = y_train.astype(bool).astype(int)\n # y_test = y_test.astype(bool).astype(int)\n\n # combined different features\n feature_extractors = [\n # ('general', MyScaler(False)),\n # ('wordcount', MyCountVectorizer(ngram_range=(1, 1), stop_words='english')),\n ('tfidf', MyTfidfVectorizer(stop_words='english')),\n ]\n combined_feature = FeatureUnion(feature_extractors)\n\n estimators = [('feature', combined_feature),\n ('clf', svm.LinearSVC(C=0.3))]\n pipeline = Pipeline(estimators)\n\n # pipeline.fit(x_train, y_train)\n # print(pipeline.score(x_test, y_test))\n\n # parameters to search\n param_grid = [\n {\n 'clf': [MultinomialNB()],\n 'clf__alpha': [10, 1.0, 0.1, 0.01],\n },\n {\n 'clf': [svm.LinearSVC()],\n 'clf__C': [3, 1, 0.3, 0.1],\n },\n ]\n\n # start training\n t0 = time.time()\n grid = GridSearchCV(pipeline, param_grid=param_grid, verbose=4, n_jobs=4)\n grid.fit(x_train, y_train)\n\n print()\n print('done in %.2f seconds' % (time.time() - t0))\n print()\n print('train accuracy: %.2f%%' % (100 * grid.score(x_train, y_train)))\n print('test accuracy: %.2f%%' % (100 * grid.score(x_test, y_test)))\n print()\n print('the best parameters are:', grid.best_params_)\n print()\n print('confusion matrix:')\n print(metrics.confusion_matrix(y_test, grid.predict(x_test)))", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.svm\n self.model = sklearn.svm.NuSVR", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.svm\n self.model = sklearn.svm.LinearSVR", "def make_pipeline(context):\n \n # Base universe set to the Q1500US\n base_universe = Q500US()\n \n #Get all industry codes\n industry=morningstar.asset_classification.morningstar_industry_code.latest\n #Get all sector codes\n sector = Sector()\n \n # Create filters (to be used as masks) of different industries/sectors \n # This is the mask that should exclude the most stocks. 
\n # Note that these may need to be even further filtered to exclude securities outside of a \n # similar range of volumes/size. For instance, the defense sector stock provides stocks as large as # LMT but also small defense companies. Although this shouldn't matter due to the second filter of \n # crosscorrelation, this may be unnecassary computational expense. \n pipe=Pipeline()\n #Below forms a \"sentiment screen\" that takes only stocks that have been rated a certain number of times and of those ratings there are at least 2.85 times as many bull scored messages as there are bear scored messages. \n pipe.add(st.bull_scored_messages .latest, 'bull_scored_messages')\n pipe.add(st.bear_scored_messages .latest, 'bear_scored_messages')\n sentimentScreen=(((st.bull_scored_messages.latest) > (context.Sentiment_multiplier*st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 5))\n \n dFilt=sector.eq(310) #Indicates aerospace/defense sector\n dFilt2=industry.eq(31052107) #Indicates aerospace/defense industry\n tFilt=sector.eq(311) #Indicates consumer electronics sector\n tFilt2=industry.eq(31167138) #Indicates consumer electronics industry \n cFilt=sector.eq(101) #Chemical sector\n cFilt2=industry.eq(10103003)\n aFilt=sector.eq(102)\n aFilt2=industry.eq(10209017) #Auto manufacturing industry\n depFilt2=industry.eq(10217034) #Department store industry\n #dFilt2,tFilt2,cFilt2,aFilt2=True,True,True,True #Remove industry requirement\n defenseFilt= dFilt & dFilt2 #Combination of filters\n techFilt= tFilt & tFilt2\n chemFilt = cFilt & cFilt2 \n autoFilt = aFilt & aFilt2 \n tradable=base_universe & (defenseFilt | techFilt | chemFilt | autoFilt | depFilt2) & sentimentScreen\n \n \n pipe.set_screen(tradable)\n pipe.add(defenseFilt,'defenseFilt')\n pipe.add(techFilt,'techFilt')\n pipe.add(chemFilt,'chemFilt')\n pipe.add(autoFilt,'autoFilt')\n pipe.add(depFilt2,'depFilt')\n \n \n \n #TODO: May also want to return stock sentiment data and further filter tuple couples by only accepting couples with sentiment data in a similar range (further attributing to the validity of the calculated cross-correlation)\n \n return pipe", "def prepare_env_for_all_codes(i):\n\n # Check vars\n if 'code_deps' not in i: return {'cm_return':1, 'cm_error':'\"code_deps\" is not defined in \"code prepare_env_for_all_codes\"'}\n\n include_paths=[]\n lib_paths=[]\n\n # Load OS\n os_uoa=''\n if 'os_uoa' in i and i['os_uoa']!='': os_uoa=i['os_uoa']\n elif 'cm_default_os_uoa' in cm_kernel.ini['dcfg'] and cm_kernel.ini['dcfg']['cm_default_os_uoa']!='':\n os_uoa=cm_kernel.ini['dcfg']['cm_default_os_uoa']\n\n if os_uoa=='' not in i:\n return {'cm_return':1, 'cm_error':'\"os_uoa\" is not defined and not in kernel'}\n\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os'],\n 'cm_action':'load',\n 'cm_data_uoa':os_uoa}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n os_cfg=r['cm_data_obj']['cfg']\n os_path=r['cm_path']\n os_uid=r['cm_uid']\n os_alias=r['cm_alias']\n\n s_code_deps=''\n a_code_deps=[]\n if 'code_deps' in i:\n for xx in i['code_deps']:\n yy=xx.keys()[0]\n x=xx[yy]\n\n if x=='':\n return {'cm_return':1, 'cm_error':'dependency \"'+yy+'\" is empty, please check your input'}\n\n # Check if code was installed\n if i.get('no_strict_check','')!='yes':\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'load',\n 'cm_data_uoa':x}\n r=cm_kernel.access(ii)\n if r['cm_return']==16:\n return {'cm_return':1, 'cm_error':'dependency is not resolved - code '+x+' ('+yy+') is not installed'}\n elif 
r['cm_return']>0: return r\n code_cfg=r['cm_data_obj']['cfg']\n if code_cfg.get('build_finished_successfully','')!='yes':\n return {'cm_return':1, 'cm_error':'dependency is not resolved - code '+x+' ('+yy+') is not installed'}\n\n code_path=r['cm_path']\n include_paths.append(os.path.join(code_path, 'include'))\n\n if 'state_input' in code_cfg and \\\n 'run_set_env2' in code_cfg['state_input'] and \\\n 'CM_TARGET_FILE' in code_cfg['state_input']['run_set_env2']:\n lib_paths.append(os.path.join(code_path, os_cfg['lib_dir'], \n code_cfg['state_input']['run_set_env2']['CM_TARGET_FILE']))\n\n # Environment script\n r=get_env({'cm_data_uoa':x, 'os_uoa':os_uoa})\n if r['cm_return']>0: return r\n\n# z=os_cfg['env_call']+' '+os.path.join(cm_kernel.ini[cm_kernel.env_cm_bin],r['cm_string'])\n z1=os_cfg['env_set']+' '+yy+'='+os_cfg['env_quotes']+x+os_cfg['env_quotes']\n z=os_cfg['env_call']+' '+r['cm_string']\n\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z1\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z\n # FGG added again setting environment variable since calling other scripts can change it\n # for example, we set CM_CODE_DEP_COMPILER and then call GMP that was compiled with another\n # compiler, then it will change this variable to a wrong value and further tools will \n # not be working correctly ...\n if s_code_deps!='': s_code_deps+=' '+os_cfg['env_separator']+' '\n s_code_deps+=z1\n\n a_code_deps.append(z1)\n a_code_deps.append(z)\n a_code_deps.append(z1)\n\n return {'cm_return':0, 'cm_string':s_code_deps, 'cm_array':a_code_deps, 'env_separator': os_cfg['env_separator'],\n 'include_paths':include_paths, 'lib_paths':lib_paths}", "def setup(self):\n self.cwd = os.getcwd()\n self.t = tempfile.mkdtemp()\n dir_path = Path(\"packages\")\n tmp_dir = self.t / dir_path\n src_dir = self.cwd / Path(ROOT_DIR, dir_path)\n shutil.copytree(str(src_dir), str(tmp_dir))\n shutil.copytree(Path(CUR_PATH, \"data\", \"dummy_aea\"), Path(self.t, \"dummy_aea\"))\n os.chdir(Path(self.t, \"dummy_aea\"))\n self.runner = CliRunner()", "def test_3_full_pipeline(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"110106_FC70BUKAAXX\"),\n os.path.join(data_dir, \"run_info.yaml\")]\n subprocess.check_call(cl)", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def _setup_pipeline_cfg(self):", "def main():\n run = Run.get_context()\n try:\n work_space = run.experiment.workspace\n except AttributeError:\n interactive_auth = 
InteractiveLoginAuthentication(\n tenant_id=os.getenv(\"TENANT_ID\")\n )\n work_space = Workspace.from_config(auth=interactive_auth)\n environment = work_space.environments[\"train_lstm\"]\n model = Model(work_space, \"currency\")\n service_name = \"currency-service\"\n inference_config = InferenceConfig(\n entry_script=\"predict_currency.py\", environment=environment\n )\n aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)\n scaler = Model(work_space, name=\"scaler\", version=1)\n service = Model.deploy(\n workspace=work_space,\n name=service_name,\n models=[model, scaler],\n inference_config=inference_config,\n deployment_config=aci_config,\n overwrite=True,\n )\n service.wait_for_deployment(show_output=True)\n print(service.get_logs())\n print(service.scoring_uri)", "def exec_anaconda():\n if PSC_PATH_PREFIX in sys.executable:\n from imp import reload\n\n fix_sys_path()\n\n reload(json)\n reload(os)\n reload(platform)\n reload(stat)\n reload(subprocess)\n reload(sys)\n return\n\n check_python_version()\n\n system = (platform.system(), platform.machine())\n if system not in SUPPORTED_SYSTEMS:\n raise Exception('Unsupported platform: %s %s' % (system))\n\n sa_scipy = '%s%s' % (PSC_PATH_PREFIX, SUPPORTED_SYSTEMS[system])\n\n sa_path = os.path.join(get_apps_path(), sa_scipy)\n if not os.path.isdir(sa_path):\n raise Exception('Failed to find Python for Scientific Computing Add-on (%s)' % sa_scipy)\n\n system_path = os.path.join(sa_path, 'bin', '%s' % (SUPPORTED_SYSTEMS[system]))\n\n if system[0] == 'Windows':\n python_path = os.path.join(system_path, 'python.exe')\n # MLA-564: Windows need the DLLs to be in the PATH\n dllpath = os.path.join(system_path, 'Library', 'bin')\n pathsep = os.pathsep if 'PATH' in os.environ else ''\n os.environ['PATH'] = os.environ.get('PATH', '') + pathsep + dllpath\n else:\n python_path = os.path.join(system_path, 'bin', 'python')\n\n # MLA-996: Unset PYTHONHOME\n # XXX: After migration to Python3 PYTHONPATH is not set anymore so this will\n # be unnecessary. SPL-170875\n os.environ.pop('PYTHONHOME', None)\n\n # Ensure that execute bit is set on <system_path>/bin/python\n if system[0] != 'Windows':\n mode = os.stat(python_path).st_mode\n os.chmod(python_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n print('INFO Running %s' % \" \".join([python_path] + sys.argv), sys.stderr)\n sys.stderr.flush()\n\n # In Quake and later PYTHONPATH is removed or not set.\n # So after shelling into PSC Python interpreter will lose\n # information about what Splunk core's Python path is. 
So we\n # stash it into an environment variable to retrieve it after\n # switching into conda.\n os.environ['SPLUNK_CORE_PYTHONPATH'] = json.dumps(sys.path)\n\n try:\n if system[0] == \"Windows\":\n os.environ['MKL_NUM_THREADS'] = '1'\n # os.exec* broken on Windows: http://bugs.python.org/issue19066\n subprocess.check_call([python_path] + sys.argv)\n os._exit(0)\n else:\n os.environ['VECLIB_MAXIMUM_THREADS'] = '1'\n os.environ['OPENBLAS_NUM_THREADS'] = '1'\n os.execl(python_path, python_path, *sys.argv)\n except Exception:\n traceback.print_exc(None, sys.stderr)\n sys.stderr.flush()\n time.sleep(0.1)\n raise RuntimeError(\n 'Error encountered while loading Python for Scientific Computing, see search.log.'\n )", "def _client(self):\n return self.m.cipd.ensure_tool('infra/tools/luci/isolated/${platform}',\n self._version)", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. 
This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n 
cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def __init__(self, **kwargs):\n self.sample = kwargs.get(\"sample\", None)\n self.min_completion_fraction = kwargs.get(\"min_completion_fraction\", 1.0)\n self.open_dataset = kwargs.get(\"open_dataset\", False)\n self.events_per_output = kwargs.get(\"events_per_output\", -1)\n self.files_per_output = kwargs.get(\"files_per_output\", -1)\n self.output_name = kwargs.get(\"output_name\",\"output.root\")\n self.arguments = kwargs.get(\"arguments\",\"output.root\")\n # self.output_dir = kwargs.get(\"output_dir\",None)\n self.scram_arch = kwargs.get(\"scram_arch\",\"slc6_amd64_gcc530\")\n self.tag = kwargs.get(\"tag\",\"v0\")\n self.global_tag = kwargs.get(\"global_tag\")\n self.cmssw_version = kwargs.get(\"cmssw_version\", None)\n self.tarfile = kwargs.get(\"tarfile\",None)\n # LHE, for example, might be large, and we want to use\n # skip events to process event chunks within files\n # in that case, we need events_per_output > 0 and total_nevents > 0\n self.split_within_files = kwargs.get(\"split_within_files\", False)\n self.total_nevents = kwargs.get(\"total_nevents\", -1)\n\n # If we have this attribute, then we must have gotten it from\n # a subclass (so use that executable instead of just bland condor exe)\n if not hasattr(self, \"input_executable\"):\n self.input_executable = kwargs.get(\"executable\", self.get_metis_base()+\"metis/executables/condor_skim_exe.sh\")\n\n self.read_only = kwargs.get(\"read_only\",False)\n special_dir = kwargs.get(\"special_dir\", \"ProjectMetis\")\n\n # If we didn't get an output directory, use the canonical format. 
E.g.,\n # /hadoop/cms/store/user/namin/ProjectMetis/MET_Run2017A-PromptReco-v2_MINIAOD_CMS4_V00-00-03\n hadoop_user = os.environ.get(\"USER\") # NOTE, might be different for some weird folks\n self.output_dir = \"/hadoop/cms/store/user/{0}/{1}/{2}_{3}/\".format(hadoop_user,special_dir,self.sample.get_datasetname().replace(\"/\",\"_\")[1:],self.tag)\n\n # I/O mapping (many-to-one as described above)\n self.io_mapping = []\n\n # Some storage params\n self.prepared_inputs = False\n self.job_submission_history = {}\n self.queried_nevents = 0\n\n # Make a unique name from this task for pickling purposes\n self.unique_name = kwargs.get(\"unique_name\", \"{0}_{1}_{2}\".format(self.get_task_name(),self.sample.get_datasetname().replace(\"/\",\"_\")[1:],self.tag))\n\n # Pass all of the kwargs to the parent class\n super(CondorTask, self).__init__(**kwargs)\n\n self.logger.info(\"Instantiated task for {0}\".format(self.sample.get_datasetname()))\n\n # Can keep calling update_mapping afterwards to re-query input files\n if not self.read_only:\n do_flush = kwargs.get(\"flush\", False)\n self.update_mapping(flush=do_flush)", "def bootstrap(execute=dummy_execute):\n path = node(['-p',\n 'try { require.resolve(\"@prometheusresearch/react-scripts/bin/react-scripts.js\") } catch (e) {\"\"}'],\n quiet=True)\n if not path.strip():\n def bootstrap_yarn():\n url, md5_hash = download.parse_url(YARN_URL)\n yarn_data = download.download(url, md5_hash=md5_hash)\n yarn_path = os.path.join(sys.prefix, 'bin', 'yarn')\n with open(yarn_path, 'w') as f:\n f.write(yarn_data)\n yarn_stat = os.stat(yarn_path)\n os.chmod(yarn_path, yarn_stat.st_mode | stat.S_IEXEC)\n\n def bootstrap_npm():\n npm_path = find_executable('npm', 'npm')\n out, err = exe(npm_path, ['--version'])\n npm_version = out.strip()\n if npm_version[0] not in ('4', '3', '2'):\n npm(['install', '--global', 'npm@2.x.x'])\n npm(['install', '--global', 'npm@' + NPM_VERSION])\n\n def bootstrap_react_scripts():\n deps = [\n '@prometheusresearch/react-scripts@%s' % REACT_SCRIPTS_VERSION,\n 'nan@2.6.2', # this is required for yarn to function propely\n ]\n npm(['install', '--global'] + deps)\n\n execute(bootstrap_yarn, (), 'Installing yarn')\n execute(bootstrap_npm, (), 'Installing npm')\n execute(bootstrap_react_scripts, (), 'Installing react-scripts')", "def run_sklearn_stack():\r\n X_train, y_train, X_test = load_features()\r\n base_models = [\r\n XGBClassifier(learning_rate=0.05,\r\n eval_metric='auc',\r\n # n_estimators=712, # 750\r\n n_estimators=7, # 750\r\n max_depth=5,\r\n min_child_weight=7,\r\n gamma=0,\r\n subsample=0.8,\r\n colsample_bytree=0.6,\r\n eta=0.05,\r\n silent=1,\r\n seed=3,\r\n objective='binary:logistic',\r\n scale_pos_weight=1),\r\n LGBMClassifier(num_leaves=31,\r\n learning_rate=0.05,\r\n # n_estimators=543, # 443\r\n n_estimators=5, # 443\r\n objective='binary',\r\n metric={'auc'},\r\n seed=3,\r\n colsample_bytree=0.8,\r\n min_child_weight=7,\r\n subsample=0.8,\r\n silent=1),\r\n CatBoostClassifier(iterations=5,\r\n learning_rate=0.05,\r\n eval_metric='AUC',\r\n depth=8\r\n ),\r\n ]\r\n X_train_stack, y_train_stack, X_test_stack = sklearn_stacking(base_models, X_train, y_train, X_test, n_fold=5)\r\n result_path = 'result/sklearn_stack_result-{}.csv'.format(time.strftime(\"%m%d-%H%M%S\"))\r\n check_path(result_path)\r\n y_pred_prob = final_fit_predict(X_train_stack, y_train_stack, X_test_stack, save_result_path=result_path)\r\n return y_pred_prob", "def test_operator_bundle_from_scratch(\n self, tmpdir, docker_tasker, from_scratch, 
multistage, labels, expected_fail\n ):\n if multistage:\n dockerfile_f = mock_dockerfile_multistage\n else:\n dockerfile_f = mock_dockerfile\n\n dockerfile_f = partial(dockerfile_f, from_scratch=from_scratch)\n\n runner = mock_env(\n tmpdir, docker_tasker,\n dockerfile_f=dockerfile_f, labels=labels\n )\n runner.workflow.builder.base_from_scratch = from_scratch\n runner.workflow.builder.parents_ordered = (\n ['scratch', 'scratch'] if multistage else ['scratch']\n )\n\n if expected_fail:\n with pytest.raises(PluginFailedException) as e:\n runner.run()\n assert 'Operator bundle build can be only' in str(e.value)\n else:\n runner.run()", "def main():\n spark_it_up()", "def init_single_subject_wf(\n estimator, atlas_img, atlas_lut, bold_metadata_list, brainmask_list,\n confound_tsv_list, events_tsv_list, fir_delays, hrf_model, high_pass,\n name, output_dir,\n preproc_img_list, selected_confounds, smoothing_kernel\n ):\n workflow = pe.Workflow(name=name)\n\n # name the nodes\n input_node = pe.Node(niu.IdentityInterface(fields=['atlas_img',\n 'atlas_lut',\n 'bold_metadata',\n 'brainmask',\n 'confound_tsv',\n 'events_tsv',\n 'preproc_img',\n ]),\n name='input_node',\n iterables=[('brainmask', brainmask_list),\n ('confound_tsv', confound_tsv_list),\n ('events_tsv', events_tsv_list),\n ('preproc_img', preproc_img_list),\n ('bold_metadata', bold_metadata_list)],\n synchronize=True)\n\n input_node.inputs.atlas_img = atlas_img\n input_node.inputs.atlas_lut = atlas_lut\n\n output_node = pe.Node(niu.IdentityInterface(fields=['correlation_matrix',\n 'correlation_fig',\n 'betaseries_file']),\n name='output_node')\n\n # initialize the betaseries workflow\n betaseries_wf = init_betaseries_wf(estimator=estimator,\n fir_delays=fir_delays,\n hrf_model=hrf_model,\n high_pass=high_pass,\n selected_confounds=selected_confounds,\n smoothing_kernel=smoothing_kernel)\n\n # initialize the analysis workflow\n correlation_wf = init_correlation_wf()\n\n # correlation matrix datasink\n ds_correlation_matrix = pe.MapNode(DerivativesDataSink(base_directory=output_dir),\n iterfield=['in_file'],\n name='ds_correlation_matrix')\n\n ds_correlation_fig = pe.MapNode(DerivativesDataSink(base_directory=output_dir),\n iterfield=['in_file'],\n name='ds_correlation_fig')\n\n ds_betaseries_file = pe.MapNode(DerivativesDataSink(base_directory=output_dir),\n iterfield=['in_file'],\n name='ds_betaseries_file')\n\n # connect the nodes for the beta series workflow\n workflow.connect([\n (input_node, betaseries_wf,\n [('preproc_img', 'input_node.bold_file'),\n ('events_tsv', 'input_node.events_file'),\n ('brainmask', 'input_node.bold_mask_file'),\n ('confound_tsv', 'input_node.confounds_file'),\n ('bold_metadata', 'input_node.bold_metadata')]),\n (betaseries_wf, output_node,\n [('output_node.betaseries_files', 'betaseries_file')]),\n (input_node, ds_betaseries_file, [('preproc_img', 'source_file')]),\n (output_node, ds_betaseries_file, [('betaseries_file', 'in_file')]),\n ])\n\n if atlas_img and atlas_lut:\n # connect the nodes for the atlas workflow\n input_node.inputs.atlas_img = atlas_img\n input_node.inputs.atlas_lut = atlas_lut\n\n workflow.connect([\n (betaseries_wf, correlation_wf,\n [('output_node.betaseries_files', 'input_node.betaseries_files')]),\n (input_node, correlation_wf,\n [('atlas_img', 'input_node.atlas_file'),\n ('atlas_lut', 'input_node.atlas_lut')]),\n (correlation_wf, output_node,\n [('output_node.correlation_matrix', 'correlation_matrix'),\n ('output_node.correlation_fig', 'correlation_fig')]),\n (input_node, 
ds_correlation_matrix, [('preproc_img', 'source_file')]),\n (output_node, ds_correlation_matrix, [('correlation_matrix', 'in_file')]),\n (input_node, ds_correlation_fig, [('preproc_img', 'source_file')]),\n (output_node, ds_correlation_fig, [('correlation_fig', 'in_file')]),\n ])\n\n return workflow", "def generate_models(R, u_t, inverse_transform, algo):\n model_list = []\n it_max = 10000 # maximum number of iterations after which the Lasso and SR3 are stopped to save computational time\n # in our experience, if the model converges at all, this is usually far sooner than 10000 iterations\n tol_iterativ = 10 * np.finfo(float).eps # convergence tolerance of SR3 and Lasso\n if algo == 'FoBa':\n log_epsilon_range = np.arange(-15., 15., 0.5)\n for log_epsilon in log_epsilon_range:\n w = FoBa(R, u_t, epsilon=10 ** log_epsilon, backwards_freq=1, maxit_f=20)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'Lasso':\n log_lambda_range = np.arange(-15., 15., 0.5) # l1 factor\n for log_lambda in log_lambda_range:\n # initialize Lasso model\n clf = linear_model.Lasso(alpha=10**log_lambda, copy_X=True, fit_intercept=True, max_iter=it_max,\n normalize=False, positive=False, precompute=False, random_state=None,\n selection='cyclic', tol=tol_iterativ, warm_start=False)\n clf.fit(R, u_t) # fit model\n w = clf.coef_\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'STRidge':\n log_lambda_range = np.arange(-15, 15., 1.) # l2 factor (Ridge)\n log_tol_range = np.arange(-16, 10., 1.)\n for log_lambda in log_lambda_range:\n for log_tol in log_tol_range:\n w = STRidge(R, u_t, maxit=1000, lam=10**log_lambda, tol=10**log_tol, normalize=2)\n initialize_model(w, model_list, algo, inverse_transform)\n\n elif algo == 'SR3':\n # Uses python-matlab interface to directly use the original SR3 implementation.\n # Note that setting up the interface can be a bit tricky; if setting up the interface is too much effort,\n # just leave SR3 out of the 'algo_list' in the SITE file.\n t_sr3_start = time.time()\n eng = matlab.engine.start_matlab()\n eng.setup_matlab(nargout=0)\n log_lambda_range = np.arange(-15, 15., 1.) # l1 factor\n log_kappa_range = np.arange(-5, 6., 1.)\n for log_kappa in log_kappa_range:\n for log_lambda in log_lambda_range:\n R_matlab = matlab.double(R.tolist())\n u_t_matlab = matlab.double(u_t.tolist())\n # iters can be used to check if model converged or it_max was reached\n x, w, iters = eng.sr3(R_matlab, u_t_matlab, 'mode', '0', 'kap', (10**log_kappa).item(), 'lam',\n (10**log_lambda).item(), 'itm', it_max, 'tol', tol_iterativ.item(), 'ptf',\n 45000, nargout=3)\n w = np.asarray(w)\n initialize_model(w, model_list, algo, inverse_transform)\n eng.quit()\n print('Time for evaluation SR3: ', time.time() - t_sr3_start)\n\n else: raise ('The algorithm ' + str(algo) + ' is not implemented! 
(or a typo)')\n return model_list", "def runSCRO():\n\n dataset = None\n if EXPERIMENT == 0:\n print \"EXPERIMENT WITH MNIST DATASET\"\n dataset = mnist_data_builder()\n # elif EXPERIMENT == 1:\n # print \"EXPERIMENT WITH ...\"\n # dataset = my_call()\n else:\n print \"EXPERIMENT NOT IMPLEMENTED!\"\n\n print \"Starting keras executor\"\n ke = KerasExecutor(dataset, TEST_SIZE, METRICS, EARLY_STOPPING_PATIENCE_KERAS, LOSS)\n\n config_data = open('parametersGenetic.json')\n configuration = Configuration(config_data)\n\n ##############################\n # Initialisation\n ##############################\n\n reef = initialisation(Rsize=RSIZE, config=configuration, n_global_in=deepcopy(ke.n_in), n_global_out=ke.n_out,\n ke=ke)\n # Population is already evaluated in the initialisation function\n\n history = []\n\n max_fitness_ever = 0.0\n generations_with_no_improvement = 0\n\n ID_EXECUTION = str(time.time())\n\n output_file = open(\"EXECUTION_\" + ID_EXECUTION + \".csv\", \"w\")\n output_file.write(\"fitness_mean_validation,fitness_std_validation,fitness_max_validation,fitness_min_validation,\"\n \"fitness_mean_train,fitness_std_train,fitness_max_train,fitness_min_train,\"\n \"fitness_mean_test,fitness_std_test,fitness_max_test,fitness_min_test,\"\n \"count_evaluations,individuals_depredated,ratio_reef,time_generation,reef\\n\")\n\n output_file_population = open(\"EXECUTION_\" + ID_EXECUTION + \"_REEF_EVOLUTION.csv\", \"w\")\n\n output_file_population.write(\n \"generation,pos_in_reef,accuracy_validation,number_layers,accuracy_training,accuracy_test\\n\")\n\n output_file_individuals = open(\"EXECUTION_\" + str(ID_EXECUTION) + \"_INDIVIDUALS.txt\", \"w\")\n\n ##############################\n # Loop\n ##############################\n for i in range(MAX_GENERATIONS_SCRO):\n\n start_time = time.time()\n\n print colored(\"GENERATION: \" + str(i), \"red\")\n pool = []\n\n if len(filter(lambda w: w is not None, reef)) == 0:\n output_file.write(\"ALL REEF IS NONE!\")\n print colored(\"ALL REEF IS NONE!, BREAKING EVOLUTION!\", \"red\")\n break\n\n # 1 Asexual reproduction\n asexual_new_individual = deepcopy(asexual_reproduction(reef, configuration))\n if asexual_new_individual is not None:\n pool = pool + [asexual_new_individual]\n\n # 2 Sexual reproduction\n sexual_new_individuals = sexual_reproduction(reef, configuration)\n pool = pool + sexual_new_individuals\n\n # 3 Larvae settlement\n print colored(\"STARTING EVALUATION. 
INDIVIDUALS TO EVALUATE: \" + str(len(pool)), \"red\")\n pool, count_evaluations = eval_population(pool, ke)\n\n # print \"POOL EVALUATED: \"\n # for ind_pool in pool:\n # print \"IND: \" + str(ind_pool.fitness[\"accuracy_validation\"])\n\n reef, settled = larvae_settlement(reef, pool)\n\n # 4 Depredation\n # Todo remove returns same object\n reef, individuals_depredated = depredation(reef)\n\n # History\n\n fitness = fitness_mean_std(reef)\n\n fitness_mean_validation = fitness[\"validation\"][\"mean\"]\n fitness_std_validation = fitness[\"validation\"][\"std\"]\n fitness_max_validation = fitness[\"validation\"][\"max\"]\n fitness_min_validation = fitness[\"validation\"][\"min\"]\n\n fitness_mean_train = fitness[\"train\"][\"mean\"]\n fitness_std_train = fitness[\"train\"][\"std\"]\n fitness_max_train = fitness[\"train\"][\"max\"]\n fitness_min_train = fitness[\"train\"][\"min\"]\n\n fitness_mean_test = fitness[\"test\"][\"mean\"]\n fitness_std_test = fitness[\"test\"][\"std\"]\n fitness_max_test = fitness[\"test\"][\"max\"]\n fitness_min_test = fitness[\"test\"][\"min\"]\n\n if fitness_max_validation > max_fitness_ever:\n max_fitness_ever = fitness_max_validation\n generations_with_no_improvement = 0\n else:\n generations_with_no_improvement += 1\n\n finish_time = time.time()\n\n time_generation = finish_time - start_time\n\n positions_free = len(filter(lambda w: w is not None, reef))\n positions_total = len(reef)\n\n history.append([fitness_mean_validation, fitness_std_validation, fitness_max_validation, fitness_min_validation,\n fitness_mean_train, fitness_std_train, fitness_max_train, fitness_min_train,\n fitness_mean_test, fitness_std_test, fitness_max_test, fitness_min_test,\n count_evaluations, individuals_depredated,\n str(positions_free) + \"/\" + str(positions_total), time_generation, deepcopy(reef)])\n\n output_file.write(str(fitness_mean_validation) + \",\" +\n str(fitness_std_validation) + \",\" +\n str(fitness_max_validation) + \",\" +\n str(fitness_min_validation) + \",\" +\n\n str(fitness_mean_train) + \",\" +\n str(fitness_std_train) + \",\" +\n str(fitness_max_train) + \",\" +\n str(fitness_min_train) + \",\" +\n\n str(fitness_mean_test) + \",\" +\n str(fitness_std_test) + \",\" +\n str(fitness_max_test) + \",\" +\n str(fitness_min_test) + \",\" +\n\n str(count_evaluations) + \",\" +\n str(individuals_depredated) + \",\" +\n str(positions_free) + \"/\" + str(positions_total) + \",\" +\n str(time_generation) + \",\" +\n str(reef) + \"\\n\")\n\n # PRINTING THE STATUS OF THE REEF and INDIVIDUALS DEFINITION\n output_file_individuals.write(\"GENERATION: \" + str(i) + \" - POSITION IN REEF: \" + str(position_reef) + \"\\n\")\n\n for position_reef in range(len(reef)):\n\n # Controlar nones\n if reef[position_reef] is not None:\n output_file_population.write(\",\".join([str(i), str(position_reef),\n str(reef[position_reef].fitness[\"accuracy_validation\"]),\n str(reef[position_reef].fitness[\"number_layers\"]),\n str(reef[position_reef].fitness[\"accuracy_training\"]),\n str(reef[position_reef].fitness[\"accuracy_test\"])]) + \"\\n\")\n\n # PRINTING INDIVIDUALS:\n output_file_individuals.write(reef[position_reef].toString())\n\n\n output_file_individuals.write(\"----------------------------------------------------------\\n\")\n output_file_individuals.write(\"----------------------------------------------------------\\n\")\n output_file_individuals.write(\"----------------------------------------------------------\\n\\n\\n\")\n \n\n # GENERATION,INDIVIDUAL_POSITION,\n\n print 
colored(\n str(fitness_mean) + \",\" + str(fitness_std) + \",\" + str(fitness_max) + \",\" + str(fitness_min) + \",\" + str(\n count_evaluations) + \",\" + str(individuals_depredated) + \",\" +\n str(positions_free) + \"/\" + str(positions_total) + \",\" + str(time_generation), 'yellow')\n\n if generations_with_no_improvement >= MAX_GENERATIONS_SCRO:\n print colored(\"Stop criterion reached! \" + str(generations_with_no_improvement) + \"generations with no \"\n \"improvement!\", \"red\")\n break\n\n output_file.close()\n output_file_population.close()\n output_file_individuals.close()", "def run_workflow(args, run=True):\n\n import os\n import os.path as op\n\n import nipype.interfaces.io as nio\n import nipype.pipeline.engine as pe\n import nipype.interfaces.utility as niu\n\n import qap\n from qap_utils import read_json\n\n import glob\n\n import time\n from time import strftime\n from nipype import config as nyconfig\n\n # unpack args\n resource_pool_dict, sub_info_list, config, run_name, runargs, \\\n bundle_idx, num_bundles = args\n\n # Read and apply general settings in config\n keep_outputs = config.get('write_all_outputs', False)\n\n # take date+time stamp for run identification purposes\n pipeline_start_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_start_time = time.time()\n\n if \"workflow_log_dir\" not in config.keys():\n config[\"workflow_log_dir\"] = config[\"output_directory\"]\n\n bundle_log_dir = op.join(config[\"workflow_log_dir\"],\n '_'.join([\"bundle\", str(bundle_idx)]))\n\n try:\n os.makedirs(bundle_log_dir)\n except:\n if not op.isdir(bundle_log_dir):\n err = \"[!] Bundle log directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % bundle_log_dir\n raise Exception(err)\n else:\n pass\n\n # set up logging\n nyconfig.update_config(\n {'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})\n logging.update_logging(nyconfig)\n\n logger.info(\"QAP version %s\" % qap.__version__)\n logger.info(\"Pipeline start time: %s\" % pipeline_start_stamp)\n\n workflow = pe.Workflow(name=run_name)\n workflow.base_dir = op.join(config[\"working_directory\"])\n\n # set up crash directory\n workflow.config['execution'] = \\\n {'crashdump_dir': config[\"output_directory\"]}\n\n # create the one node all participants will start from\n starter_node = pe.Node(niu.Function(input_names=['starter'], \n output_names=['starter'], \n function=starter_node_func),\n name='starter_node')\n\n # set a dummy variable\n starter_node.inputs.starter = \"\"\n\n new_outputs = 0\n\n # iterate over each subject in the bundle\n logger.info(\"Starting bundle %s out of %s..\" % (str(bundle_idx),\n str(num_bundles)))\n # results dict\n rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}\n\n for sub_info in sub_info_list:\n\n resource_pool = resource_pool_dict[sub_info]\n\n # in case we're dealing with string entries in the data dict\n try:\n resource_pool.keys()\n except AttributeError:\n continue\n\n # resource pool check\n invalid_paths = []\n\n for resource in resource_pool.keys():\n try:\n if not op.isfile(resource_pool[resource]) and resource != \"site_name\":\n invalid_paths.append((resource, resource_pool[resource]))\n except:\n err = \"\\n\\n[!]\"\n raise Exception(err)\n\n if len(invalid_paths) > 0:\n err = \"\\n\\n[!] 
The paths provided in the subject list to the \" \\\n \"following resources are not valid:\\n\"\n\n for path_tuple in invalid_paths:\n err = \"%s%s: %s\\n\" % (err, path_tuple[0], path_tuple[1])\n\n err = \"%s\\n\\n\" % err\n raise Exception(err)\n\n # process subject info\n sub_id = str(sub_info[0])\n # for nipype\n if \"-\" in sub_id:\n sub_id = sub_id.replace(\"-\",\"_\")\n if \".\" in sub_id:\n sub_id = sub_id.replace(\".\",\"_\")\n\n if sub_info[1]:\n session_id = str(sub_info[1])\n # for nipype\n if \"-\" in session_id:\n session_id = session_id.replace(\"-\",\"_\")\n if \".\" in session_id:\n session_id = session_id.replace(\".\",\"_\")\n else:\n session_id = \"session_0\"\n\n if sub_info[2]:\n scan_id = str(sub_info[2])\n # for nipype\n if \"-\" in scan_id:\n scan_id = scan_id.replace(\"-\",\"_\")\n if \".\" in scan_id:\n scan_id = scan_id.replace(\".\",\"_\")\n else:\n scan_id = \"scan_0\"\n\n name = \"_\".join([\"\", sub_id, session_id, scan_id])\n\n rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,\n 'resource_pool': str(resource_pool)}\n\n logger.info(\"Participant info: %s\" % name)\n\n # set output directory\n output_dir = op.join(config[\"output_directory\"], run_name,\n sub_id, session_id, scan_id)\n\n try:\n os.makedirs(output_dir)\n except:\n if not op.isdir(output_dir):\n err = \"[!] Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % output_dir\n raise Exception(err)\n else:\n pass\n\n # for QAP spreadsheet generation only\n config.update({\"subject_id\": sub_id, \"session_id\": session_id,\n \"scan_id\": scan_id, \"run_name\": run_name})\n\n if \"site_name\" in resource_pool:\n config.update({\"site_name\": resource_pool[\"site_name\"]})\n\n logger.info(\"Configuration settings:\\n%s\" % str(config))\n\n qap_types = [\"anatomical_spatial\", \n \"functional_spatial\", \n \"functional_temporal\"]\n\n # update that resource pool with what's already in the output\n # directory\n for resource in os.listdir(output_dir):\n if (op.exists(op.join(output_dir, resource)) and\n resource not in resource_pool.keys()):\n try:\n resource_pool[resource] = \\\n glob.glob(op.join(output_dir, resource, \"*\"))[0]\n except IndexError:\n if \".json\" in resource:\n # load relevant json info into resource pool\n json_file = op.join(output_dir, resource)\n json_dict = read_json(json_file)\n sub_json_dict = json_dict[\"%s %s %s\" % (sub_id,\n session_id,\n scan_id)]\n\n if \"anatomical_header_info\" in sub_json_dict.keys():\n resource_pool[\"anatomical_header_info\"] = \\\n sub_json_dict[\"anatomical_header_info\"]\n\n if \"functional_header_info\" in sub_json_dict.keys():\n resource_pool[\"functional_header_info\"] = \\\n sub_json_dict[\"functional_header_info\"]\n\n for qap_type in qap_types:\n if qap_type in sub_json_dict.keys():\n resource_pool[\"_\".join([\"qap\",qap_type])] = \\\n sub_json_dict[qap_type]\n except:\n # a stray file in the sub-sess-scan output directory\n pass\n\n # create starter node which links all of the parallel workflows within\n # the bundle together as a Nipype pipeline\n resource_pool[\"starter\"] = (starter_node, 'starter')\n\n # individual workflow and logger setup\n logger.info(\"Contents of resource pool for this participant:\\n%s\"\n % str(resource_pool))\n\n # start connecting the pipeline\n qw = None\n for qap_type in qap_types:\n if \"_\".join([\"qap\", qap_type]) not in resource_pool.keys():\n if qw is None:\n from qap import qap_workflows as qw\n wf_builder = \\\n getattr(qw, \"_\".join([\"qap\", qap_type, 
\"workflow\"]))\n workflow, resource_pool = wf_builder(workflow, resource_pool,\n config, name)\n\n if (\"anatomical_scan\" in resource_pool.keys()) and \\\n (\"anatomical_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"anatomical\")\n\n if (\"functional_scan\" in resource_pool.keys()) and \\\n (\"functional_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"functional\")\n\n # set up the datasinks\n out_list = []\n for output in resource_pool.keys():\n for qap_type in qap_types:\n if qap_type in output:\n out_list.append(\"_\".join([\"qap\", qap_type]))\n\n # write_all_outputs (writes everything to the output directory, not\n # just the final JSON files)\n if keep_outputs:\n out_list = resource_pool.keys()\n logger.info(\"Outputs we're keeping: %s\" % str(out_list))\n logger.info('Resource pool keys after workflow connection: '\n '{}'.format(str(resource_pool.keys())))\n\n # Save reports to out_dir if necessary\n if config.get('write_report', False):\n\n if (\"qap_mosaic\" in resource_pool.keys()) and \\\n (\"qap_mosaic\" not in out_list):\n out_list += ['qap_mosaic']\n\n # The functional temporal also has an FD plot\n if 'qap_functional_temporal' in resource_pool.keys():\n if (\"qap_fd\" in resource_pool.keys()) and \\\n (\"qap_fd\" not in out_list):\n out_list += ['qap_fd']\n\n for output in out_list:\n # we use a check for len()==2 here to select those items in the\n # resource pool which are tuples of (node, node_output), instead\n # of the items which are straight paths to files\n\n # resource pool items which are in the tuple format are the\n # outputs that have been created in this workflow because they\n # were not present in the subject list YML (the starting resource\n # pool) and had to be generated\n if (len(resource_pool[output]) == 2) and (output != \"starter\"):\n ds = pe.Node(nio.DataSink(), name='datasink_%s%s'\n % (output,name))\n ds.inputs.base_directory = output_dir\n node, out_file = resource_pool[output]\n workflow.connect(node, out_file, ds, output)\n new_outputs += 1\n elif \".json\" in resource_pool[output]:\n new_outputs += 1\n\n logger.info(\"New outputs: %s\" % str(new_outputs))\n\n # run the pipeline (if there is anything to do)\n if new_outputs > 0:\n if config.get('write_graph', False):\n workflow.write_graph(\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"orig\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"hierarchical\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n if run:\n try:\n logger.info(\"Running with plugin %s\" % runargs[\"plugin\"])\n logger.info(\"Using plugin args %s\" % runargs[\"plugin_args\"])\n workflow.run(plugin=runargs[\"plugin\"],\n plugin_args=runargs[\"plugin_args\"])\n rt['status'] = 'finished'\n logger.info(\"Workflow run finished for bundle %s.\"\n % str(bundle_idx))\n except Exception as e: # TODO We should be more specific here ...\n errmsg = e\n rt.update({'status': 'failed'})\n logger.info(\"Workflow run failed for bundle %s.\"\n % str(bundle_idx))\n # ... 
however this is run inside a pool.map: do not raise\n # Exception\n else:\n return workflow\n\n else:\n rt['status'] = 'cached'\n logger.info(\"\\nEverything is already done for bundle %s.\"\n % str(bundle_idx))\n\n # Remove working directory when done\n if not keep_outputs:\n try:\n work_dir = op.join(workflow.base_dir, scan_id)\n\n if op.exists(work_dir):\n import shutil\n shutil.rmtree(work_dir)\n except:\n logger.warn(\"Couldn\\'t remove the working directory!\")\n pass\n\n if rt[\"status\"] == \"failed\":\n logger.error(errmsg)\n else:\n pipeline_end_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_end_time = time.time()\n logger.info(\"Elapsed time (minutes) since last start: %s\"\n % ((pipeline_end_time - pipeline_start_time) / 60))\n logger.info(\"Pipeline end time: %s\" % pipeline_end_stamp)\n\n return rt", "def test_get_software_bundle(self):\n pass", "def calrissian_make_tool(spec, loadingContext):\n if \"class\" in spec and spec[\"class\"] == \"CommandLineTool\":\n return CalrissianCommandLineTool(spec, loadingContext)\n else:\n return default_make_tool(spec, loadingContext)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acr: Optional[pulumi.Input[pulumi.InputType['ACRArgs']]] = None,\n aks_resource_id: Optional[pulumi.Input[str]] = None,\n app_name: Optional[pulumi.Input[str]] = None,\n branch_name: Optional[pulumi.Input[str]] = None,\n builder_version: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,\n docker_build_context: Optional[pulumi.Input[str]] = None,\n dockerfile: Optional[pulumi.Input[str]] = None,\n dockerfile_generation_mode: Optional[pulumi.Input[Union[str, 'DockerfileGenerationMode']]] = None,\n dockerfile_output_directory: Optional[pulumi.Input[str]] = None,\n generation_language: Optional[pulumi.Input[Union[str, 'GenerationLanguage']]] = None,\n image_name: Optional[pulumi.Input[str]] = None,\n image_tag: Optional[pulumi.Input[str]] = None,\n language_version: Optional[pulumi.Input[str]] = None,\n last_workflow_run: Optional[pulumi.Input[pulumi.InputType['WorkflowRunArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n manifest_generation_mode: Optional[pulumi.Input[Union[str, 'ManifestGenerationMode']]] = None,\n manifest_output_directory: Optional[pulumi.Input[str]] = None,\n manifest_type: Optional[pulumi.Input[Union[str, 'GenerationManifestType']]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n oidc_credentials: Optional[pulumi.Input[pulumi.InputType['GitHubWorkflowProfileOidcCredentialsArgs']]] = None,\n port: Optional[pulumi.Input[str]] = None,\n repository_name: Optional[pulumi.Input[str]] = None,\n repository_owner: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n workflow_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def initialize(context):\n pipe = Pipeline()\n attach_pipeline(pipe, 'ff_example')\n\n # common_stock = CommonStock()\n # # filter down to securities that are either common stock or SPY\n # pipe.set_screen(common_stock.eq(1))\n mkt_cap = MarketEquity()\n pipe.add(mkt_cap, 'market_cap')\n\n book_equity = BookEquity()\n # book equity over market equity\n be_me = book_equity/mkt_cap\n pipe.add(be_me, 'be_me')\n\n returns = Returns(window_length=2)\n pipe.add(returns, 'returns')\n \n dt = get_datetime().normalize()\n start_ = dt if dt > 
START_DATE else START_DATE\n context.result = result.loc[start_: , :]", "def main():\n\n # choose number of data-points and sample a pair of vectors: the input\n # values and the corresponding target values\n N = 500\n inputs, targets = sample_data(N, arbitrary_function_2, seed=1)\n\n # specify the centres and scale of some rbf basis functions\n default_centres = np.linspace(0,1,21)\n default_scale = 0.03\n default_reg_param = 0.08\n\n # get the cross-validation folds\n num_folds = 4\n folds = create_cv_folds(N, num_folds)\n\n # evaluate then plot the performance of different reg params\n evaluate_reg_param(inputs, targets, folds, default_centres, default_scale)\n # evaluate then plot the performance of different scales\n evaluate_scale(inputs, targets, folds, default_centres, default_reg_param)\n # evaluate then plot the performance of different numbers of basis\n # function centres.\n evaluate_num_centres(\n inputs, targets, folds, default_scale, default_reg_param)\n\n plt.show()", "def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")" ]
[ "0.57629657", "0.56087464", "0.54360694", "0.54355013", "0.5393979", "0.53826815", "0.5367081", "0.5317132", "0.52905655", "0.5287661", "0.52496874", "0.5199404", "0.51819116", "0.51798064", "0.51774645", "0.5171329", "0.50645137", "0.50610745", "0.50353134", "0.5028036", "0.50058246", "0.49911824", "0.4987422", "0.49488166", "0.49283755", "0.49149662", "0.49086893", "0.48963335", "0.48933706", "0.48926333", "0.4891367", "0.48855013", "0.4881171", "0.4881084", "0.4848276", "0.48415792", "0.48338562", "0.48283988", "0.48233375", "0.48216087", "0.48071003", "0.48026773", "0.48007643", "0.48003983", "0.4792079", "0.4785831", "0.47853464", "0.47793633", "0.47673258", "0.47624066", "0.47608253", "0.47566923", "0.47545862", "0.47487247", "0.47486064", "0.47483268", "0.4742283", "0.47306466", "0.47235388", "0.47228748", "0.47205815", "0.47129723", "0.47101423", "0.47087955", "0.4708023", "0.470649", "0.47044447", "0.47033718", "0.47018003", "0.46992147", "0.4698384", "0.46969944", "0.46967867", "0.4696564", "0.4688197", "0.46848673", "0.4683682", "0.46834716", "0.46772936", "0.4673197", "0.46728536", "0.46637124", "0.46629423", "0.4660144", "0.4659359", "0.46591225", "0.46547568", "0.4647609", "0.46428397", "0.46405375", "0.4632028", "0.46318176", "0.4628648", "0.46264592", "0.46236518", "0.46222347", "0.4621363", "0.4614411", "0.46066642", "0.46027374" ]
0.6405816
0
identify all reads that map ambiguously and their positions
def run_multimapping(SRA):
    if not os.path.exists("TMP/ambiguous_reads/"):
        os.mkdir("TMP/ambiguous_reads/")
    cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'
    output = subprocess.run(cmd_STAR, shell=True)

    # Keep only multi-mapping reads:
    cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'
    output = subprocess.run(cmd_filter, shell=True)

    cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'
    output = subprocess.run(cmd_samtools2, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by_species.items():\n for replicon_id in replicon_ids:\n self._read_ids = set()\n # First, collect the ids of the aligned reads of\n # this replicon\n for alignment in bam.fetch(reference=replicon_id):\n self._read_ids.add(alignment.qname)\n # Then compare them to the alignments of each\n # replicon of the other organism(s)\n for (\n comp_org,\n comp_replicon_ids,\n ) in references_by_species.items():\n # Only compare replicons of different species\n if org == comp_org:\n continue\n for comp_replicon_id in comp_replicon_ids:\n comparison = sorted([replicon_id, comp_replicon_id])\n # Check if comparison of the two replicons\n # has been done already\n if comparison in done_replicon_comparison:\n continue\n done_replicon_comparison.append(comparison)\n # Compare all read ids of the comparison\n # replicon to the query replicon read ids\n for alignment in bam.fetch(\n reference=comp_replicon_id\n ):\n if alignment.qname in self._read_ids:\n crossmapped_reads.add(alignment.qname)\n no_of_crossmapped_reads = len(crossmapped_reads)\n return crossmapped_reads", "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n 
if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def look_for_read_in_sim(read, sim_info):\n\t\n\tsim_ints = {}\n\t\n\n\t# look through rows of sim info for matches\n\tfor sim_row in sim_info:\n\t\t\n\t\t# look in chimeric\n\t\tif read['merged']:\n\t\t\t\n\t\t\t# if read was merged, we just want to look for either read 1 or 2 annotated as chimeric\n\t\t\tfor annotated_read in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\t\t\t\n\t\t\tfor annotated_read in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\t\n\t\telse:\n\t\t\t# if read wasn't merged, check for this specific read number\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\n\t\t# look in discordant\n\t\tif read['qname'] in sim_row['left_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_left_discord\"] = sim_row\n\t\t\t\n\t\tif read['qname'] in 
sim_row['right_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_right_discord\"] = sim_row\n\t\t\t\n\treturn sim_ints", "def test_read_mapping_file_multiple(reference_multi):\n content, reference = reference_multi\n from_names = list(reference.keys())\n to_names = []\n block_names = []\n\n for k in reference:\n to_names.extend(reference[k].keys())\n for to in reference[k]:\n block_names.extend(reference[k][to].keys())\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n {(0, 'X1'): [(0, 'A')], (0, 'X2'): [(0, 'B')], (0, 'X3'): [(0, 'D')]},\n {(0, 'A'): {(0, 'X1'): 1.0}, (0, 'B'): {(0, 'X2'): 1.0}, (0, 'C'): {(0, 'X2'): 1.0}, (0, 'D'): {(0, 'X3'): 1.0}},\n [])\n mappings = vermouth.map_input.read_backmapping_file(content, force_fields)\n compare_old_new_mappings(mappings, reference)", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def complete_mapping(self):\r\n\r\n self._reset_map()\r\n #position_prey = self.prey.position\r\n #self.complete_map[position_prey[1], position_prey[0]] = 1.0\r\n position_body = [part.position for part in self.body]\r\n\r\n for position in position_body:\r\n self.complete_map[position[1], position[0]] = 1\r\n\r\n return self.complete_map", "def readMappedData(options,phase):\n whole_mapped_data={}\n mapped_data_per_size_per_register={}\n alignment_filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(alignment_filename,\"r\")\n for line in fhr:\n try:\n read_id, strand, chromosome, coordinate, sequence, quality, mapped_times = line.strip().split()\n except 
ValueError:\n print(line)\n continue\n try:\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(sequence)\n except ValueError:\n print(line)\n continue\n if strand==\"-\":\n coordinate+=2\n if chromosome not in whole_mapped_data:\n whole_mapped_data[chromosome]={}\n if coordinate not in whole_mapped_data[chromosome]: \n whole_mapped_data[chromosome][coordinate]=0\n whole_mapped_data[chromosome][coordinate]+=1\n \n if phase!=length:\n continue\n if chromosome not in mapped_data_per_size_per_register:\n mapped_data_per_size_per_register[chromosome]={}\n register=coordinate % length\n if register not in mapped_data_per_size_per_register[chromosome]:\n mapped_data_per_size_per_register[chromosome][register]={}\n if coordinate not in mapped_data_per_size_per_register[chromosome][register]:\n mapped_data_per_size_per_register[chromosome][register][coordinate]=0\n mapped_data_per_size_per_register[chromosome][register][coordinate]+=1\n if mapped_data_per_size_per_register[chromosome][register][coordinate]>2:\n print(\"Trouble with alignments\",length,chromosome,register,coordinate)\n \n return whole_mapped_data,mapped_data_per_size_per_register", "def _find_coordinates(self, coords, ref):\n result = []\n temp_fastq_length = 500\n reference_seq = ''\n with open(ref) as f:\n lines = f.readlines()[1:]\n for line in lines:\n reference_seq += line.strip()\n with open('temp_index/temp_index.fasta', 'w') as fw:\n fw.write('>{}\\n{}'.format(self.read_id, self.seq))\n subprocess.run('bwa index temp_index/temp_index.fasta', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)\n for coord in coords:\n with open('temp_index/coordinate_rDNA.fastq', 'w') as fw:\n fw.write('>temp\\n{}\\n+\\n{}\\n'.format(reference_seq[coord-1:coord+temp_fastq_length-1], 'J' * temp_fastq_length))\n # with -a option, multiple hits are more clearly shown\n utilities.bwa_mapping('temp_index/temp_index.fasta', 'temp_index/coordinate_rDNA.fastq', 'temp_index/temp_sam4coord.sam', multi=True)\n with open('temp_index/temp_sam4coord.sam') as samf:\n map_result = samf.readlines()[2:]\n for mapping in map_result:\n row = mapping.strip().split()\n AS = int(mapping.strip().split('AS:i:')[1].split()[0])\n flag = int(row[1])\n if utilities.easy_flag(flag, 16) != 1:\n direction = '+'\n else:\n direction = '-'\n mapped_coord = int(row[3])\n if AS > 0.3 * temp_fastq_length:\n result.append([coord, mapped_coord, direction])\n return result", "def combine_reads(filtered_reads, positions):\n\n\tcombined_reads = []\n\ttrue_reads = []\n\n\tfor r in filtered_reads:\n\t\t# Find associated position\n\t\tr_position = float(r.get_position())\n\t\tdesired_start = -1\n\n\t\tfor p in positions:\n\t\t\tlow_position = p - 25\n\t\t\thigh_position = p + 25\n\t\t\tif r_position > low_position and r_position < high_position:\n\t\t\t\tdesired_start = p\n\t\t\t\tbreak\n\n\t\tif desired_start is not -1:\n\t\t\t# Find another read that overlaps\n\t\t\tif r_position < desired_start:\n\t\t\t\toffset = desired_start - r_position\n\t\t\t\tfor r2 in filtered_reads:\n\t\t\t\t\tr2_position = float(r2.get_position())\n\t\t\t\t\tif r2_position > desired_start and r2_position <= desired_start + offset and r2_position != r_position:\n\t\t\t\t\t\tfuse_read = r2\n\t\t\t\t\t\tbreak\n\t\t\telif r_position == desired_start:\n\t\t\t\tfuse_read = None\n\t\t\telse:\n\t\t\t\toffset = r_position - desired_start\n\t\t\t\tfor r2 in filtered_reads:\n\t\t\t\t\tr2_position = float(r2.get_position())\n\t\t\t\t\tr2_end = r2_position + 49\n\t\t\t\t\tif r2_end + 49 > 
desired_start and r2_end >= r_position - 1 and r2_position != r_position:\n\t\t\t\t\t\tfuse_read = r2 \n\t\t\t\t\t\tbreak\n\n\t\t\tif fuse_read is None:\n\t\t\t\tif r_position == desired_start:\n\t\t\t\t\ttrue_reads.append(r)\n\t\t\telse:\n\t\t\t\tr.fuse_read(fuse_read, desired_start)\n\t\t\t\tcombined_reads.append(r)\n\t\t\t\t\n\n\tdef f(x): return len(x.get_read()) == 50\n\tcombined_reads = filter(f, combined_reads)\n\n\t# for c in combined_reads:\n\t# \tc.print_read()\n\t# \tprint '\\n'\n\t\n\treturn (combined_reads, true_reads)", "def map_reads(SRA):\n\n #1. bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)", "def scan(self):\n for fn in self.map:\n coords = list(self.map[fn].keys())\n coords.sort()\n for coord in coords:\n yield fn, coord, self.map[fn][coord]", "def caricaReadsEsIn(fileInput):\n\n\tidx_gene \t= 4 \n\tidx_chrom \t= 0\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 6\n\n\tdictReadsEsIn = {}\n\n\tlines = [x.strip('\\n').split('\\t') for x in open(fileInput)]\n\t\n\tfor riga in lines:\n\t\tgeneName \t= riga[idx_gene]\n\t\tchrom\t\t= riga[idx_chrom]\n\t\tstart\t\t= riga[idx_start]\n\t\tend\t\t\t= riga[idx_end]\n\t\treads\t\t= riga[idx_reads]\n\n\t\tif not geneName in dictReadsEsIn:\n\t\t\tdictReadsEsIn[geneName] 
= {}\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\t# Il primo campo indica se il cromosoma ha almeno..\n\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..una regione con reads\n\t\telif chrom not in dictReadsEsIn[geneName]:\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\n\t\telse:\n\t\t\tdictReadsEsIn[geneName][chrom][idx_start].append(start)\n\t\t\tdictReadsEsIn[geneName][chrom][idx_end].append(end)\n\t\t\tdictReadsEsIn[geneName][chrom][3].append(reads)\n\n\t\ti = len(dictReadsEsIn[geneName][chrom][3])\n\t\tif int(dictReadsEsIn[geneName][chrom][3][i-1]) != 0:\n\t\t\tdictReadsEsIn[geneName][chrom][0] = True\t\t\t\t\t\t\t# Indica se c'e' almeno una regione esonica/intronica\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# che mappa delle reads\n\n\t# Si eliminano i cromosomi che non hanno mappato reads ne' su introni\n\t# ne' su esoni (primo value del dizionario = FALSE)\n\t#\n\tgeneKeys = dictReadsEsIn.keys()\n\tfor geneName in geneKeys:\n\t\tchromKeys = dictReadsEsIn[geneName].keys()\n\t\tfor chrom in chromKeys:\n\t\t\tif not dictReadsEsIn[geneName][chrom][0]:\n\t\t\t\tdel dictReadsEsIn[geneName][chrom]\n\t\t\t\t# Si eliminano i geni che non hanno piu' cromosomi\n\t\t\t\t#\n\t\t\t\tif not dictReadsEsIn[geneName]:\n\t\t\t\t\tdel dictReadsEsIn[geneName]\n\t\t\t\t\tprint 'Il gene %s non presenta cromosomi con reads mappanti.\\n' % geneName,\n\n\treturn dictReadsEsIn", "def test_toplevel_query_lat_mappings(self):\n\n patient = Semiology('Aphasia', Laterality.NEUTRAL, Laterality.NEUTRAL)\n patient.data_frame = self.df\n all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n self.assertIs(type(all_combined_gifs), pd.DataFrame)\n assert not all_combined_gifs.empty\n\n labels = ['Gif Parcellations', 'pt #s']\n all_combined_gifs = all_combined_gifs.astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n new_all_combined_gifindexed = all_combined_gifs.loc[:, labels]\n\n new_all_combined_gifindexed.set_index(\n 'Gif Parcellations', inplace=True)\n\n # new_all_combined_gifindexed.to_csv(r'D:\\aphasia_fixture.csv')\n # load fixture:\n fixture = pd.read_excel(\n dummy_data_path,\n header=0,\n usecols='A:B',\n sheet_name='fixture_aphasia',\n index_col=0,\n engine=\"openpyxl\",\n )\n # fixture.sort_index(inplace=True)\n assert((new_all_combined_gifindexed.shape) == (fixture.shape))\n# print('new_all_combined_gifindexed.shape is: ',\n# new_all_combined_gifindexed.shape)\n# print('fixture.shape.shape is: ', fixture.shape)\n\n assert(new_all_combined_gifindexed.index == fixture.index).all()\n assert(new_all_combined_gifindexed.values == fixture.values).all()", "def _annotate(reads, mirbase_ref, precursors):\n for r in reads:\n for p in reads[r].precursors:\n start = reads[r].precursors[p].start + 1 # convert to 1base\n end = start + len(reads[r].sequence)\n for mature in mirbase_ref[p]:\n mi = mirbase_ref[p][mature]\n is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])\n logger.debug((\"{r} {p} {start} {is_iso} {mature} {mi} {mature_s}\").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))\n if is_iso:\n reads[r].precursors[p].mirna = mature\n break\n return reads", "def map_reads_2genes(self, reads_file):\n start1 = time()\n read_starts = self.__get_reads_pos(reads_file)\n start2 = time()\n times = 0\n for ref_gene in self.ref_genes:\n times += 1\n if times % 500 == 0:\n print 'calculated %d genes read count ...' 
% times\n if len(read_starts[ref_gene.chrom]) == 0:\n continue\n starts = read_starts[ref_gene.chrom]\n for es, ed in zip(ref_gene.exon_starts, ref_gene.exon_ends):\n # rd = starts[(starts > es) & (starts < ed)].size\n rd = cal_read_count(es, ed, starts)\n ref_gene.read_count += rd\n\n print 'start calculate rpkm ...'\n mapped_read_count = self.mapped_read_count\n for ref_gene in self.ref_genes:\n # calculate RPKM\n ref_gene.read_density = \\\n ref_gene.read_count * 1000 * 1000 * 1000. / (ref_gene.mRNA_length * mapped_read_count)\n print 'got reads time: %f' % (time() - start1)\n print 'map reads time: %f' % (time() - start2)", "def read_flat_map(filename,i_map=0) :\n hdul=fits.open(filename)\n w=WCS(hdul[0].header)\n\n maps=hdul[i_map].data\n ny,nx=maps.shape\n\n return w,maps", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, 
MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def get_ROIs(self, base):\n locs3d = self.locs3d\n #print loc3d\n base_locs = locs3d[base]\n ROI_dic = dict((i, [Id]) for i,Id in enumerate(base))\n for i, loc in enumerate(locs3d):\n if i not in base:\n dist = np.sqrt(np.sum((base_locs - loc)**2, 1))\n min_i = np.argmin(dist)\n ROI_dic[min_i].append(i)\n out = ROI_dic.values()\n return out", "def _parse_alignment( alignment ):\n log.info(\"Parsing subread locations from alignment data\")\n locations = {}\n for entry in BlasrReader( alignment ):\n if entry.tstrand == '1':\n start = int(entry.tlength) - int(entry.tend)\n end = int(entry.tlength) - int(entry.tstart)\n else:\n start = int(entry.tstart)\n end = int(entry.tend)\n locations[entry.qname] = (start, end)\n return locations", "def test_combine_mappings(self):\r\n\r\n self.tmp_dir = mkdtemp(dir=\"./\", suffix=\"/\")\r\n\r\n combine_mappings(\r\n fasta,\r\n denoiser_mapping,\r\n denoised_seqs,\r\n otu_picker_map,\r\n self.tmp_dir)\r\n\r\n observed_otu_map = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_otu_map.txt\")))\r\n\r\n expected_otu_map = \"\"\"1:\\tS1_1\\tS1_2\\tS2_4\\tS2_5\r\n2:\\tS2_3\\tS1_6\r\n\"\"\"\r\n self.assertEqual(observed_otu_map, expected_otu_map)\r\n\r\n observed_fasta = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_all.fasta\")))\r\n expected_fasta = \"\"\">S1_1 Read1\r\nAAA\r\n>S1_2 Read2\r\nTTT\r\n>S2_3 Read3\r\nGGG\r\n\"\"\"\r\n self.assertEqual(observed_fasta, expected_fasta)", "def __init__(self, reads, fasta_handler, chromosome_name, region_start_position, region_end_position):\n self.region_start_position = region_start_position\n self.region_end_position = region_end_position\n self.chromosome_name = chromosome_name\n self.fasta_handler = fasta_handler\n self.reads = reads\n\n # the store which reads are creating candidates in that position\n self.coverage = defaultdict(int)\n self.rms_mq = defaultdict(int)\n self.mismatch_count = defaultdict(int)\n self.match_count = defaultdict(int)\n\n # the base and the insert dictionary for finding alleles\n self.positional_allele_dictionary = {}\n self.read_allele_dictionary = {}\n self.reference_dictionary = {}\n\n # few new dictionaries for image creation\n self.base_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.insert_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.delete_dictionary = defaultdict(lambda: defaultdict(tuple))\n self.read_info = defaultdict(tuple)\n self.insert_length_info = defaultdict(int)\n self.delete_length_info = defaultdict(int)\n self.positional_read_info = defaultdict(list)\n\n # for image generation\n self.image_row_for_reads = defaultdict(tuple)\n self.image_row_for_ref = defaultdict(list)\n self.positional_info_index_to_position = defaultdict(tuple)\n self.positional_info_position_to_index = defaultdict(tuple)\n self.allele_dictionary = defaultdict(lambda: defaultdict(list))\n self.read_id_by_position = defaultdict(list)", "def process_reads(self, ref, reads, ref_name, ref_offset):\n # A list of candidate positions mapping to their number of supporting reads.\n candidates = defaultdict(int)\n\n for read in reads:\n for ref_pos in self.process_read(ref, read, ref_offset):\n candidates[ref_pos] += 1\n return self.windows(candidates, ref_name, ref_offset)", "def 
test_extract_read_to_sample_mapping(self):\r\n\r\n labels = [\r\n 'S160_1 E86FECS01DW5V4 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0',\r\n 'S160_2 E86FECS01DW5V5 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0']\r\n\r\n expected = {'E86FECS01DW5V4': 'S160_1',\r\n 'E86FECS01DW5V5': 'S160_2'}\r\n\r\n self.assertEqual(extract_read_to_sample_mapping(labels),\r\n expected)", "def get_chromosome_reads(bam):\n stats = bam.get_index_statistics()\n mapped_reads = {}\n for stat in stats:\n mapped_reads[stat[0]] = [stat[1], stat[2], stat[3]]\n if stat[2] != 0:\n warnings.warn(\"Unmapped reads found in chromosome \" + stat[0])\n\n return mapped_reads", "def get_index_info(reads: List[Union[reads_pb2.Read,\n deepconsensus_pb2.Subread]],\n index: int) -> Tuple[bool, bool]:\n\n insert_char = cigar_utils.CIGAR_OPS_TO_CHAR[cigar_pb2.CigarUnit.INSERT]\n out_of_bounds = True\n has_insert = False\n for read in reads:\n expanded_cigar = get_expanded_cigar(read)\n if index < len(expanded_cigar):\n out_of_bounds = False\n has_insert = expanded_cigar[index] == insert_char\n # Stop early as soon as we find a read with an insertion.\n if has_insert:\n break\n return out_of_bounds, has_insert", "def count_mapped_bases(bam):\n\n for read in open_bam(bam):\n if not read.is_secondary:\n count = Counter(read.query_alignment_sequence)\n yield(count)", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def seed_and_extend(read, k, h, index, genome):\n\n list_mapping_read = [] # List containing the positions tested to map the read on the 
genome\n #(will be used to not try to align a read twice at the same position)\n\n # Variables which will be returned\n position_mapping = len(genome) # Optimal position of mapping for the read\n nb_mismatch = int(h) + 1 # Number of mismatch in this mapping\n list_mismatch = [] # List of mismatch positions on the genome\n\n for kmer_index in range(len(read)-int(k)+1):\n kmer = read[kmer_index:kmer_index + int(k)]\n # For each kmer, tries to find the optimal position of mapping\n # for the read with this kmer as seed.\n position_mapping_kmer = len(genome)\n nb_mismatch_kmer = int(h) + 1\n list_mismatch_kmer = []\n\n list_occurences = sorted(index.get_occurences(kmer))\n\n if not list_occurences:\n continue\n\n for occurences in list_occurences:\n\n nb_mismatch_occu = 0 # For each occurence of the kmer,\n # count the number of mismatch during alignment\n\n list_mismatch_occu = [] # List of mismatch seen during alignment\n # of read with this occurence of the kmer\n\n index_char_genome = occurences - kmer_index # Index where to map in the genome\n index_char_read = 0 # Index of the character to compare\n\n if index_char_genome in list_mapping_read: # If position already tested,\n #do not test it a second time.\n continue\n else:\n list_mapping_read.append(index_char_genome) # Add this position to the list\n # so it won't be tested a second time for this read\n\n while nb_mismatch_occu <= int(h) \\\n and index_char_read < len(read) \\\n and index_char_genome < len(genome):\n if genome[index_char_genome] != read[index_char_read]:\n nb_mismatch_occu += 1\n list_mismatch_occu.append(index_char_genome)\n\n index_char_genome += 1\n index_char_read += 1\n\n\n # If the mapping of the read with this occurence of the read\n # is better than the previous one (less mismatch) : optimal values for kmer stored\n if nb_mismatch_occu < nb_mismatch_kmer:\n nb_mismatch_kmer = nb_mismatch_occu\n list_mismatch_kmer = list_mismatch_occu\n position_mapping_kmer = occurences - kmer_index\n\n # If the best mapping found for this kmer is better than the mapping\n # found with the previous kmer : optimal values for read stored\n if nb_mismatch_kmer < nb_mismatch \\\n or nb_mismatch_kmer == nb_mismatch \\\n and position_mapping_kmer < position_mapping:\n nb_mismatch = nb_mismatch_kmer\n list_mismatch = list_mismatch_kmer\n position_mapping = position_mapping_kmer\n\n return position_mapping, nb_mismatch, list_mismatch", "def _assign_reads( medians, centroids ):\n log.info(\"Assigning subreads reads to the closet amplicon cluster\")\n assignments = {'5p':set(), '3p':set()}\n five_prime, three_prime = centroids\n for read, median in medians.iteritems():\n five_prime_diff = abs(median - five_prime)\n three_prime_diff = abs(median - three_prime)\n if five_prime_diff < three_prime_diff:\n assignments['5p'].add( read )\n else:\n assignments['3p'].add( read )\n return assignments", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n 
new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def mapsMatch(m1,m2):\n same = True\n f1 = file(m1,'r').readlines()\n f2 = file(m2,'r').readlines()\n for i, row in enumerate(f1):\n row = row.strip().split()\n row2 = f2[i].strip().split()\n if row[0] <> row2[0]:\n\t same = False\n break\n return same", "def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def getMasterMap(self,masterInfo):\n masterMap = [0]\n #--Map'em\n for mmName in masterInfo.masterNames:\n if mmName not in self.masterNames: \n raise MoshError(_(\"Misordered esm: %s should load before %s\") % (mmName, masterInfo.name))\n masterMap.append(self.masterNames.index(mmName)+1)\n #--Done\n return masterMap", "def get_observations(asteroid_map: str) -> Dict[Asteroid, List[Asteroid]]:\n # initialize asteroid map\n asteroids = intialize_asteroid_map(asteroid_map)\n all_observations = {}\n for asteroid_1 in asteroids:\n asteroid_1_observations = {}\n for asteroid_2 in asteroids:\n if asteroid_1 == asteroid_2:\n continue\n angle = calculate_angle(asteroid_1, asteroid_2)\n if angle in asteroid_1_observations:\n asteroid_1_observations[angle].append(asteroid_2)\n else:\n asteroid_1_observations[angle] = [asteroid_2]\n all_observations[asteroid_1] = asteroid_1_observations\n return all_observations", "def get_mapping_details(mapping_fp,\r\n suppress_barcode_checks=False,\r\n suppress_primer_checks=False):\r\n\r\n mapping_f = open(mapping_fp, \"U\")\r\n\r\n # Only using the id_map and the errors from parsing the mapping file.\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(mapping_f)\r\n\r\n mapping_f.close()\r\n\r\n # Should raise errors for barcodes or primers unless suppressed, and\r\n # should raise errors for headers or duplicate SampleIDs in any case.\r\n loc_bcs = \",1\"\r\n loc_primers = \",2\"\r\n if errors:\r\n for curr_error in errors:\r\n # Halt when header has error\r\n if curr_error.startswith(\"Found header field\"):\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n elif curr_error.endswith(loc_bcs):\r\n # Halt for barcode errors unless suppressed\r\n if suppress_barcode_checks:\r\n continue\r\n else:\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n elif curr_error.endswith(loc_primers):\r\n # Halt for primer errors unless suppressed\r\n if suppress_primer_checks:\r\n continue\r\n else:\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file with validate_mapping_file.py')\r\n # Raise error on duplicate sample IDs\r\n elif curr_error.startswith(\"Duplicate SampleID\"):\r\n raise ValueError('Error in mapping file, please validate '\r\n 'mapping file 
with validate_mapping_file.py')\r\n\r\n # create dict of dicts with SampleID:{each header:mapping data}\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n sample_ids = id_map.keys()\r\n\r\n barcode_seqs = []\r\n raw_linkerprimer_seqs = []\r\n\r\n for curr_id in id_map:\r\n if not suppress_barcode_checks:\r\n barcode_seqs.append(id_map[curr_id]['BarcodeSequence'])\r\n if not suppress_primer_checks:\r\n raw_linkerprimer_seqs.append(\r\n id_map[curr_id]['LinkerPrimerSequence'])\r\n\r\n # remove duplicates\r\n raw_linkerprimer_seqs = set(raw_linkerprimer_seqs)\r\n\r\n linker_primer_seqs = expand_degeneracies(raw_linkerprimer_seqs)\r\n\r\n return set(sample_ids), set(barcode_seqs), set(linker_primer_seqs)", "def test_check_map(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", 
"def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in 
alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def _fill_impropers_cross_maps(self) -> None:\n impropers, cross_maps = [], []\n for residue in self.residues:\n for improper in residue.impropers:\n impropers.append([self._id_to_index[x] for x in improper])\n for cross_map in residue.cross_maps:\n cross_maps.append([self._id_to_index[x] for x in cross_map])\n self.impropers, self.cross_maps = impropers, cross_maps", "def read_activity_mappings_both(self):\n with open('oca.translate', \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 3)\n self.amappings[x[0]] = x[1]\n self.amappings2[x[0]] = x[2]", "def find(self, read, aa=None):\n aa = aa or ['C']\n\n for i, base in enumerate(read.sequence):\n if base in aa:\n yield Landmark(self.NAME, self.SYMBOL, i, 1)", "def test_reading_warehouse_map(warehouse_map1):\n map_ = (\n (2, 2, 2, 2, 3),\n (1, \"a\", 1, \"b\", 3),\n (1, \"c\", 1, \"d\", 3),\n (1, \"e\", 1, \"f\", 3),\n (1, 4, 4, 4, 9),\n )\n assert map_ == warehouse_map1", "def inferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping,\n sequences_mapping, protein_mapping, verbose=False, throwOnMismatch=False, fileType=None):\n import csv, os\n\n if fileType == \"simple\":\n return simpleInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"traml\":\n return tramlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n elif fileType == \"sqmass\":\n return sqlInferMapping(rawdata_files, aligned_pg_files, mapping, precursors_mapping, sequences_mapping, protein_mapping, verbose=verbose)\n\n nomatch_found = set([])\n for file_nr, f in enumerate(aligned_pg_files):\n header_dict = {}\n if f.endswith('.gz'):\n import gzip \n filehandler = gzip.open(f,'rb')\n else:\n filehandler = open(f)\n reader = csv.reader(filehandler, delimiter=\"\\t\")\n header = next(reader)\n for i,n in enumerate(header):\n header_dict[n] = i\n\n if not \"align_origfilename\" in header_dict or not \"align_runid\" in header_dict:\n\n # Check whether we have a single mzML file and a single result\n # file. 
If so, simply map these to each other.\n if len(rawdata_files) == 1 and len(aligned_pg_files) == 1:\n mapping[\"0_0\"] = rawdata_files\n return\n\n print (header_dict)\n raise Exception(\"need column header align_origfilename and align_runid\")\n\n for this_row in reader:\n\n if len(this_row) == 0: \n continue\n\n # Get the transition mapping ... \n mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping)\n\n # 1. Get the original filename (find a non-NA entry) and the corresponding run id\n aligned_fname, aligned_id = getAlignedFilename(this_row, header_dict)\n\n if aligned_id is None or aligned_id in mapping:\n continue \n\n # 2. Go through all chromatogram input files and try to find\n # one that matches the one from align_origfilename\n for rfile in rawdata_files:\n\n # 2.1 remove common file endings from the raw data\n rfile_base = os.path.basename(rfile)\n for ending in [\".sqMass\", \".filter\", \".mzML\", \".chrom\"]:\n rfile_base = rfile_base.split(ending)[0]\n\n # 2.3 Check if we have a match\n if aligned_fname == rfile_base:\n if verbose: \n print(\"- Found match:\", os.path.basename(rfile), \"->\", os.path.basename(this_row[ header_dict[\"align_origfilename\"] ]))\n mapping[aligned_id] = [rfile]\n\n if not aligned_id in mapping:\n if True:\n nomatch_found.update( [aligned_fname] )\n if throwOnMismatch:\n raise Exception(\"Mismatch, alignment filename could not be matched to input chromatogram\")\n\n if verbose:\n print(\"- No match found for :\", list(nomatch_found), \"in any of\", \\\n [os.path.basename(rfile) for rfile in rawdata_files])\n print(\"- This may be a bad sign if you expected a match here. You might have \" +\\\n \"to either rename your files to have matching filenames \" +\\\n \"or provide an input yaml file describing the matching in detail.\")", "def map():", "def test_get_sam_ids(self):\r\n map_file = StringIO.StringIO(\"\"\"#SampleID\tCountry\tAgeYears\tFamily\tAgeCat\r\n h208A.1\tMalawi\t0.032854209\th208\tChild\r\n h301A.1\tMalawi\t0.05\th301\tChild\r\n h301B.1\tMalawi\t0.05\th301\tChild\r\n USinfTw20.1\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw20.2\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw1.1\tUSA\t0.083333333\tUSinfTw1\tChild\r\n h10M\tMalawi\t26\th10\tAdult\r\n h68M\tMalawi\t26\th68\tAdult\r\n TS25\tUSA\t26\tUSts9\tAdult\r\n TS26\tUSA\t26\tUSts9\tAdult\"\"\")\r\n\r\n map_data, map_header, comments = parse_mapping_file(map_file)\r\n colorby = 'Country'\r\n cat = 'USA'\r\n primary_state = 'AgeCat:Child'\r\n ids1, ids2 = get_sam_ids(map_data, map_header, colorby, cat,\r\n primary_state, secondary_state=None)\r\n self.assertEqual(set(ids1),\r\n set(['USinfTw20.1', 'USinfTw20.2', 'USinfTw1.1']))\r\n self.assertEqual(set(ids2), set(['TS25', 'TS26']))", "def sam_parsed(sam_file):\n\n sam_file= open(sam_file)\n\n sam_dic = {}\n read_frame_dic ={}\n count = 0\n counter_1 = 0\n counter_2 = 0\n #.sam file parsed - crucial information was retrited (scaffold information)\n # start - the starting position of the locus_sequence\n #reading_frame - locus in the correct sense [0] or CR [16]\n #sequence_locus - locus sequence information\n\n for line in sam_file:\n\n if line.startswith(\"@\"):\n pass\n\n else:\n line_information = line.strip().split()\n scaffold = line_information[2]\n loci = line_information[0]\n mapping_beginning = line_information[3]\n read_frame = line_information [1]\n locus_sequence = line_information [9]\n cigar = line_information [5]\n if \"D\" in cigar or \"I\" in cigar:\n count += 1\n if \"D\" in cigar and 
\"I\" in cigar:\n counter_2 +=1\n a = count - counter_2\n if scaffold != \"*\":\n sam_dic[loci] = {\"scaffold\": scaffold,\n \"start\": int(mapping_beginning),\n \"reading_frame\": read_frame,\n \"sequence_locus\": locus_sequence,\n \"cigar\": cigar}\n counter_1 +=1\n print (\"Number of loci mappead on Cg: {}\".format(len(sam_dic)))\n\n print (\"Step 1 - Parse the .sam file -- Done\")\n\n #The sam_dic return a dictionary where the key is the locus(read) and the\n #value has the scaffold information, the position of the gene beginin,\n #the correct read frame of the gene, and finally the sequence of locus, in\n #the same reading frame of the Cg\n\n # \n # print (\"Number of locus with insertion or deletion \" + str(count))\n # print (\"Number of locus with insertion and deletion \" + str(counter_2))\n # print (\"Number of locus with problems \" + str(a))\n return sam_dic", "def _update_farness_map(self,ind):", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def _is_mapping_correct(self):\n for i in range(self.mapping_size):\n target = self.mapping[i]\n if target < 0:\n continue\n if target == i // 2:\n continue\n return False\n return True", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and 
int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def parse_geno_file(folder,return_flag):\n\n perc_alt = defaultdict(list)\n perc_ref = defaultdict(list)\n abs_alt = defaultdict(list)\n abs_ref = defaultdict(list)\n\n perc_alt_inv = defaultdict(dict)\n perc_ref_inv = defaultdict(dict)\n abs_alt_inv = defaultdict(dict)\n abs_ref_inv = defaultdict(dict)\n\n for geno_file in glob.glob(folder+'*_test_summary.tsv'):\n strain = geno_file.split('/')[-1].split('_')[0]\n #print strain\n prev_coordinate = \"0\"\n count = 0\n alt_allele = {}\n amb_allele = {}\n ref_allele = {}\n flag = 0 \n\n TEMP_HANDLE = open(geno_file,'r')\n for line in TEMP_HANDLE:\n line = line.rstrip('\\n')\n\n if(line[0]!='v'): ## Skip the header\n coordinate = line.split('\\t')[0].split('::')[-1]\n if(coordinate != prev_coordinate):\n #prev_coordinate = coordinate\n count = count + 1\n if(count == 1):\n if(line.split('\\t')[-3]!='alt'): ## No reads supporting the alternate allele\n flag = 1 \n alt_allele[coordinate] = 0\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n #print line\n else:\n alt_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 2):\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 3):\n if(line.split('\\t')[-3]!='ref'): ## No reads supporting the reference allele (all are ambiguous)\n ref_allele[coordinate] = 0\n else:\n ref_allele[coordinate] = int(line.split('\\t')[-1])\n prev_coordinate = coordinate\n count = 0\n if(flag == 1): ## The case where there are no alternate allele reads, counter is incremented to account for changed numbering\n count = count + 1 \n flag = 0 \n\n \n for key in alt_allele:\n if(alt_allele[key]+ref_allele[key]!= 0): ## Check to see if the denominator is not zero\n abs_alt[strain].append(float(alt_allele[key]))\n abs_ref[strain].append(float(ref_allele[key]))\n perc_alt[strain].append(float(alt_allele[key])/(alt_allele[key]+ref_allele[key]))\n perc_ref[strain].append(float(ref_allele[key])/(alt_allele[key]+ref_allele[key]))\n\n\n abs_alt_inv[strain][key] = float(alt_allele[key])\n abs_ref_inv[strain][key] = float(ref_allele[key])\n perc_alt_inv[strain][key] = float(alt_allele[key])/(alt_allele[key]+ref_allele[key])\n perc_ref_inv[strain][key] = float(ref_allele[key])/(alt_allele[key]+ref_allele[key])\n \n \n\n ## Keep only the common inversions, i.e. 
those between MC and the rest \n all_inversions = []\n common_inversions = []\n abs_alt_set = defaultdict(list)\n perc_alt_set = defaultdict(list)\n\n abs_alt_inv_set = defaultdict(dict)\n perc_alt_inv_set = defaultdict(dict)\n abs_ref_inv_set = defaultdict(dict)\n perc_ref_inv_set = defaultdict(dict)\n\n Rock = ['AC', 'CL','CM','CN','TI','PN','MC']\n Sand = ['MZ','DC','LF','MP','MS','CV']\n\n\n sand_inversions = []\n rock_inversions = []\n\n for strain in abs_alt_inv.keys():\n for inversion in abs_alt_inv[strain].keys():\n if(strain in Rock):\n rock_inversions.append(inversion)\n else:\n sand_inversions.append(inversion)\n all_inversions.append(inversion)\n \n \n common_inversions_sand = Counter(sand_inversions)\n common_inversions_rock = Counter(rock_inversions)\n #count_sand = 0\n common_inversions = Counter(all_inversions)\n return_inversions = []\n \n \n #print common_inversions\n for inversion in common_inversions.keys():\n if(common_inversions[inversion]==13):\n return_inversions.append(inversion)\n for strain in abs_alt_inv.keys():\n abs_alt_set[strain].append(abs_alt_inv[strain][inversion])\n perc_alt_set[strain].append(perc_alt_inv[strain][inversion])\n\n abs_alt_inv_set[strain][inversion] = abs_alt_inv[strain][inversion]\n perc_alt_inv_set[strain][inversion] = perc_alt_inv[strain][inversion]\n abs_ref_inv_set[strain][inversion] = abs_ref_inv[strain][inversion]\n perc_ref_inv_set[strain][inversion] = perc_ref_inv[strain][inversion]\n\n\n for inversion in abs_alt_inv_set['MC']:\n alternate_allele_sum_rock = 0\n reference_allele_sum_rock = 0\n alternate_allele_sum_sand = 0\n reference_allele_sum_sand = 0 \n for strain in Rock:\n alternate_allele_sum_rock = alternate_allele_sum_rock + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_rock = reference_allele_sum_rock + abs_ref_inv_set[strain][inversion]\n\n for strain in Sand:\n alternate_allele_sum_sand = alternate_allele_sum_sand + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_sand = reference_allele_sum_sand + abs_ref_inv_set[strain][inversion]\n\n abs_alt_set['Rock'].append(alternate_allele_sum_rock)\n perc_alt_set['Rock'].append(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock)))\n \n abs_alt_set['Sand'].append(alternate_allele_sum_sand)\n perc_alt_set['Sand'].append(float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand)))\n \n with open('log_file.txt','a') as LOG_FILE:\n if(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock))>float(sys.argv[2]) or float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand))>float(sys.argv[2])):\n print >> LOG_FILE,inversion \n \n\n print \"Sand : \"+str(count_sand)\n\n if return_flag == True:\n #print len([abs_alt_inv_set,abs_ref_inv_set,perc_alt_inv_set,perc_ref_inv_set])\n return perc_alt_inv_set\n else:\n return [abs_alt_set,perc_alt_set]", "def extract_map(infile, outfile):\n subprocess.check_call(['mudraw', '-w', '1800', '-h', '1800', '-o', outfile, infile, '1'])", "def others_locations(state):\n others_ = others(state)\n locations = {i: e['pos'] for i, e in others_.items()}\n return locations", "def test_read_multiple(self):\n meshes = stlreader.get_data(self.stl_multi_file)\n for name, vertices, polygons in meshes:\n self.assertEqual(name, \"{}#{}\".format(os.path.basename(self.stl_multi_file), 0))\n self.assertTrue(len(vertices) > 0)\n self.assertTrue(len(polygons) > 0)\n polygon_ids = list()\n for a, b, c in 
polygons.itervalues():\n polygon_ids += [a, b, c]\n self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))", "def reference_to_signal_partial_mapping(rb_map_string, reference_location, read_location, contig_name,\n ref_start, bas_start):\n\n a, b = basecall_to_reference_mapping(rb_map_string, ref_start, bas_start)\n f = h5py.File(read_location, 'r')\n grp = np.array(f.get('/Analyses/Basecall_1D_000/BaseCalled_template/Events'))\n bts = base_to_signal_mapping(grp)\n norm_sig = normalized_signal(grp)\n vectors_for_nn = np.array([], dtype=np.int64).reshape(0, cs.NN_VECTOR_LENGTH)\n\n for i in b:\n rs = i[0]\n re = i[1]\n bs = i[2]\n # R=B cast sekvencie\n ref = refrence_sequence_from_interval(reference_location, contig_name, rs, re)\n left_border = int(cs.LENGTH/2 - 2)\n right_border = int(cs.LENGTH/2 + 2)\n ref1 = np.concatenate(create_one_hot(ref))\n\n for x in range(0, len(ref)-cs.LENGTH, 5):\n start = bts[bs+x+left_border]\n end = bts[bs+x+right_border]\n number_of_signals = end - start + 1\n\n if number_of_signals < cs.SIGNAL_LENGTH:\n d = int((cs.SIGNAL_LENGTH - number_of_signals) / 2)\n signal_relevant_start = bs+x+left_border - d\n signal_relevant_end = bs+x + left_border + number_of_signals + d - 1 \\\n if number_of_signals + 2*d == cs.SIGNAL_LENGTH else \\\n bs + x + left_border + number_of_signals + d\n else:\n continue\n\n signal_relevant = []\n [signal_relevant.append(x) for x in norm_sig[signal_relevant_start:signal_relevant_end+1]]\n id_sig, std = ideal_signal_for_sequence(ref[x:x+cs.LENGTH])\n help_con = np.concatenate((ref1[4*x:4*(x+cs.LENGTH)], np.array(signal_relevant)), axis=0)\n help_con = np.concatenate((help_con, id_sig), axis=0)\n help_con = np.concatenate((help_con, [std]), axis=0)\n\n if len(help_con) != cs.NN_VECTOR_LENGTH:\n break\n vectors_for_nn = np.append(vectors_for_nn, help_con[None, :], axis=0)\n\n return vectors_for_nn", "def get_read_properties(line, merged_reads):\n\tparts = line.split('\\t')\n\n\tif int(parts[1]) & 64 != 0:\n\t\tread_num = \"1\"\n\telif int(parts[1]) & 128 != 0:\n\t\tread_num = \"2\"\n\telse:\n\t\traise ValueError(f\"read {read.qname} is neither read1 nor read2, but reads must be paired\")\n\t\n\tif parts[0] in merged_reads:\n\t\tmerged = True\n\telse:\n\t\tmerged = False\n\t\n\treturn {\n\t\t'qname' : parts[0],\t\n\t\t'num' : read_num,\n\t\t'merged' : merged\n\t}", "def find_nearby_membranes(all_membranes, all_membrane_map, vert_normals):\r\n membrane_tree = scipy.spatial.cKDTree(all_membranes)\r\n nearby_membranes = np.array(list(membrane_tree.query_pairs(adhesion_max_dist, p=2)))\r\n nearby_membrane_map = defaultdict(list)\r\n if nearby_membranes.shape[0] > 0:\r\n # Exclude same-cell membrane interactions and same-direction-facing segments\r\n all_vert_normals = np.concatenate(vert_normals, axis=0)\r\n subset = np.where(\r\n (all_membrane_map[nearby_membranes[:, 0], 0] !=\r\n all_membrane_map[nearby_membranes[:, 1], 0])\r\n & (np.einsum('ij,ik->i', all_vert_normals[nearby_membranes[:, 0]], all_vert_normals[nearby_membranes[:, 1]]) < 0.0)\r\n )\r\n nearby_membranes = nearby_membranes[subset]\r\n # {cell idx: (vert idx, other cell idx, other vert idx, 'all_membranes' vert idx)}\r\n for nm in nearby_membranes:\r\n m0 = all_membrane_map[nm[0]]\r\n m1 = all_membrane_map[nm[1]]\r\n nearby_membrane_map[m0[0]].append((m0[1], m1[0], m1[1], nm[1]))\r\n nearby_membrane_map[m1[0]].append((m1[1], m0[0], m0[1], nm[0]))\r\n nearby_membrane_map = {k: np.array(v)\r\n for k, v in nearby_membrane_map.items()}\r\n# print(nearby_membrane_map)\r\n 
return nearby_membranes, nearby_membrane_map", "def makeSNPMap(snpfile, referencemap):\n\tbimfile = open(snpfile, \"r\") # open the input file\n\tmapfile = open(referencemap, \"r\")\n\toutfilename = re.sub(r'\\.bim', '.markerpos', snpfile)\n\tposfilename = re.sub(r'\\.bim', '.snp_locations', snpfile)\n\toutfile = open(outfilename, \"w\")\n\tposfile = open(posfilename, \"w\")\n\t# Initialize variables \n\tpreviousCM = 0\n\tpreviousPos = 0\n\ti=0\n\tbimline = bimfile.readline().strip().split() # Pos 1 is rsID, Pos 3 is location\n\tfor mapline in mapfile:\n\t\tif len(bimline) == 0:\n\t\t\tbreak\t\t\n\t\tif i==0:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\tmapline = mapline.strip().split()\n\t\t# Three cases: 1. SNP pos gt map pos\n\t\twhile int(bimline[3]) < int(mapline[0]): # This means that the BIM file is behind the map file, so need to write output here with the interopolation\n\t\t# of the previous values\n\t\t\tdiffCM = float(mapline[2]) - float(previousCM)\n\t\t\tdiffpos = float(mapline[0]) - float(previousPos)\n\t\t\tmulti = (float(bimline[3]) - float(previousPos))/diffpos\n\t\t\tcmout = multi*diffCM + float(previousCM)\n\t\t\tif cmout < 0: # this should not happen so if it does dump data and quit\n\t\t\t\tprint i\n\t\t\t\tprint cmout\n\t\t\t\tprint diffCM\n\t\t\t\tprint diffpos\n\t\t\t\tprint previousCM\n\t\t\t\tprint previousPos\n\t\t\t\tprint bimline\n\t\t\t\tprint mapline\n\t\t\t\texit()\n\n\t\t\toutfile.write( str(cmout) +\"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + str(cmout) + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\t\tif len(bimline) == 0:\n\t\t\t\tbreak\t\t\n\t\tif len(bimline) ==0:\n\t\t\tbreak\n\t\tif bimline[3] == mapline[0]: # write out genetic position\n\t\t\toutfile.write( mapline[2]+ \"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + mapline[2] + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\n\t\t#if bimline[3] > mapline[0]: # read next line in the map file\n\t\t#\tpreviousCM = mapline[2]\n\t\t#\tpreviousPos = mapline[0]\n\t\t#\tcontinue\n\t\t# Hits this and continues if bimline is above mapline\n\t\tpreviousCM = mapline[2]\n\t\tpreviousPos = mapline[0]\n\t\ti += 1\n\toutfile.close()\n\treturn(outfile.name)", "def get_all_offgrid_pin(self, pin, insufficient_list):\n #print(\"INSUFFICIENT LIST\",insufficient_list)\n # Find the coordinate with the most overlap\n any_overlap = set()\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the max x or y overlap\n max_overlap = max(overlap_rect)\n if max_overlap>0:\n any_overlap.update([coord])\n \n return any_overlap", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n 
orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def check_map(infile, disable_primer_check, barcode_type=\"golay_12\",\r\n added_demultiplex_field=None, has_barcodes=True):\r\n\r\n if barcode_type == \"variable_length\":\r\n var_len_barcodes = True\r\n else:\r\n var_len_barcodes = False\r\n\r\n if barcode_type == \"0\":\r\n has_barcodes = False\r\n\r\n # hds, id_map, dsp, run_description, errors, warnings\r\n hds, mapping_data, run_description, errors, warnings = \\\r\n process_id_map(infile, has_barcodes=has_barcodes,\r\n disable_primer_check=disable_primer_check,\r\n added_demultiplex_field=added_demultiplex_field,\r\n variable_len_barcodes=var_len_barcodes)\r\n\r\n if errors:\r\n raise ValueError('Errors were found with mapping file, ' +\r\n 'please run validate_mapping_file.py to ' +\r\n 'identify problems.')\r\n\r\n id_map = {}\r\n\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]] = {}\r\n\r\n for header in range(len(hds)):\r\n for curr_data in mapping_data:\r\n id_map[curr_data[0]][hds[header]] = curr_data[header]\r\n\r\n barcode_to_sample_id = {}\r\n\r\n primer_seqs_lens = {}\r\n all_primers = {}\r\n\r\n for sample_id, sample in id_map.items():\r\n if added_demultiplex_field:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper() + \",\" +\r\n sample[added_demultiplex_field]] = sample_id\r\n else:\r\n barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id\r\n if not disable_primer_check:\r\n raw_primers = sample['LinkerPrimerSequence'].upper().split(',')\r\n\r\n if len(raw_primers[0].strip()) == 0:\r\n raise ValueError('No primers detected, please use the ' +\r\n '-p parameter to disable primer detection.')\r\n expanded_primers = expand_degeneracies(raw_primers)\r\n curr_bc_primers = {}\r\n for primer in expanded_primers:\r\n curr_bc_primers[primer] = len(primer)\r\n all_primers[primer] = len(primer)\r\n primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers\r\n\r\n return hds, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers", "def ensure_indicies(self):\n # Search indicies for materials\n self.materials.ensure_index(self.materials.key)\n self.materials.ensure_index(self.materials.last_updated_field)\n\n # Search indicies for elasticity\n self.elasticity.ensure_index(self.elasticity.key)\n self.elasticity.ensure_index(self.elasticity.last_updated_field)\n\n # Search indicies for substrates\n self.substrates.ensure_index(self.substrates.key)\n self.substrates.ensure_index(self.substrates.last_updated_field)", "def __get_map_offsets(self):\n map = self.map.copy()\n map_up = np.zeros((self.h + 1, self.w), np.uint8) # create 4-neighbor connectivity comparision\n map_down = np.zeros((self.h + 1, self.w), np.uint8)\n map_right = np.zeros((self.h, self.w + 1), np.uint8)\n map_left = np.zeros((self.h, self.w + 1), np.uint8)\n map_up[1:, :] = map # paste mask onto it, 1 shifted\n map_down[:-1, :] = map\n map_right[:, :-1] = map\n map_left[:, 1:] = map\n map_up = np.delete(map_up, -1, 0) # delete the extra row/column\n map_down = np.delete(map_down, 0, 
0)\n map_right = np.delete(map_right, 0, 1)\n map_left = np.delete(map_left, -1, 1)\n map_up[0, :] = 1 # set new cells (after the shift) to 1(walls) to eliminate false-positives\n map_down[-1, :] = 1\n map_right[:, -1] = 1\n map_left[:, 0] = 1\n return map_up, map_right, map_down, map_left", "def checkMap(self):\n return True", "def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads", "def test_process_id_map_multiple_problems(self):\r\n\r\n header, mapping_data, comments, errors, warnings =\\\r\n process_id_map(self.errors_warnings_mapping_fp)\r\n\r\n expected_header = [\r\n 'SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Treatment',\r\n 'Treatment',\r\n 'Description']\r\n expected_mapping_data = [['PC.354',\r\n 'AGCACGAGCCTA',\r\n 'YATGCTGCCTCCCGTAGGAGT',\r\n 'Cont^^rol',\r\n 'ATGACCGATTRGACCAG',\r\n 'Control_mouse_I.D._354'],\r\n ['PC-355',\r\n 'AACTCGTCGATGN',\r\n 'YATGCTGCCTCCCGTAGGAGT',\r\n 'Control',\r\n 'ATGACCGATTRGACCAG',\r\n 'Control_mouse_I.D._355',\r\n 'outofbounds'],\r\n ['PC.356',\r\n 'ACAGACCACTCA',\r\n 'YATGCTGCCTCxCCGTAGGAGT',\r\n 'Control',\r\n 'ATGACCGATTRGACCAG',\r\n 'Control_mouse_I.D._356']]\r\n expected_comments = [\r\n 'Example mapping file for the QIIME analysis package. These 9 samples are from a study of the effects of exercise and diet on mouse cardiac physiology (Crawford, et al, PNAS, 2009).']\r\n expected_errors = [\r\n 'Treatment found in header 2 times. Header fields must be unique.\\t0,3',\r\n 'Treatment found in header 2 times. 
Header fields must be unique.\\t0,4',\r\n 'Invalid DNA sequence detected: YATGCTGCCTCxCCGTAGGAGT\\t3,2',\r\n 'Invalid DNA sequence detected: AACTCGTCGATGN\\t2,1']\r\n expected_warnings = [\r\n 'Barcode AACTCGTCGATGN differs than length 12\\t2,1',\r\n 'Invalid characters found in PC-355\\t2,0',\r\n 'Invalid characters found in Cont^^rol\\t1,3',\r\n 'Data field outofbounds found after Description column\\t2,6']\r\n\r\n self.assertEqual(header, expected_header)\r\n self.assertEqual(mapping_data, expected_mapping_data)\r\n self.assertEqual(comments, expected_comments)\r\n self.assertEqual(errors, expected_errors)\r\n self.assertEqual(warnings, expected_warnings)", "def supportingReadsFilter(spot, args):\n if spot.tags[\"label\"] == \"INS\":\n errId = 1\n errLab = 'insertion'\n elif spot.tags[\"label\"] == \"DEL\":\n errId = 2\n errLab = 'deletion'\n else:#don't worry about other types\n return False\n\n begin, ending = spot.fetchbounds()\n begin -= args.buffer #abs(begin-ending)*.5\n ending += args.buffer #abs(begin-ending)*.5\n #do the hard work\n reads = args.bam.fetch(str(spot.chrom), begin, ending)\n totSizes = []\n coverage = 0\n nReadsErr = 0\n #For tandem\n strandCnt = {True: 0, False: 0}\n \n #count reads and errSizes\n for i in reads:\n mySize = 0\n coverage += 1\n start = i.pos - 1\n cigar = expandCigar(i.cigar)\n curSize = 0\n extraSize = 0\n readHasErr = False\n \n #What if I just intersect any stretches of errors with my boundaries.\n #Then for insertions I'll keep coordinates\n #For deletions I'll user outer bounds?\n for code in cigar: \n if code != 1:\n start += 1\n #must be in region\n if start < begin:\n continue\n if start >= ending:\n break\n \n if code == errId:\n curSize += 1\n if curSize != 0 and code != errId:\n if curSize >= args.minIndelErr:\n readHasErr = True\n mySize += curSize\n elif curSize > 1:#1bp errors will inflate\n extraSize += curSize\n curSize = 0\n \n\n if readHasErr and mySize >= args.minIndelSize:\n nReadsErr += 1\n totSizes.append(mySize + extraSize)\n strandCnt[i.is_reverse] += 1\n \n spot.tags[\"strandCnt\"] = \"%d,%d\" % (strandCnt[False], strandCnt[True])\n if len(totSizes) == 0:\n logging.debug(\"no %s found!? %s\" % (errLab, str(spot)))\n return True # true you should filter\n \n if len(totSizes) < max(math.ceil(coverage * args.minIndelPct), args.minErrReads):\n logging.debug(\"not large cnt %s found %s \" % (errLab, str(spot)))\n return True\n \n totSizes.sort()\n totSizes = numpy.array(totSizes)\n mean = totSizes.mean()\n median = numpy.percentile(totSizes, 50)\n firstQ = numpy.percentile(totSizes, 25)\n thirdQ = numpy.percentile(totSizes, 75)\n \n logging.debug(\"PassFilt %s\" % (str(spot))) \n logging.debug(\"cov %d\" % coverage )\n logging.debug(\"size %d %s\" % (len(totSizes), str(totSizes)))\n logging.debug(\"mean %d\" % mean )\n logging.debug(\"median %d\" % median)\n logging.debug(\"firstQ %d\" % firstQ)\n logging.debug(\"thirdQ %d\" % thirdQ)\n \n spot.tags[\"szCount\"] = int(nReadsErr)\n spot.tags[\"szMean\"] = int(mean)\n spot.tags[\"szMedian\"] = int(median)\n spot.tags[\"sz1stQ\"] = int(firstQ)\n spot.tags[\"sz3rdQ\"] = int(thirdQ)\n return False", "def get_paired_reads(section):\n read_1, read_2 = None, None\n for line in section:\n if line.startswith('fg1'):\n read_1 = line.strip().split(':')[1]\n elif line.startswith('fg2'):\n read_2 = line.strip().split(':')[1]\n if not (read_1 and read_2):\n sys.stderr.write(\"Error finding paired reads in {0}\".format(section))\n sys.stderr.write(\" ... 
Exiting.\\n\")\n sys.exit()\n else:\n return read_1, read_2", "def test_latnotloc_and_latandloc_2(self):\n patient = Semiology('lat_', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, _ = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries - should be only the IL left ones which aren't MTG of TL:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n\n gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.drop(index=156).index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 1)\n assert(lat_not_loc_result['Lateralising'].sum() == 2)\n\n # all_combined_gifs assertions\n # all except GIF 156 (L MTG) are in the right GIFs:\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == True\n )\n # assert using shape as all pt #s are 1:\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # check that latnotloc gives 1 and latandloc adds zero to right MTG GIF #155\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert heatmap[155] == 1 # right", "def _unique_beams(self):\n bmap, mask = self.single_pointing_telescope._unique_beams()\n block_bmap = linalg.block_diag(*[bmap+i*self.single_pointing_telescope.nfeed for i, _ in enumerate(self.pointings)])\n block_mask = linalg.block_diag(*[mask for _ in self.pointings])\n\n return block_bmap, block_mask", "def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()", "def aggregate_input(wildcards):\n with 
open(checkpoints.mapped_reads.get(sample=wildcards.sample, reference=wildcards.reference).output[0]) as f:\n summary = json.load(f)\n all_segments_aligned = summary[\"all_segments_aligned\"]\n min_reads = summary[\"minimum_reads_required\"]\n mapped = summary[\"mapped_reads\"]\n\n if not all_segments_aligned or mapped <= min_reads:\n return rules.not_mapped.output.not_mapped\n else:\n return rules.post_masked_consensus_and_summary_stats_to_id3c.output.successful_post", "def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map", "def test_get_interesting_mapping_fields(self):\r\n # all columns are completely unique\r\n d = parse_mapping_file(self.mapping_f1)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # all columns are completely identical\r\n d = parse_mapping_file(self.mapping_f2)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = []\r\n self.assertEqual(actual, expected)\r\n\r\n # some columns retained\r\n d = parse_mapping_file(self.mapping_f3)\r\n actual = get_interesting_mapping_fields(d[0], d[1])\r\n expected = ['Something', 'days_since_epoch']\r\n self.assertEqual(actual, expected)", "def get_others(map_, r, c):\n nums = 0\n # your code here\n if r == 0 and c == 0: #top left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == 0 and c == len(map_[0])-1: #top right corner\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == 0: #bottom left corder\n nums += 2\n if len(map_[0]) > 1:\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == len(map_)-1 and c == len(map_[0])-1: #bottom right corner\n nums += 2\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif r == 0: # top edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if len(map_) > r and map_[r+1][c] == 0:\n nums += 1\n elif r == len(map_)-1: # bottom edge, excluding corner\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n elif c == 0: # left edge, excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if len(map_[0]) > c and map_[r][c+1] == 0:\n nums += 1\n elif c == len(map_[0])-1: # right edge. 
excluding corner\n nums += 1\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n else: # the rest, excluding edge and corner\n if map_[r-1][c] == 0:\n nums += 1\n if map_[r+1][c] == 0:\n nums += 1\n if map_[r][c-1] == 0:\n nums += 1\n if map_[r][c+1] == 0:\n nums += 1\n return nums", "def test_lat_not_loc_1(self):\n patient = Semiology('lat_not_loc', Laterality.LEFT, Laterality.LEFT)\n patient.data_frame = self.df\n lat_not_loc_all_combined_gifs = patient.query_lateralisation(\n one_map_dummy)\n\n # inspect result\n lat_not_loc_result, num_query_loc = patient.query_semiology()\n\n self.assertIs(type(lat_not_loc_all_combined_gifs), pd.DataFrame)\n assert not lat_not_loc_all_combined_gifs.empty\n\n # drop the zero entries as these are from the CL/IL zeros:\n lat_not_loc_all_combined_gifs = lat_not_loc_all_combined_gifs[['Gif Parcellations', 'pt #s']].astype(\n {'Gif Parcellations': 'int32', 'pt #s': 'int32'})\n lat_not_loc_all_combined_gifs.set_index(\n 'Gif Parcellations', inplace=True)\n lat_not_loc_gifsclean = lat_not_loc_all_combined_gifs.loc[\n lat_not_loc_all_combined_gifs['pt #s'] != 0, :]\n # now we know only the CL data remains in this dummy data, which is on the RIGHT.\n gifs_right, gifs_left = gifs_lat_factor()\n lat_not_loc_gifsclean_rights = (\n lat_not_loc_gifsclean.index.isin(gifs_right).all()\n )\n\n # inspect result assertions\n assert(lat_not_loc_result.Localising.sum() == 0)\n assert(lat_not_loc_result['Lateralising'].sum() == 1)\n\n # all_combined_gifs assertions\n assert((\n lat_not_loc_gifsclean_rights == True)\n )\n assert(\n (\n lat_not_loc_gifsclean.index.isin(gifs_left)).any() == False\n )\n assert (lat_not_loc_gifsclean['pt #s'].sum()\n == lat_not_loc_gifsclean.shape[0])\n\n # test MTG on right 155 gif # gives 1:\n heatmap, _ = patient.get_num_datapoints_dict(method='minmax')\n assert 156 not in heatmap # left\n assert heatmap[155] == 1 # right", "def get_contig_rpkms(identifier, path, minscore):\n \n print('Parsing file:', path)\n \n bamfile = pysam.AlignmentFile(path, \"rb\")\n \n # We can only get secondary alignment reference names, not indices. So we must\n # make an idof dict to look up the indices.\n idof = {contig: i for i, contig in enumerate(bamfile.references)}\n contiglengths = bamfile.lengths\n halfreads = [0] * len(contiglengths)\n \n nhalfreads = 0\n for segment in filter_segments(bamfile, minscore):\n nhalfreads += 1\n \n # Read w. 
unmapped mates count twice as they represent a whole read\n value = 2 if segment.mate_is_unmapped else 1\n \n for reference in get_all_references(segment):\n id = idof[reference]\n halfreads[id] += value\n \n bamfile.close()\n \n print('Done parsing file:', path)\n \n rpkms = list()\n \n # Compensate for having paired reads\n millionmappedreads = nhalfreads / 2e6\n \n for contiglength, nhalfreads in zip(contiglengths, halfreads):\n kilobases = contiglength / 1000\n rpkms.append(nhalfreads / (kilobases * millionmappedreads))\n \n return identifier, rpkms", "def test_tb_full_mapping_iter_02():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_2 = resource_path + \"tb.Human.SRR1658573_2.fastq\"\n\n files = [\n gem_file,\n fastq_file_2\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm2 = tbFullMappingTool()\n tfm2_files, tfm2_meta = tfm2.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_2_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_2_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_2_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_2_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def find_common_genes(input_fp):\n trait_genes = {}\n all_genes = []\n common_genes = []\n snp_count = {}\n traits = {}\n matrix = []\n print('Extracting genes from eQTL interactions for...')\n _,_,t_files = next(os.walk(input_fp), (None, None, []))\n for trait_file in t_files:\n trait = trait_file[:len(trait_file)-4]\n print('\\t' + trait)\n tfile = open(os.path.join(input_fp, trait_file), 'r')\n eqtls= csv.reader(tfile, delimiter = '\\t') \n next(tfile, None)\n for line in eqtls:\n genes = []\n if trait in trait_genes.keys():\n genes = trait_genes[trait]\n genes.append(line[3])\n trait_genes[trait] = genes\n all_genes.append(line[3])\n tfile.close()\n \n for trait in trait_genes:\n trait_genes[trait] = list(set(trait_genes[trait]))\n all_genes = list(set(all_genes))\n print(len(all_genes))\n\n done_genes = []\n \"\"\"\n for snp in all_snps:\n occur = all_snps.count(snp)\n if occur > 1 and snp not in done_snps:\n done_snps.append(snp)\n for record in trait_snps:\n if snp == record[1] and record not in common_snps:\n common_snps.append(record)\n snp_count[snp] = occur\n to_dict = []\n if record[0] not in traits.keys():\n to_dict.append(snp)\n traits[record[0]] = to_dict\n else:\n to_dict = traits[record[0]]\n to_dict.append(snp)\n traits[record[0]] = to_dict\n \"\"\"\n for trait in trait_genes.keys():\n gene_count = {}\n genes_total = len(trait_genes[trait])\n compare_traits = trait_genes.keys()\n if genes_total > 3:\n for trait_gene in trait_genes[trait]:\n for compare in compare_traits:\n if trait_gene in trait_genes[compare]:\n if compare not in gene_count.keys():\n gene_count[compare] = 1\n else:\n gene_count[compare] += 1\n #else:\n # gene_count[compare] = 0\n row = []\n row.append(trait)\n for t in gene_count:\n ratio = round(gene_count[t]/float(genes_total), 7)\n matrix.append([trait, t, genes_total, 
gene_count[t], ratio])\n\n \"\"\"\n with open (output_fp + '/' + 'common_snps_count.txt', 'wb') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['snp', 'count'])\n for snp in snp_count:\n writer.writerow([snp,snp_count[snp]])\n \"\"\"\n\n with open ('gene_matrix.txt', 'w') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['trait_x', 'trait_y', '#total_genes', '#common_snps', \\\n 'ratio'])\n writer.writerows(matrix)", "def process_coords():\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def _locate_sample_single_map(self, idx, map_name):\n cumsum = self.traj_len_cumsum_per_map[map_name]\n assert 0 <= idx < cumsum[-1], \"Map index %d out of range [0, %d)\" % (\n idx,\n cumsum[-1],\n )\n\n trajs = self.traj_ids_per_map[map_name]\n\n traj_idx = bisect.bisect_right(cumsum, idx)\n dataset_idx, traj_id = trajs[traj_idx]\n\n if traj_idx == 0:\n sample_idx = idx\n else:\n sample_idx = idx - cumsum[traj_idx - 1]\n\n return dataset_idx, traj_id, sample_idx", "def _test_map_repeatability():\n map1 = map.Map(config.MAP_HEIGHT, config.MAP_WIDTH, 3)\n map1.random_seed = libtcod.random_save(0)\n _build_map(map1)\n\n map2 = map.Map(config.MAP_HEIGHT, config.MAP_WIDTH, 3)\n map2.random_seed = map1.random_seed\n _build_map(map2)\n\n assert map1.terrain == map2.terrain\n for i in range(len(map1.rooms)):\n assert map1.rooms[i] == map2.rooms[i]", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def test_overlapping_alignments_2():\n generate_bam_file(gqd.sam_content, gqd.sam_bam_prefix)\n gqd.gene_wise_quantification._min_overlap = 5\n sam = pysam.Samfile(gqd.sam_bam_prefix + \".bam\")\n # 1 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 10))) == []\n # 4 overlapping base in the 5' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 13))) == []\n # 5 overlapping base in the 5' end of the reads => okay\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 1, 14))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]\n # 1 overlapping base in the 3' end of 
the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 19, 23))) == []\n # 4 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 16, 23))) == []\n # 5 overlapping base in the 3' end of the reads => not enough\n assert mapping_ids(gqd.gene_wise_quantification._overlapping_alignments(\n sam, Gff3EntryMoc(\"chrom\", 15, 23))) == [\n \"myread:01\", \"myread:02\", \"myread:03\", \"myread:04\", \"myread:05\"]", "def test_tb_full_mapping_iter_01():\n resource_path = os.path.join(os.path.dirname(__file__), \"data/\")\n gem_file = resource_path + \"tb.Human.GCA_000001405.22_gem.fasta.gem\"\n\n fastq_file_1 = resource_path + \"tb.Human.SRR1658573_1.fastq\"\n\n files = [\n gem_file,\n fastq_file_1\n ]\n\n metadata = {\n 'assembly': 'test',\n # 'enzyme_name': 'MboI',\n 'windows': ((1, 25), (1, 50), (1, 75), (1, 100))\n }\n\n gem_file = files[1]\n\n print(gem_file)\n\n tfm1 = tbFullMappingTool()\n tfm1_files, tfm1_meta = tfm1.run(files, [], metadata) # pylint: disable=unused-variable\n\n map25 = resource_path + \"tb.Human.SRR1658573_1_full_1-25.map\"\n map50 = resource_path + \"tb.Human.SRR1658573_1_full_1-50.map\"\n map75 = resource_path + \"tb.Human.SRR1658573_1_full_1-75.map\"\n map100 = resource_path + \"tb.Human.SRR1658573_1_full_1-100.map\"\n\n assert os.path.isfile(map25) is True\n assert os.path.getsize(map25) > 0\n assert os.path.isfile(map50) is True\n assert os.path.getsize(map50) > 0\n assert os.path.isfile(map75) is True\n assert os.path.getsize(map75) > 0\n assert os.path.isfile(map100) is True\n assert os.path.getsize(map100) > 0", "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def markDups(bam):\n names = {}\n numDups = 0\n for read in bam:\n n = \"/\".join(read.qname.split('/')[:2])\n try:\n cRead, cScore = names['/'.join(read.qname.split('/')[:2])]\n #myScore = sum([ord(y)-33 for y in read.qqual])/float(len(read.qqual))\n myScore = read.mapq\n if cScore > myScore:\n read.is_duplicate = True\n else:\n cRead.is_duplicate = True\n numDups += 1\n except KeyError:\n #myScore = sum([ord(y)-33 for y in read.qqual])/float(len(read.qqual))\n myScore = read.mapq\n names[n] = (read, myScore)\n logging.info(\"Marked %d ZMW duplicates\" % (numDups))\n del(names)", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except 
TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())", "def test_check_map_single_sample(self):\r\n\r\n header, mapping_data = check_map(\r\n valid_mapping_data_no_bcs_no_added_demultiplex,\r\n barcode_type=0)\r\n\r\n expected_header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n expected_mapping_data =\\\r\n [['s1', '', '', 's1_description']]\r\n\r\n self.assertEquals(header, expected_header)\r\n self.assertEquals(mapping_data, expected_mapping_data)", "def count_unique_mirbase_reads(bam, counts_file):\n count_ref_hits(bam, counts_file)", "def map_and_save_gene_ids(hit_genes_location, all_detectable_genes_location=''):\n\n standardized_hits = [] # [primary_set]\n standardized_secondary_hits = [] # [secondary_set=None]\n\n if type(hit_genes_location) == str or isinstance(hit_genes_location, pathlib.PurePath):\n # log.info('codepath 1')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location)]\n standardized_secondary_hits = [None]\n\n if type(hit_genes_location) == tuple:\n # log.info('codepath 2')\n standardized_hits = [cast_external_refs_to_internal_ids(hit_genes_location[0])]\n standardized_secondary_hits = [cast_external_refs_to_internal_ids(hit_genes_location[1])]\n\n if type(hit_genes_location) == list:\n # log.info('codepath 3')\n for sub_hit_genes_location in hit_genes_location:\n # log.info('codepath 3.0')\n if type(sub_hit_genes_location) == str or isinstance(sub_hit_genes_location, pathlib.PurePath):\n # log.info('codepath 3.1')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location)]\n standardized_secondary_hits += [None]\n if type(sub_hit_genes_location) == tuple:\n # log.info('codepath 3.2')\n standardized_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[0])]\n standardized_secondary_hits += [cast_external_refs_to_internal_ids(sub_hit_genes_location[1])]\n\n log.debug('standardized primary hits:\\n\\t%s' % standardized_hits)\n log.debug('standardized secondary_hits:\\n\\t%s' % standardized_secondary_hits)\n\n dump_object(Dumps.analysis_set_bulbs_ids, (standardized_hits, standardized_secondary_hits))\n\n if all_detectable_genes_location:\n background_set = cast_external_refs_to_internal_ids(all_detectable_genes_location)\n # print(background_set)\n primary_set = [y for x in standardized_hits for y in x] # flattens the mapped ids list\n # print(primary_set)\n\n formatted_secondary_hits = [_l\n if _l is not None\n else []\n for _l in standardized_secondary_hits]\n\n sec_set = [y for x in formatted_secondary_hits for y in x]\n\n re_primary_set = set()\n for _id in primary_set:\n if type(_id) == str or type(_id) == int:\n re_primary_set.add(_id)\n else:\n re_primary_set.add(_id[0])\n\n primary_set = re_primary_set\n\n re_secondary_set = set()\n for _id in sec_set:\n if type(_id) == str or type(_id) == int:\n re_secondary_set.add(_id)\n else:\n re_secondary_set.add(_id[0])\n\n sec_set = re_primary_set\n\n if type(background_set[0]) == str or type(background_set[0]) == int: # unweighted\n background_set = 
list(set(background_set).union(primary_set).union(sec_set))\n\n else:\n bck_set = {_id[0] for _id in background_set}\n bck_set = list(bck_set)\n\n if not primary_set.issubset(bck_set):\n log.info('Nodes ids %s are missing in background set and are added with weight 0' %\n (primary_set - bck_set))\n background_set += [(_id, 0) for _id in (primary_set - bck_set)]\n\n if not sec_set.issubset(bck_set):\n log.info('Secondary set nodes ids %s are missing in background set and are added '\n 'with weight 0' % (sec_set - bck_set))\n background_set += [(_id, 0) for _id in (sec_set - bck_set)]\n\n else:\n background_set = []\n\n dump_object(Dumps.background_set_bulbs_ids, background_set)\n\n return standardized_hits, standardized_secondary_hits, background_set", "def test_check_map_primer_pool(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAT,DC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])\r\n\r\n # Returns all possible primers with lengths associated.\r\n expected_all_primers = {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}\r\n self.assertEqual(all_primers, expected_all_primers)\r\n\r\n # Returns all primers associated with each barcode.\r\n expected_primer_seqs_lens = {'AA': {'AC': 2}, 'GG': {'GC': 2},\r\n 'AC': {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}}\r\n\r\n self.assertEqual(primer_seqs_lens, expected_primer_seqs_lens)", "def find_fast5s_from_ids_readdb(readdb, read_ids, read_dirs, recursive=False):\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n if name.split(\"_\")[0] in read_ids:\n yield name, fast5", "def additionalMatch(handIn, indx):", "def find_pacgums(self):\n for row in range(len(self.structure)):\n for col in range(len(self.structure[row])):\n if self.structure[row][col] == 'n': \n self.pacgums.append((col, row))" ]
[ "0.6598933", "0.64177066", "0.6393043", "0.6074415", "0.6032629", "0.60148144", "0.57585204", "0.5614244", "0.55744815", "0.55740666", "0.5564409", "0.55503625", "0.5542675", "0.55369735", "0.55324143", "0.55242544", "0.5486904", "0.547165", "0.5459795", "0.54455733", "0.5401535", "0.539839", "0.5380562", "0.53804374", "0.5377497", "0.53734016", "0.53688765", "0.53560364", "0.5350236", "0.53467715", "0.5346525", "0.5344236", "0.5341456", "0.5336751", "0.5326166", "0.5324319", "0.5319582", "0.5313544", "0.52914864", "0.5289721", "0.5286873", "0.52862215", "0.52848715", "0.5252286", "0.52413136", "0.52270854", "0.5220121", "0.521756", "0.5214621", "0.5211202", "0.5209844", "0.5201222", "0.5199695", "0.51846564", "0.5182717", "0.51694393", "0.51606476", "0.51580316", "0.5156414", "0.51441336", "0.5142324", "0.5141084", "0.5129542", "0.51262456", "0.51145774", "0.51144856", "0.5113208", "0.5098777", "0.509684", "0.50962466", "0.50938326", "0.5092919", "0.50903285", "0.5090103", "0.5089776", "0.50876594", "0.5084254", "0.50794894", "0.50786495", "0.50733685", "0.50696194", "0.50673205", "0.50666654", "0.5063015", "0.50620526", "0.50514865", "0.50498563", "0.50460494", "0.50444347", "0.50317574", "0.5027262", "0.5026734", "0.50255203", "0.5023056", "0.50223535", "0.5018826", "0.50145054", "0.5013927", "0.5010956", "0.5001146", "0.49928585" ]
0.0
-1
consolidate results into dataframes
def parse_dataframes(genome_gtf, sralist): def gather_strand_by_geneID_dict(genome_gtf): """ Returns dictionary with strand orientation as values and geneIDs as Keys/ e.g.: {'YAL012W': '+', 'YAL069W': '+', 'YAL068W-A': '+', """ strand_by_geneID_dict = {} with open(genome_gtf) as f: for line in f: current_line = line.split('\t') if current_line[2] == "CDS": current_orf = current_line[8].split(';')[2].split()[1].strip('\"') current_strand = current_line[6] strand_by_geneID_dict[current_orf] = current_strand return strand_by_geneID_dict def import_scikit_data(sralist): """ Import results from scikit pipeline for all datasets contained in datsets_names. """ scikit_data_dict = {} for dataset in sralist: with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data: scikit_data_dict[dataset] = [json.load(scikit_data)] return scikit_data_dict def build_mat_scikit_strandOriented(sralist, scikit_data): """ Building of scikit_df based on the output of plot_ribo_density_dict.py script. C/-/reverse/complementary strand are taken into account and the profile values ("codon_density_profile", "codon_triplet", "codon_AA") are reversed. This is performed by adding [::-1] to C strands profile ends. Same profile values are also have their extremities trimmed out of 8 codons. (This is because the scikit-ribo pipeline considers 8 extra codons on each end, but here we are only interested in the coding sequence). This is performed by adding [8:-8] to profile lists ends. """ scikit_mat = {} seq_codons = {} seq_aa = {} for geneID in scikit_data[sralist[0]][0].keys(): for ix, dataset in enumerate(sralist): if geneID in scikit_data[dataset][0].keys(): current_profile = scikit_data[dataset][0].get(geneID, np.nan) current_ribo = current_profile[0] current_ribo = current_ribo[8:-8] N = len(sralist) M = len(current_ribo) print(geneID, M) if ix == 0: current_matrix = np.zeros((N,M)) * np.nan current_seq_codons = current_profile[1] current_seq_codons = current_seq_codons[8:-8] current_seq_aa = current_profile[2] current_seq_aa = current_seq_aa[8:-8] if strand_by_geneID_dict.get(geneID, "NA") == "+": seq_codons[geneID] = current_seq_codons seq_aa[geneID] = current_seq_aa elif strand_by_geneID_dict.get(geneID, "NA") == "-": seq_codons[geneID] = current_seq_codons[::-1] seq_aa[geneID] = current_seq_aa[::-1] if strand_by_geneID_dict.get(geneID, "NA") == "+": current_matrix[ix,:] = current_ribo elif strand_by_geneID_dict.get(geneID, "NA") == "-": current_matrix[ix,:] = current_ribo[::-1] if np.sum(current_matrix) > 0: scikit_mat[geneID] = current_matrix # scikit_df = pd.DataFrame(values_list, columns=columns_list) return scikit_mat, seq_codons, seq_aa def mean_norm(row): codon_dens_prof = row.codon_density_profile profile_average = np.average(codon_dens_prof) return [x/profile_average for x in codon_dens_prof] #scikit_data_df["mean_norm_codon_density_profile"] = scikit_data_df.apply(mean_norm, axis=1) #scikit_data_df["mean_norm_codon_density_profile"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8]) strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf) scikit_data_dict = import_scikit_data(sralist) scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, scikit_data_dict) with open('../data/processed/scikit_mat.pkl', 'wb') as f: pickle.dump(scikit_data_mat, f) with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq: pickle.dump(seq_codons_dict, f_seq) return scikit_data_mat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def consolidate_results(path='./Data'):\n model_files = [load(os.path.join(path, f)) \n for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.startswith('model_')]\n df_final = pd.DataFrame(columns=['model_name','train_accuracy','test_accuracy',\n 'macro_avg_precision','macro_avg_recall',\n 'macro_avg_f1-score','weighted_avg_precision',\n 'weighted_avg_recall','weighted_avg_f1-score'])\n for model_file in model_files:\n results = model_file['model_results']\n class_report = classification_report(results.category, results.pred, output_dict=True)\n df_final = df_final.append({'model_name':model_file['model_name'],\n 'train_accuracy':'{0:.2f}'.format(model_file['model_CV'].best_score_),\n 'test_accuracy':'{0:.2f}'.format(class_report['accuracy']),\n 'macro_avg_precision':class_report['macro avg']['precision'],\n 'macro_avg_recall':class_report['macro avg']['recall'],\n 'macro_avg_f1-score':class_report['macro avg']['f1-score'],\n 'weighted_avg_precision':class_report['weighted avg']['precision'],\n 'weighted_avg_recall':class_report['weighted avg']['recall'],\n 'weighted_avg_f1-score':class_report['weighted avg']['f1-score']\n },ignore_index=True)\n return(df_final)", "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. 
This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def make_results_df(results):\n max_val = max(x[1] for x in results)\n\n df = []\n for i in range(max_val + 1):\n df.append([])\n for j in range(max_val + 1):\n df[-1].append(results.get((i, j), np.nan))\n return pd.DataFrame(df)", "def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict", "def collect_results( results_dir = \"experiments\" ) :\n #%%\n import pandas as pd\n exps_fn = os.listdir( results_dir )\n dics = []\n for fname in exps_fn :\n with open( results_dir + \"/\" + fname, \"rt\", encoding=\"utf8\" ) as f_out :\n dics.append( json.load( f_out ) )\n\n results_df = pd.DataFrame( dics )\n #%%\n return results_df", "def prepare_wg_data(results):\n wg_df = pd.DataFrame(results)\n wg_df['search_engine'] = 'wg-gesucht.de'\n return wg_df", "def _process_extraction_results(self, result, metapaths, start_ids, end_ids, start_name, end_name,\n return_sparse=False, sparse_df=True, verbose=False):\n from itertools import product\n\n if return_sparse:\n # Turn each result matrix into a series\n if verbose:\n print('\\nReshaping Result Matrices...')\n time.sleep(0.5)\n\n size = result[0].shape[0]*result[0].shape[1]\n if verbose:\n result = [mt.reshape(res, (size, 1)) for res in tqdm(result)]\n else:\n result = [mt.reshape(res, (size, 1)) for res in result]\n\n if verbose:\n print('Stacking columns...')\n result = hstack(result)\n\n if sparse_df:\n if verbose:\n # Past all the series together into a DataFrame\n print('\\nGenerating DataFrame...')\n result = pd.SparseDataFrame(result, columns=metapaths, default_fill_value=0.0)\n\n start_end_df = pd.DataFrame(list(product(start_ids, end_ids)), columns=[start_name, end_name])\n\n # Return a list of the metapath names that indicies correspond to result columns\n if not sparse_df:\n return (start_end_df, metapaths), result\n\n return start_end_df, result\n\n # Turn each result matrix into a series\n if verbose:\n print('\\nFormatting results to series...')\n time.sleep(0.5)\n\n # Currently running in series. 
Extensive testing has found no incense in speed via Parallel processing\n # However, parallel usually results in an inaccurate counter.\n if verbose:\n for i in tqdm(range(len(metapaths))):\n result[i] = mt.to_series(result[i], name=metapaths[i]).reset_index(drop=True)\n else:\n for i in range(len(metapaths)):\n result[i] = mt.to_series(result[i], name=metapaths[i]).reset_index(drop=True)\n\n # Paste all the series together into a DataFrame\n if verbose:\n print('\\nConcatenating series to DataFrame...')\n start_end_df = pd.DataFrame(list(product(start_ids, end_ids)), columns=[start_name, end_name])\n\n return start_end_df, pd.concat(result, axis=1)", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df", "def multi_query(db, queries):\n return pd.concat((query_to_df(db, q) for q in queries), ignore_index=True)", "def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = 
list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website", "def set_results(self, results, unique_keys):\n self._results = results\n self._compute_logic()\n\n for _, query in enumerate(self._results):\n\n flat = query.flatten_results(unique_keys)\n filename = 'flattened_{0}.csv'.format('_'.join(sorted(query.in_sets)))\n flat.to_csv(\n os.path.join(\n Configuration().csv.output_directory,\n '{0}'.format(filename)\n ),\n sep='\\t'\n )", "def combine_results(voting = 'hard',clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n df = load_all_dfs(clf_list)\n\n print('combining the data and voting ', voting)\n\n if voting == 'hard':\n print('voting')\n\n label_tupel_list = list(df.groupby(level=['id'])['std'].idxmax())#idmax \n num_samples = len(label_tupel_list)\n index = [label_tupel_list[i][0] for i in range(num_samples)]\n df.index\n time_need = []\n t2 = 0\n\n print(\"doing god's work\")\n df_new = df.ix[index]\n df_new = df.ix[label_tupel_list]\n end = time.clock()\n print('done', end-start)\n #return df_new\n \n \n cols = ['Class_1',\n 'Class_2',\n 'Class_3',\n 'Class_4',\n 'Class_5',\n 'Class_6',\n 'Class_7',\n 'Class_8',\n 'Class_9']\n df_new2 = df_new.reset_index()\n del df_new2['std']\n del df_new2['id']\n del df_new2['df']\n\n print('zero')\n try:\n print('first')\n clf_names = 'with_'\n print('second')\n for i in range(len(clf_list)):\n print(clf_list[i])\n clf_names = clf_names + '_' + clf_list[i]\n \n df_new2.to_csv('Pikki'+clf_names+ '.csv',header = cols,index_label = ['id'])\n \n df_new2.index +=1\n\n print('written to')\n print('Pikki'+clf_names+ '.csv')\n \n df_new2.to_csv('combined_Pikki'+clf_names+ '.csv',header = cols,index_label = ['id'])\n except:\n df_new2.to_csv('combined_Pikki.csv',header = cols,index_label = ['id'])\n return df_new", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def get_flat_results(self):\n test_results, error_dict, framestats = self.get_results()\n test_results = self._merge_test_results(test_results, error_dict)\n\n results = copy.deepcopy(test_results)\n results.update(framestats)\n\n return results", "def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, 
index=index)\n df.crs = from_epsg(4326)\n\n return df", "def interpret_results(rules):\n df_res = rules.sort_values(by=['lift'], ascending=False)\n # df_res.head()\n return df_res", "def analyzeResults(self):\n results = [self.analyzeClusterPerformance(c) for c in self.clusterLabels]\n rDF = pd.DataFrame(results)\n self.resultList.append(rDF)", "def check_results_as_data_frame(check_to_check_results: Dict[Check, CheckResult]) -> DataFrame:\n check_names = []\n status = []\n descriptions = []\n for check_result in check_to_check_results.values():\n check_names.append(check_result.check)\n status.append(check_result.status)\n descriptions.append(check_result.description)\n return DataFrame(zip(check_names, status, descriptions), columns=[\"check_name\", \"status\", \"description\"])", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "def Combined_Non_Compound_Results(level):\r\n \r\n CombinedResults = pd.DataFrame({},columns=['Name','Club','Score',\r\n 'Golds', 'Hits'])\r\n # Initial empty dataframe to append in to\r\n \r\n for i in level:\r\n CombinedResults = CombinedResults.append(CategoryScore(i))\r\n \r\n CombinedResults = CombinedResults.sort_values(['Score','Golds','Hits'],\r\n ascending=[False,False,False],na_position='last')\r\n \r\n CombinedResults = CombinedResults.reset_index(drop=True)\r\n # print(CombinedResults) # uncomment to see complete almost results\r\n return CombinedResults", "def get_filtered_df(db, mama_id_list, garfield_id_list):\n df_mama = pd.DataFrame(list(db.mama.find({'_id': {\"$in\": mama_id_list}, \"active\": True})))\n df_mama = df_mama.set_index('_id')\n if len(mama_id_list) == len(df_mama):\n print(\"\\nMama database successfully extracted from Mongo\")\n print(\"Mama DF Length:\", len(df_mama))\n print(\"Expected Mama DF Length:\", len(mama_id_list))\n else:\n print(\"\\nUnexpected row count in mama DF\")\n print(\"Mama DF Length:\", len(df_mama))\n print(\"Expected Mama DF Length:\", len(mama_id_list))\n\n\n df_garfield = 
pd.DataFrame(list(db.garfield.find({'_id': {\"$in\": garfield_id_list}, \"active\": True})))\n df_garfield = df_garfield.set_index('_id')\n df_garfield['request_date'] = pd.to_datetime(df_garfield['request_date'])\n if len(df_garfield) == len(garfield_id_list): \n print(\"\\nGarfield database successfully extracted from Mongo\")\n print(\"Garfield DF Length:\", len(df_garfield))\n print(\"Expected Garfield DF Length:\", len(garfield_id_list)) \n else:\n print(\"\\nUnexpected row count in Garfield DF\")\n print(\"Mama DF Length:\", len(df_garfield))\n print(\"Expected Mama DF Length:\", len(garfield_id_list))\n \n if 'address_concat' not in df_garfield.columns:\n df_garfield[\"address_concat\"] = df_garfield[\"address\"]+\", \"+ df_garfield[\"city\"]+\", \"+ df_garfield[\"state\"]+\", \"+ df_garfield[\"zip_code\"]\n \n return df_mama, df_garfield", "def aggregateResultsToDfResults(self, arrays=True, fillna=False):\n nan_value = np.nan\n # defines which variable types will be saved in the results dataframe\n SUPPORTED_TYPES = (float, int, np.ndarray, list)\n SCALAR_TYPES = (float, int)\n ARRAY_TYPES = (np.ndarray, list)\n\n logging.info(\"Aggregating results to `dfResults` ...\")\n for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)):\n # if the results were previously loaded into memory, use them\n if hasattr(self, \"results\"):\n # only if the length matches the number of results\n if len(self.results) == len(self.dfResults):\n result = self.results[runId]\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n\n for key, value in result.items():\n # only save floats, ints and arrays\n if isinstance(value, SUPPORTED_TYPES):\n # save 1-dim arrays\n if isinstance(value, ARRAY_TYPES) and arrays:\n # to save a numpy array, convert column to object type\n if key not in self.dfResults:\n self.dfResults[key] = None\n self.dfResults[key] = self.dfResults[key].astype(object)\n self.dfResults.at[runId, key] = value\n elif isinstance(value, SCALAR_TYPES):\n # save scalars\n self.dfResults.loc[runId, key] = value\n else:\n self.dfResults.loc[runId, key] = nan_value\n # drop nan columns\n self.dfResults = self.dfResults.dropna(axis=\"columns\", how=\"all\")\n\n if fillna:\n self.dfResults = self.dfResults.fillna(0)", "def dataframe(sorter, run_function):\n\n df = None\n for run in sorter:\n data = {\n 'mouse': [run.mouse],\n 'date': [run.date],\n 'run': [run.run],\n 'reversed': [flow.metadata.reversal(run.mouse) < run.date],\n 'dprime': [calc.performance.dprime(run.parent)],\n }\n\n t2p = run.trace2p()\n for cs in config.stimuli():\n for err in [0, 1]:\n evs = t2p.csonsets(cs, errortrials=err)\n csdata = deepcopy(data)\n for key in csdata:\n csdata[key] = csdata[key]*len(evs)\n\n csdata['frame'] = evs\n csdata['stimulus'] = [cs]*len(evs)\n csdata['error'] = [err]*len(evs)\n\n default = pd.DataFrame(csdata)\n date_df = run_function(run, default)\n\n if df is None:\n df = date_df\n else:\n df = pd.concat([df, date_df], ignore_index=True, sort=True)\n\n return df", "def get_all_mkresults(self):\n\n all_results = ()\n self._logger.debug(\"Getting all mk results\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT result_id, mk_ind_first, mk_ind_second, mk_ind_third, \\\nmk_ind_fourth, course, time FROM mk_ind_result ORDER BY time DESC\")\n results = cursor.fetchall()\n\n for result_id, first_id, 
second_id, third_id, fourth_id, course, timestamp in results:\n intermediate_results = ()\n\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(first_id))\n first = cursor.fetchall()\n first_name_first, last_name_first, \\\n nickname_first = first[0]\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(second_id))\n second = cursor.fetchall()\n first_name_second, last_name_second, \\\n nickname_second = second[0]\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(third_id))\n third = cursor.fetchall()\n first_name_third, last_name_third, \\\n nickname_third = third[0]\n except MySQLdb.OperationalError:\n first_name_third = ''\n last_name_third = ''\n nickname_third = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(fourth_id))\n fourth = cursor.fetchall()\n first_name_fourth, last_name_fourth, \\\n nickname_fourth = fourth[0]\n except MySQLdb.OperationalError:\n first_name_fourth = ''\n last_name_fourth = ''\n nickname_fourth = ''\n\n intermediate_results = intermediate_results + \\\n (result_id, first_name_first, last_name_first,\n nickname_first, first_name_second, last_name_second,\n nickname_second, first_name_third,\n last_name_third, nickname_third, first_name_fourth,\n last_name_fourth, nickname_fourth, course,\n timestamp.strftime('%Y-%m-%d'))\n\n all_results = all_results + (intermediate_results,)\n del intermediate_results\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return all_results", "def concat_all_evaluation_results(list_of_folders):\n\n\n train_eval_df_list = []\n val_eval_df_list = []\n train_val_eval_df_list = []\n\n\n for item in list_of_folders:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n\n if \"random\" in f:\n if \"random_training_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"random_training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n\n elif \"random_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n else:\n if \"triplet\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n elif \"validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n # add 'training_' or 'validation_' to the column names of evaluation results coming from training and validation sets.\n # This is to be able to distinguish them in the final general csv file.\n\n columns = list(train_val_eval_df_list[0])\n train_columns = [\"training_\"+item for item in columns[1:]]\n 
train_columns = [columns[0]] + train_columns\n train_columns_dict ={}\n \n val_columns = [\"validation_\"+item for item in columns[1:]]\n val_columns = [columns[0]] + val_columns\n val_columns_dict ={}\n\n #train_and_val_columns = [\"train_and_validation_\"+item for item in columns[1:]]\n #train_and_val_columns = [columns[0]] + train_and_val_columns\n #train_and_val_columns_dict ={}\n\n\n for i in range(len(columns)):\n train_columns_dict[columns[i]] = train_columns[i]\n val_columns_dict[columns[i]] = val_columns[i]\n #train_and_val_columns_dict[columns[i]] = train_and_val_columns[i]\n\n\n concatenated_training_df = pd.concat(train_eval_df_list, sort=False)\n concatenated_training_df = concatenated_training_df.rename(columns=train_columns_dict)\n\n concatenated_validation_df = pd.concat(val_eval_df_list, sort=False)\n concatenated_validation_df = concatenated_validation_df.rename(columns=val_columns_dict)\n \n concatenated_train_and_validation_df = pd.concat(train_val_eval_df_list, sort=False)\n #concatenated_train_and_validation_df = concatenated_train_and_validation_df.rename(columns=train_and_val_columns_dict)\n\n\n concatenated_training_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"training_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_validation_df.to_csv(os.path.join(EMBEDDING_DEST, \"compare_with_no_sz\", \"validation_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_train_and_validation_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\",\"training_and_validation_all_evaluation_result_top_tri.csv\"), index=None)\n\n # ---------\n # If you have columns on arguments, keep them in training but drop them in validation and train_and_val to prevent duplicates\n list_of_cols_in_validation_df = list(concatenated_validation_df)\n list_of_cols_in_train_val_df = list(concatenated_train_and_validation_df)\n args_cols = get_json_argument_list()\n\n args_cols_val = [\"validation_\"+item for item in args_cols]\n \n if len(list_of_cols_in_train_val_df) == len(list_of_cols_in_validation_df) and len(list_of_cols_in_train_val_df) > 7:\n concatenated_validation_df = concatenated_validation_df.drop(args_cols_val, axis=1, errors='ignore')\n concatenated_train_and_validation_df = concatenated_train_and_validation_df.drop(args_cols, axis=1, errors='ignore')\n\n\n # ---------\n\n all_three_df_list = [concatenated_training_df, concatenated_validation_df, concatenated_train_and_validation_df]\n concatenated_all_df = pd.concat(all_three_df_list, axis=1)\n concatenated_all_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"all_evaluation_result_top_tri.csv\"), index=None)", "def extract_results(grid_search):\n results = grid_search.cv_results_.copy()\n params = pd.DataFrame(results.pop('params'))\n values = pd.DataFrame(results)\n values = values.loc[:, ~values.columns.str.contains('param_')]\n df = pd.concat([params, values], axis=1)\n df = df.set_index(list(params.columns))\n df = df.sort_values('rank_test_neg_mean_squared_error')\n return df", "def aggregate_results(output_files, agg_filename):\n\n print(file_marker + \"STARTING AGGREGATION\")\n feather_files = output_files\n\n results = []\n for i in range(len(feather_files)):\n print(file_marker + str(i))\n x = pd.read_feather(feather_files[i])\n results.append(x)\n \n overall_results = pd.concat(results, ignore_index=True, sort=False)\n opt_diff_results = overall_results\n\n opt_diff_results.reset_index(inplace=True, drop=True) \n # drop=True: column 'index' gets removed\n\n 
opt_diff_results.to_feather(agg_filename)\n print(file_marker + \"Aggregated results saved to: \" + agg_filename)", "def _merge_test_output(self, dict_env, env_name):\n for iir, test in enumerate(self.params[\"tests\"]):\n with self.reports[env_name][iir].open() as f:\n report = json.load(f)\n report = _check_dict(report, test[\"name\"])\n # for some plots it's easier to use \"flat\" test structure\n report_flat = _flatten_dict_test(report)\n if iir == 0:\n try:\n df = pd.DataFrame(report)\n except ValueError: # if results are not list\n df = pd.DataFrame(report, index=[0])\n df_flat = pd.DataFrame(report_flat, index=[0])\n else:\n try:\n df = df.merge(pd.DataFrame(report), how=\"outer\")\n except ValueError: # if results are not list\n df = df.merge(pd.DataFrame(report, index=[0]), how=\"outer\")\n df_flat = pd.concat(\n [df_flat, pd.DataFrame(report_flat, index=[0])], axis=1\n )\n\n df_env = pd.DataFrame(dict_env, index=[0])\n df_flat = pd.concat([df_env, df_flat], axis=1)\n df_env = pd.concat([df_env] * len(df)).reset_index(drop=True)\n df = pd.concat([df_env, df], axis=1)\n return df, df_flat", "def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput", "def get_and_reset_results(self):\n results = self.results\n self.results = {}\n for db_name, bulkOp in self.__dbs.iteritems():\n bulkOp.results = self.results\n return results", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n 
duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def result_df(self, regex=None) -> pd.DataFrame:\n if regex:\n # get one random item from dict, and get keys from this random (dict) item\n # FIXME: how to do this better? - this is not efficient...\n keys = self.result[next(iter(self.result))].keys()\n\n if type(regex) == str:\n comp_regexe = re.compile(regex)\n columns = list(filter(comp_regexe.search, keys))\n else:\n columns = list(filter(regex.search, keys))\n\n df = pd.DataFrame.from_dict(self.result, orient='index')\n return df[columns]\n else:\n return pd.DataFrame.from_dict(self.result, orient='index')", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def dwn_all_saved_results(request):\n \n sources = []\n for i in Source.objects.filter(user=request.user):\n sources.append((i.source_id, i.datetime_extracted.strftime('%d/%m/%Y %H:%M'), i.source))\n \n data = []\n for s, timee, s_name in sources:\n objs = ExtractedRelation.objects.filter(source=s)\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, timee, s_name, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Extraction Time', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/all_analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/all_analysis_results.csv','rb'))", "def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df", "def merge_survey(self) -> pd.DataFrame:\n\n df_list = []\n for survey_id in self.survey_id:\n self.log.debug(f\"Reading: {survey_id}\")\n temp_df = self.get_survey_responses(survey_id)\n df_list.append(temp_df[2:])\n\n df_col = reduce(pd.Index.union, (df.columns for df in df_list))\n\n merged_df = pd.DataFrame()\n for df in df_list:\n temp_df = df.reindex(columns=df_col, fill_value=0)\n merged_df = merged_df.append([temp_df], ignore_index=True)\n return merged_df", "def res_futures(dict_nb: dict) -> pd.DataFrame:\n res = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=11, thread_name_prefix=\"thread\") as executor:\n # Start the load operations and mark each future with its URL\n future_to_req = {executor.submit(query, df): df for df in dict_nb.values()}\n for future in concurrent.futures.as_completed(future_to_req):\n req 
= future_to_req[future]\n try:\n data = future.result()\n res.append(data)\n jointure = pd.concat(res)\n except Exception as exc:\n print('%r generated an exception: %s' % (req, exc), flush=True)\n\n return jointure", "def _api_scrape(json_inp, ndx):\n\n try:\n headers = json_inp['resultSets'][ndx]['headers']\n values = json_inp['resultSets'][ndx]['rowSet']\n except KeyError:\n # This is so ugly but this is what you get when your data comes out\n # in not a standard format\n try:\n headers = json_inp['resultSet'][ndx]['headers']\n values = json_inp['resultSet'][ndx]['rowSet']\n except KeyError:\n # Added for results that only include one set (ex. LeagueLeaders)\n headers = json_inp['resultSet']['headers']\n values = json_inp['resultSet']['rowSet']\n return DataFrame(values, columns=headers)", "def make_dataframes(self):\n self._data_frame_30days = pd.DataFrame(self._all30_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_60days = pd.DataFrame(self._all60_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_90days = pd.DataFrame(self._all90_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_counts = pd.DataFrame(\n {\n \"Created\": {\"totals\": self._data_frame_30days.count()[\"Created\"]},\n \"Closed\": {\"totals\": self._data_frame_30days.count()[\"Closed\"]},\n \"Owner\": (self._data_frame_30days[\"Owner\"].value_counts().to_dict()),\n \"Resolution\": (self._data_frame_30days[\"Resolution\"].value_counts().to_dict()),\n \"Severity\": (self._data_frame_30days[\"Severity\"].value_counts().to_dict()),\n },\n index=self.counts_frame_INDEX,\n )\n self._data_frame_counts.fillna(0, inplace=True)", "def _wrap_result(data, columns, index_col=None, coerce_float=True,\n parse_dates=None):\n\n frame = DataFrame.from_records(data, columns=columns,\n coerce_float=coerce_float)\n\n _parse_date_columns(frame, parse_dates)\n\n if index_col is not None:\n frame.set_index(index_col, inplace=True)\n\n return frame", "def _consolidate_spont_results(self):\n\n # SPONT vs. 
SPONT\n\n # 1) deal with numeric results for spont spont\n df = self.numeric_results.copy()\n mean_cols = [c for c in df.columns if '_sem' not in c]\n err_cols = [c for c in df.columns if '_sem' in c]\n\n spont_spont_mean = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][mean_cols].groupby(by='n_components').mean()\n spont_spont_sem = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n spont_spont = pd.concat([spont_spont_mean, spont_spont_sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in spont_spont.index], names=['combo', 'n_components'])\n spont_spont.set_index(new_idx, inplace=True)\n\n # drop individual spont_spont pairs from master df\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n\n # add new spont results to df\n df = spont_spont.append(df)\n self.numeric_results = df.copy()\n\n\n # 2) deal with array results for spont_spont\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n sp_df = df.loc[pd.IndexSlice[self.spont_stimulus_pairs, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n \n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_spont', n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_stimulus_pairs)]\n df = new_df.append(df)\n \n self.array_results[obj] = df.copy()\n\n self.spont_stimulus_pairs = ['spont_spont']\n\n\n # SPONT vs. 
EVOKED\n df = self.numeric_results.copy()\n unique_evoked_bins = np.unique([[c.split('_')[0], c.split('_')[1]] for c in self.evoked_stimulus_pairs])\n\n # 1) deal with numeric results\n new_sp_ev_pairs = []\n for stim in unique_evoked_bins:\n # get all spont / evoked combos\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n m = df.loc[pd.IndexSlice[sp_ev, :], :][mean_cols].groupby(by='n_components').mean()\n sem = df.loc[pd.IndexSlice[sp_ev, :], :][err_cols].groupby(by='n_components').apply(error_prop)\n sp_ev_df = pd.concat([m, sem], axis=1)\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in sp_ev_df.index], names=['combo', 'n_components']) \n sp_ev_df.set_index(new_idx, inplace=True)\n df = sp_ev_df.append(df)\n new_sp_ev_pairs.append('spont_{}'.format(stim))\n\n # remove inividual spont_evoked pairs \n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)] \n\n # save updated dataframe for numeric results\n self.numeric_results = df.copy()\n\n # 2) deal with object results\n for obj in self.object_keys:\n df = self.array_results[obj].copy()\n for stim in unique_evoked_bins:\n sp_ev = np.unique([c for c in self.spont_evoked_stimulus_pairs if stim in c])\n sp_df = df.loc[pd.IndexSlice[sp_ev, :], :]\n\n if 'evecs' in obj:\n m = [np.nanmean(reflect_eigenvectors(x), axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n else:\n m = [np.nanmean(x, axis=0) for x in [np.stack([a for a in arr[1]['mean'].values]) for arr in sp_df.groupby('n_components')]]\n sem = [error_prop(x, axis=0) for x in [np.stack([a for a in arr[1]['sem'].values]) for arr in sp_df.groupby('n_components')]]\n components = [arr[0] for arr in sp_df.groupby('n_components')]\n new_idx = pd.MultiIndex.from_tuples([pd.Categorical(('spont_{}'.format(stim), n_components)) \n for n_components in components], names=['combo', 'n_components'])\n new_df = pd.DataFrame(index=new_idx, columns=['mean', 'sem'])\n new_df['mean'] = m\n new_df['sem'] = sem\n\n df = df[~df.index.get_level_values('combo').isin(self.spont_evoked_stimulus_pairs)]\n df = new_df.append(df)\n self.array_results[obj] = df\n\n # update self.spont_evoked_stimulus_pairs\n self.spont_evoked_stimulus_pairs = new_sp_ev_pairs \n\n # no need to return anything... 
just update object attributes", "def cleanup_queries(results):\n data = []\n for result in results:\n result.__dict__.pop('_sa_instance_state', 'None')\n item = result.__dict__\n if 'date' in item and item['date']:\n t = item['date']\n formatted_date = t.strftime('%m/%d/%Y')\n item['date'] = formatted_date\n data.append(item)\n return data", "def get_all_ssresults(self):\n\n all_results = ()\n self._logger.debug(\"Getting all ss results\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT result_id, ss_ind_first, ss_first_char, ss_ind_second, ss_second_char, ss_ind_third, \\\nss_third_char, ss_ind_fourth, ss_fourth_char, ss_ind_fifth, ss_fifth_char, ss_ind_sixth, ss_sixth_char, ss_ind_seventh, ss_seventh_char, \\\nss_ind_eighth, ss_eighth_char, time FROM ss_ind_result ORDER BY time DESC\")\n results = cursor.fetchall()\n\n for result_id, first_id, first_char, second_id, second_char, third_id, third_char, fourth_id, fourth_char, fifth_id, fifth_char, sixth_id, sixth_char, seventh_id, seventh_char, eighth_id, eighth_char, timestamp in results:\n intermediate_results = ()\n\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(first_id))\n first = cursor.fetchall()\n first_name_first, last_name_first, \\\n nickname_first = first[0]\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\nplayer WHERE player_id = {0}\".format(second_id))\n second = cursor.fetchall()\n first_name_second, last_name_second, \\\n nickname_second = second[0]\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(third_id))\n third = cursor.fetchall()\n first_name_third, last_name_third, \\\n nickname_third = third[0]\n except MySQLdb.OperationalError:\n first_name_third = ''\n last_name_third = ''\n nickname_third = ''\n third_char = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(fourth_id))\n fourth = cursor.fetchall()\n first_name_fourth, last_name_fourth, \\\n nickname_fourth = fourth[0]\n except MySQLdb.OperationalError:\n first_name_fourth = ''\n last_name_fourth = ''\n nickname_fourth = ''\n fourth_char = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(fifth_id))\n fifth = cursor.fetchall()\n first_name_fifth, last_name_fifth, \\\n nickname_fifth = fifth[0]\n except MySQLdb.OperationalError:\n first_name_fifth = ''\n last_name_fifth = ''\n nickname_fifth = ''\n fifth_char = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(sixth_id))\n sixth = cursor.fetchall()\n first_name_sixth, last_name_sixth, \\\n nickname_sixth = sixth[0]\n except MySQLdb.OperationalError:\n first_name_sixth = ''\n last_name_sixth = ''\n nickname_sixth = ''\n sixth_char = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(seventh_id))\n seventh = cursor.fetchall()\n first_name_seventh, last_name_seventh, \\\n nickname_seventh = seventh[0]\n except MySQLdb.OperationalError:\n first_name_seventh = ''\n last_name_seventh = ''\n nickname_seventh = ''\n seventh_char = ''\n try:\n cursor.execute(\"SELECT first_name, last_name, nickname FROM \\\n player WHERE player_id = {0}\".format(eighth_id))\n eighth = cursor.fetchall()\n first_name_eighth, last_name_eighth, \\\n nickname_eighth = eighth[0]\n except 
MySQLdb.OperationalError:\n first_name_eighth = ''\n last_name_eighth = ''\n nickname_eighth = ''\n eighth_char = ''\n\n intermediate_results = intermediate_results + \\\n (result_id, first_name_first, last_name_first,\n nickname_first, first_char, first_name_second, last_name_second,\n nickname_second, second_char, first_name_third,\n last_name_third, nickname_third, third_char, first_name_fourth,\n last_name_fourth, nickname_fourth, fourth_char, first_name_fifth,\n last_name_fifth, nickname_fifth, fifth_char, first_name_sixth,\n last_name_sixth, nickname_sixth, sixth_char, first_name_seventh,\n last_name_seventh, nickname_seventh, seventh_char, first_name_eighth,\n last_name_eighth, nickname_eighth, eighth_char,\n timestamp.strftime('%Y-%m-%d'))\n\n all_results = all_results + (intermediate_results,)\n del intermediate_results\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return all_results", "def _run_queries(self, urls: Dict[str, str]) -> Dict[str, pd.DataFrame]:\n # TODO: optimize and make async\n return {k: self._url_to_df(v) for k, v in urls.items()}", "def concat_disease_evaluation_results(study, list_of_folders):\n\n eval_df_list = []\n\n for item in list_of_folders:\n if item == \"random\" or \"resnet\" in item:\n path_to_eval_folder = os.path.join(DATA_DIR, study, \"segmentation_embeddings\", item)\n base_case = True\n else:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n base_case = False\n\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n\n if base_case == True:\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n eval_df_list.append(df)\n else:\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\") and study in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n eval_df_list.append(df)\n\n\n columns = list(eval_df_list[0])\n\n concatenated_df = pd.concat(eval_df_list, sort=False)\n \n concatenated_df.to_csv(os.path.join(EMBEDDING_DEST, study+ \"_all_evaluation_result_top_tri.csv\"),index=None)", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df", "def get_data(list_data_tuples):\n \n \n benchmark_symbol=list_data_tuples[0][0]; # First element is the benchmark symbol\n \n #print benchmark_symbol\n \n df=pd.DataFrame(index=list_data_tuples[0][1]['data'].index) # First dataframe index is nothing but date\n \n for tpl in list_data_tuples:\n #print tpl[0]\n df_temp = pd.DataFrame(tpl[1]['data']['Adj. Close'],index=tpl[1]['data'].index)\n df_temp = df_temp.rename(columns={'Adj. 
Close': tpl[0]}) # tpl[0] is the symbol\n #print df_temp,tpl[0]\n df = df.join(df_temp)\n if tpl[0] == benchmark_symbol: # drop dates SPY did not trade\n df = df.dropna(subset=[benchmark_symbol])\n\n df=df.dropna(axis=0) # This drops any NaN values especially if the stock price has no information\n \n return df", "def run_analytics_queries(cur, conn):\n \n output = []\n\n for query in analytics_queries:\n cur.execute(query)\n records = cur.fetchall()\n column_names = list(map(lambda x: x[0], cur.description))\n output.append(pd.DataFrame(records, columns=column_names))\n \n for table in output:\n print(table, end='\\n\\n')", "def process_and_merge(s):\n l = [preprocessing(df) for df in s]\n d = {x.name: x for x in l}\n df = pd.DataFrame(d)\n df.index.names = [x.lower() for x in df.index.names]\n return pd.DataFrame(d)", "def initialize_output_dfs() -> Tuple[pd.DataFrame]:\n df_city_no_zip = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_zip_no_city = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_zipCity_no_address = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_address_no_zipCity = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_no_address_at_all = pd.DataFrame(columns=[\"memberid\", \"source\", \"action\"])\n df_invalid_matrices = pd.DataFrame(\n columns=[\"memberid\", \"DataMatrix\", \"source\", \"action\"]\n )\n df_employees = pd.DataFrame(\n columns=[\"memberid\", \"MemberName\", \"MemberStatus\", \"source\", \"action\"]\n )\n return (\n df_city_no_zip,\n df_zip_no_city,\n df_zipCity_no_address,\n df_address_no_zipCity,\n df_no_address_at_all,\n df_invalid_matrices,\n df_employees,\n )", "def new_df(companies_filtered):\n name = []\n city = []\n latitude = []\n longitude = []\n zip_code = []\n for i in companies_filtered:\n name.append(i['name'])\n try: \n if i['offices'][0]['city'] == '':\n city.append(np.nan)\n else:\n city.append(i['offices'][0]['city'])\n latitude.append(i['offices'][0]['latitude'])\n longitude.append(i['offices'][0]['longitude'])\n except:\n city.append(np.nan)\n latitude.append(np.nan)\n longitude.append(np.nan)\n zip_code.append(np.nan)\n dict_ = {'company' : name, 'city' : city, 'latitude' : latitude, 'longitude': longitude}\n companies_df = pd.DataFrame.from_dict(dict_, orient='columns')\n \n return companies_df", "def get_news():\n # empty dataframe\n df = pd.DataFrame() \n # read each url in list\n for url in inshorts_urls(): \n # add each dataframe of cards to df\n df = pd.concat([df, get_article(url)])\n # return all urls' cards\n return df", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def get_datasets_summary(rs):\n\n\tif rs == \"rs1\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets WHERE dataset NOT LIKE 'CEMBA_RS2_%%'\").fetchall()\n\n\t\t# This is a hack to get unique values 
in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset NOT LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"rs2\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets WHERE dataset LIKE 'CEMBA_RS2_%%'\").fetchall()\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells WHERE dataset LIKE 'CEMBA_RS2_%%' GROUP BY dataset\").fetchall()\n\telif rs == \"all\":\n\t\tdataset_list = db.get_engine(current_app, 'methylation_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\tdataset_list += db.get_engine(current_app, 'snATAC_data').execute(\"SELECT * FROM datasets\").fetchall()\n\t\t# This is a hack to get unique values in a list of dictionaries\n\t\tdataset_list = list({x['dataset']:x for x in dataset_list}.values());\n\t\ttotal_methylation_cell_each_dataset = db.get_engine(current_app, 'methylation_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\t\ttotal_snATAC_cell_each_dataset = db.get_engine(current_app, 'snATAC_data').execute(\"SELECT dataset, COUNT(*) as `num` FROM cells GROUP BY dataset\").fetchall()\n\telse:\n\t\treturn\n\n\ttotal_methylation_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_methylation_cell_each_dataset ]\n\ttotal_methylation_cell_each_dataset = { k: v for d in total_methylation_cell_each_dataset for k, v in d.items() }\n\ttotal_snATAC_cell_each_dataset = [ {d['dataset']: d['num']} for d in total_snATAC_cell_each_dataset ]\n\ttotal_snATAC_cell_each_dataset = { k: v for d in total_snATAC_cell_each_dataset for k, v in d.items() }\n\n\tdataset_cell_counts = []\n\tfor dataset in dataset_list:\n\t\ttry:\n\t\t\tnum_snATAC_cells = total_snATAC_cell_each_dataset[dataset['dataset']]\n\t\texcept KeyError as e:\n\t\t\tnum_snATAC_cells = 0\n\n\t\tif \"RS2\" not in dataset['dataset']:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[1]\n\t\t\tresearch_segment = \"RS1\"\n\t\telse:\n\t\t\tbrain_region_code = dataset['dataset'].split('_')[2]\n\t\t\tbrain_region_code = brain_region_code[-2:]\n\t\t\tresearch_segment = \"RS2\"\n\n\t\tregions_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['brain_region']).fetchone()\n\t\tif regions_sql is not None:\n\t\t\tABA_regions_descriptive = regions_sql['ABA_description'].replace('+', ', ')\n\t\telse:\n\t\t\tABA_regions_descriptive = \"\"\n\n\t\tif rs == \"rs1\":\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": 
dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'] })\n\t\telse:\n\t\t\ttarget_region_sql = db.get_engine(current_app, 'methylation_data').execute(\"SELECT ABA_description FROM ABA_regions WHERE ABA_acronym='%s'\" % dataset['target_region']).fetchone()\n\t\t\tif target_region_sql is not None:\n\t\t\t\ttarget_region_descriptive = target_region_sql['ABA_description'].replace('+', ', ')\n\t\t\telse:\n\t\t\t\ttarget_region_descriptive = \"\"\n\n\t\t\ttry:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": total_methylation_cell_each_dataset[dataset['dataset']],\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\t\t\texcept:\n\t\t\t\tdataset_cell_counts.append( {\"dataset_name\": dataset['dataset'],\n\t\t\t\t\t\t\t\t\t\t\t \"research_segment\": research_segment,\n\t\t\t\t\t\t\t\t\t\t\t \"sex\": dataset['sex'],\n\t\t\t\t\t\t\t\t\t\t\t \"methylation_cell_count\": 0,\n\t\t\t\t\t\t\t\t\t\t\t \"snATAC_cell_count\": num_snATAC_cells,\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_acronym\": dataset['brain_region'].replace('+', ', '),\n\t\t\t\t\t\t\t\t\t\t\t \"ABA_regions_descriptive\": ABA_regions_descriptive,\n\t\t\t\t\t\t\t\t\t\t\t \"slice\": brain_region_code,\n\t\t\t\t\t\t\t\t\t\t\t \"date_added\": str(dataset['date_online']),\n\t\t\t\t\t\t\t\t\t\t\t \"description\": dataset['description'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_acronym\": dataset['target_region'],\n\t\t\t\t\t\t\t\t\t\t\t \"target_region_descriptive\": target_region_descriptive})\n\n\treturn json.dumps(dataset_cell_counts)", "def results_to_df(ary, ks, ns):\n \n # create columns as dictionaries\n results = {}\n results['algorithm'] = ['knn' for i in range(ary.size / 4)] + ['cnn' for j in range(ary.size / 4)]\n results['sample_size'] = ns * (2 * len(ks))\n k = []\n for ii in range(len(ks)):\n k += [ks[ii] for jj in range(len(ns))]\n results['k'] = k + k\n results['run_time'] = ary[0].reshape(60)\n results['accuracy'] = ary[1].reshape(60)\n \n return 
pd.DataFrame(results)", "def create_dataframe(self):\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists", "def out(settings, df, result):\n if settings.join:\n return df.join(result[result.columns.difference(df.columns)])\n return result.squeeze()", "def _replica_results_dedup(queries):\n deduplicated_queries = []\n for query in queries:\n new_query = query.copy()\n\n if \"results\" in query:\n objects_seen = {}\n dedup_results = []\n results = query[\"results\"]\n\n for result in results:\n if result[\"type\"] == \"dataobject\":\n full_name = result[\"full_name\"]\n if full_name not in objects_seen:\n objects_seen[full_name] = 1\n dedup_results.append(result)\n else:\n dedup_results.append(result)\n\n new_query[\"results\"] = dedup_results\n\n deduplicated_queries.append(new_query)\n\n return deduplicated_queries", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})", "def process_to_dataframe(chkpts, tags=['final_energy', 'job_name']):\n\n pass", "def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df", "def get_df_all_results(self, file):\n # read csv into dataframe\n df = pd.read_csv(file)\n # rename columns\n names = [\"index\", \"samp1\", \"samp2\", \"es\", \"sd1\", \"sd2\", \"k\", \"perm\",\n \"t_test\"]\n df.columns = names\n return df", "def extract_format_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"output\": [\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 1,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1338,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 0.9,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA2.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(43)H(75)N(15)O(17)S(2)\",\n \"scaling_factor\": 10,\n \"score\": 1,\n \"charge\": 3,\n },\n ]\n }\n ]\n for test_dict in TESTS:\n values = self.results.format_all_results()\n\n assert isinstance(values, pd.DataFrame)\n\n for out_data in test_dict[\"output\"]:\n result = values.loc[\n (values[\"file_name\"] == out_data[\"file_name\"])\n & (values[\"spec_id\"] == out_data[\"spec_id\"])\n ]\n assert (result[\"formula\"] == out_data[\"formula\"]).all()\n assert (result[\"scaling_factor\"] == out_data[\"scaling_factor\"]).all()\n assert (result[\"score\"] == out_data[\"score\"]).all()\n assert (result[\"charge\"] == out_data[\"charge\"]).all()", "def build(self):\n list_of_mafs = []\n maf_generator = self.get_dataframe()\n\n for maf_as_dict in maf_generator:\n list_of_mafs.extend(maf_as_dict)\n\n reporting_path 
= os.path.join(app.config.get('REPORTING_ROOT_PATH'), app.config.get('REPORTING_PATH'), 'global')\n combined_maf = None\n try:\n combined_maf = pandas.DataFrame(list_of_mafs)\n except Exception as e:\n logger.error(f'Problem creating dataframe from list of dicts: {str(e)}')\n try:\n combined_maf.to_csv(\n os.path.join(reporting_path, f'{self.method}_combined_maf.tsv'),\n sep=\"\\t\",\n encoding='utf-8',\n index='false'\n )\n except Exception as e:\n # bad practice here catching base exception, but the pandas documentation did not reveal what errors or\n # exceptions to expect\n logger.error(f'Problem writing the combined maf file to csv:{str(e)}')\n abort(500)", "def cashladder_to_df(response) -> pd.DataFrame:\n\n base_df = new_data_frame()\n\n for element in response.values:\n shks = {shk: value.value.label_value for shk, value in element.sub_holding_keys.items()}\n\n for record in element.records:\n working_df = add_open_row(element, record)\n\n for activity, value in record.activities.items():\n working_df = add_activity_row(element, record, working_df, activity, value)\n working_df = add_close_row(element, record, working_df)\n\n for k, v in shks.items():\n working_df[k] = v\n base_df = base_df.append(working_df, ignore_index=True)\n\n return base_df", "def combineAllListsIntoPandasDataframe(lss):\n # combine all lists into a dataframe\n #print lss\n df = p.DataFrame(range(len(lss[0])))\n for i in lss:\n try:\n df[i.columns[0]] = i\n except AttributeError, e:\n print \"{0}: {1}\".format(i, e)\n return df", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def merge_outputs(self):\n df_el_l = []\n env_tests = []\n for ii, soft_d in enumerate(self.matrix_of_envs):\n el_dict = self._soft_to_str(soft_d)\n el_dict[\"env\"] = self.env_names[ii]\n # if this is the reference environment, nothing will be added\n if self.env_names[ii] == self.ref_env_ind:\n el_dict[\"env\"] = el_dict[\"env\"] = f\"{el_dict['env']}: ref env\"\n df_el_l.append(pd.DataFrame(el_dict, index=[0]))\n elif self.docker_status[ii] == \"docker ok\":\n if self.params[\"tests\"]:\n # merging results from tests and updating self.res_all, self.res_all_flat\n df_el, df_el_flat = self._merge_test_output(\n dict_env=el_dict, env_name=self.env_names[ii]\n )\n df_el_l.append(df_el)\n else: # when tests not defined only env infor should be saved\n df_env = pd.DataFrame(el_dict, index=[0])\n df_el_l.append(df_env)\n env_tests.append(el_dict[\"env\"])\n else:\n el_dict[\"env\"] = f\"{el_dict['env']}: build failed\"\n df_el_l.append(pd.DataFrame(el_dict, index=[0]))\n\n # TODO: not sure if I need both\n self.res_all_df = pd.concat(df_el_l).reset_index(drop=True)\n self.res_all_df.fillna('N/A', inplace=True)\n if \"index_name\" in self.res_all_df and \\\n all([(el == \"N/A\") or pd.notna(el) for el in self.res_all_df[\"index_name\"]]):\n self.res_all_df.drop(\"index_name\", axis=1, inplace=True)\n self.res_all_df.to_csv(self.working_dir / \"output_all.csv\", index=False)\n # data frame with environments that were tested only\n self.res_tests_df = self.res_all_df[self.res_all_df.env.isin(env_tests)]\n self.res_tests_df.to_csv(self.working_dir / \"output.csv\", index=False)\n\n # saving detailed describtion about the environment\n soft_vers_description = {}\n for key, val in self._soft_vers_spec.items():\n soft_vers_description[key] = 
[]\n for ii, spec in enumerate(val):\n if isinstance(spec, dict) and \"description\" in spec:\n descr = spec[\"description\"]\n else:\n descr = str(spec)\n soft_vers_description[key].append(\n {\"version\": \"ver_{}\".format(ii), \"description\": descr}\n )\n with (self.working_dir / \"envs_descr.json\").open(mode=\"w\") as f:\n json.dump(soft_vers_description, f)", "def GridSearchResultToDF(search):\n return(pd.concat([pd.DataFrame(data=search.cv_results_['params']),\n pd.DataFrame(data={'mean': search.cv_results_['mean_test_score'],\n 'std': search.cv_results_['std_test_score']}),\n pd.DataFrame(data={'mean_fit_time': search.cv_results_['mean_fit_time']})],\n axis = 1))", "def merge_cached_results(*results):\r\n if len(results) == 1:\r\n return list(results[0])\r\n\r\n #make sure the sorts match\r\n sort = results[0].query._sort\r\n assert(all(r.query._sort == sort for r in results[1:]))\r\n\r\n def thing_cmp(t1, t2):\r\n for i, s in enumerate(sort):\r\n #t1 and t2 are tuples of (fullname, *sort_cols), so we can\r\n #get the value to compare right out of the tuple\r\n v1, v2 = t1[i + 1], t2[i + 1]\r\n if v1 != v2:\r\n return cmp(v1, v2) if isinstance(s, asc) else cmp(v2, v1)\r\n #they're equal\r\n return 0\r\n\r\n all_items = []\r\n for r in results:\r\n r.fetch()\r\n all_items.extend(r.data)\r\n\r\n #all_items = Thing._by_fullname(all_items, return_dict = False)\r\n return [i[0] for i in sorted(all_items, cmp = thing_cmp)]", "def create_dataframe_from_analyse(cls, list_analyse, lvl=3, player=10, move=30):\n import pandas as pd\n import numpy as np\n\n ######## CREATE EMPTY NAN DATAFRAME ########\n index_lvl = list(\"lvl_{}\".format(i) for i in range(0, lvl))\n index_player = list(\"player_{}\".format(i) for i in range(0, player))\n index_move = list(\"move_{}\".format(i) for i in range(0, move))\n index = pd.MultiIndex.from_product([index_lvl, index_player, index_move])\n\n measure_lvl = [\"start_cm\", \"rel_pt\", \"amp_max_cop\", 'amp_max_pel', 'amp_max_c7', \"vel_max_cop\", 'vel_max_pel',\n 'vel_max_c7', \"overshoot\", \"dcm\", \"dtml\", \"rcm\"]\n spatio_temp_lvl = ['index', 'value']\n columns = pd.MultiIndex.from_product([measure_lvl, spatio_temp_lvl])\n\n n_row = lvl * player * move\n n_col = len(measure_lvl) * len(spatio_temp_lvl)\n data = np.empty((n_row, n_col))\n data[:] = np.nan\n\n df = pd.DataFrame(data, index=index, columns=columns)\n\n ######## ADD DATA ########\n\n for a in list_analyse:\n for p in a.players:\n for m in p.valid_moves:\n lvl, player_id, move_id = \"lvl_{}\".format(int(m.rep[-1]) - 2), \"player_{}\".format(\n m.player_id), \"move_{}\".format(m.move_id)\n # print(lvl, player_id, move_id)\n # start_cm\n df.loc[(lvl, player_id, move_id)][(\"start_cm\", 'index')] = m.cof.cm.start_cm.index\n df.loc[(lvl, player_id, move_id)][(\"start_cm\", 'value')] = m.cof.cm.start_cm.val\n\n # rel_pt\n df.loc[(lvl, player_id, move_id)][(\"rel_pt\", 'index')] = m.cof.cm.rel_pt.index\n df.loc[(lvl, player_id, move_id)][(\"rel_pt\", 'value')] = m.cof.cm.rel_pt.val\n\n # \"amp_max_cop\", 'amp_max_pel', 'amp_max_c7',\n df.loc[(lvl, player_id, move_id)][(\"amp_max_cop\", 'index')] = m.cof.max_amp.index\n df.loc[(lvl, player_id, move_id)][(\"amp_max_cop\", 'value')] = m.cof.max_amp.val\n\n df.loc[(lvl, player_id, move_id)][(\"amp_max_pel\", 'index')] = m.pelvis.max_amp.index\n df.loc[(lvl, player_id, move_id)][(\"amp_max_pel\", 'value')] = m.pelvis.max_amp.val\n\n df.loc[(lvl, player_id, move_id)][(\"amp_max_c7\", 'index')] = m.c7.max_amp.index\n df.loc[(lvl, player_id, 
move_id)][(\"amp_max_c7\", 'value')] = m.c7.max_amp.val\n\n # \"vel_max_cop\", 'vel_max_pel', 'vel_max_c7'\n\n df.loc[(lvl, player_id, move_id)][(\"vel_max_cop\", 'index')] = m.cof.max_vel.index\n df.loc[(lvl, player_id, move_id)][(\"vel_max_cop\", 'value')] = m.cof.max_vel.max_vel\n\n df.loc[(lvl, player_id, move_id)][(\"vel_max_pel\", 'index')] = m.pelvis.max_vel.index\n df.loc[(lvl, player_id, move_id)][(\"vel_max_pel\", 'value')] = m.pelvis.max_vel.max_vel\n\n df.loc[(lvl, player_id, move_id)][(\"vel_max_c7\", 'index')] = m.c7.max_vel.index\n df.loc[(lvl, player_id, move_id)][(\"vel_max_c7\", 'value')] = m.c7.max_vel.max_vel\n\n # \"overshoot\", \"dcm\", \"dtml\", \"rcm\"\n\n overshoot = abs(m.cof.max_amp.val - m.cof.x_arr[m.end_drop_median])\n overshoot_time = m.cof_rel.max_amp.index - m.end_drop_median\n df.loc[(lvl, player_id, move_id)][(\"overshoot\", 'value')] = overshoot\n df.loc[(lvl, player_id, move_id)][(\"overshoot\", 'index')] = overshoot_time\n\n dcm = abs(m.cof.cm.start_cm.val - m.cof.cm.rel_pt.val)\n dcm_time = abs(m.cof.cm.start_cm.index - m.cof.cm.rel_pt.index)\n df.loc[(lvl, player_id, move_id)][('dcm', 'value')] = dcm\n df.loc[(lvl, player_id, move_id)][('dcm', 'index')] = dcm_time\n\n dtml = abs(m.cof.cm.start_cm.val - m.cof.x_arr[m.cof_rel.pdispl_100.index])\n dtml_time = abs(m.cof.cm.start_cm.index - m.cof_rel.pdispl_target.index)\n df.loc[(lvl, player_id, move_id)][('dtml', 'value')] = dtml\n df.loc[(lvl, player_id, move_id)][('dtml', 'index')] = dtml_time\n\n rcm = dcm / dtml\n rcm_time_ratio = dcm_time / dtml_time\n df.loc[(lvl, player_id, move_id)][('rcm', 'value')] = rcm\n df.loc[(lvl, player_id, move_id)][('rcm', 'index')] = rcm_time_ratio\n\n return df", "def load_stats_dataframe(files, aggregated_results=None):\n if os.path.exists(aggregated_results) and all(\n [os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):\n return pd.read_pickle(aggregated_results)\n\n df = pd.DataFrame()\n for f in files:\n tmp_dict = pd.read_pickle(f)\n tmp_dict['emb_size'] = f.split('_')[2]\n tmp_dict['negative_ratio'] = f.split('_')[4]\n tmp_dict['batch_size'] = f.split('_')[6]\n tmp_dict['epochs'] = f.split('_')[8]\n tmp_dict['classification'] = f.split('_')[-1].split('.')[0]\n\n tmp_df = pd.DataFrame.from_dict(tmp_dict)\n df = pd.concat([df, tmp_df])\n\n if aggregated_results:\n df.to_pickle(aggregated_results)\n\n return df", "def extract_data():\n logging.info(f'Reading data from {impftermine.agg_export_file_name()}...')\n df_wl = pd.read_csv(impftermine.agg_export_file_name())\n vacc_report_file = os.path.join(credentials.vmdl_path, 'vaccination_report_bs_age_group_long.csv')\n logging.info(f'Reading data from {vacc_report_file}...')\n df_impf = pd.read_csv(vacc_report_file)\n return df_wl, df_impf", "def pack_result_data(mean, upper, lower, x):\n if len(upper) == 0 and len(lower) == 0:\n upper = mean\n lower = mean\n d = {\"mean\": mean, \"upper\": upper, \"lower\": lower, \"x\": x}\n return pd.DataFrame(data=d)", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def transfers_dataframe(tables_list):\r\n 
return pd.concat([pd.DataFrame(table[1:], columns=table[0]) for table in tables_list])", "def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = pd.to_numeric(results_df['dec_motion'])\n \n return results_df", "def collect_scores(true_values, pred_df):\n csv_data = []\n for index in true_values.index.unique():\n if index not in pred_df.index:\n continue\n true_confirmed = true_values.loc[index][\"confirmed\"]\n pred_confirmed = pred_df.loc[index][\"prediction_confirmed\"]\n\n csv_data.append(\n [\n index[0],\n index[1],\n true_values.loc[index][\"geoname_code\"],\n ale(true_confirmed, pred_confirmed),\n ]\n )\n\n csv_data = pd.DataFrame(csv_data)\n csv_data.columns = [\"region_code\", \"date\", \"geoname_code\", \"cases_male\"]\n return csv_data.set_index([\"region_code\", \"geoname_code\", \"date\"])", "def getResults():", "def out_put_data(OOS_result: dir, category: str) -> pandas.core.frame.DataFrame:\n \n header = ['SKU', 'Store', 'category', 'OOS_days', 'date_list', 'OOS_lastDay','avg_loss_sale_quantity',\n 'avg_loss_net_sale','avg_loss_mergin', 'total_loss_sale_quantity','total_loss_net_sale','total_loss_mergin']\n output_data = pd.DataFrame(columns = header)\n new_row = {}\n \n for key, value in OOS_result.items():\n new_row['Store'] = key[1]\n new_row['SKU'] = key[0]\n new_row['Category'] = category\n new_row['OOS_days'] = value[0]\n new_row['date_list'] = value[5]\n new_row['OOS_lastDay'] = value[4]\n new_row['avg_loss_sale_quantity'] = value[3]\n new_row['avg_loss_net_sale'] = value[2]\n new_row['avg_loss_mergin'] = value[1]\n new_row['total_loss_sale_quantity'] = value[3] *value[0]\n new_row['total_loss_net_sale'] = 
value[2] *value[0]\n new_row['total_loss_mergin'] = value[1] *value[0]\n \n ## insert the new row \n output_data = output_data.append(new_row, ignore_index=True) \n return output_data", "def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple", "def read_results(\n self,\n model_run_names: list,\n model_names: list,\n output_names: list,\n timesteps: list = None,\n decisions: list = None,\n time_decision_tuples: list = None,\n ):\n\n self.validate_names(model_run_names, model_names, output_names)\n\n results_dict = self._store.get_results(\n model_run_names,\n model_names[0],\n output_names,\n timesteps,\n decisions,\n time_decision_tuples,\n )\n\n # Keep tabs on the units for each output\n for model_run_name in model_run_names:\n for output_name in output_names:\n res = results_dict[model_run_name][output_name]\n self._output_units[res.name] = res.unit\n\n # For each output, concatenate all requested model runs into a single data frame\n formatted_frames = []\n for output_name in output_names:\n # Get each DataArray as a pandas data frame and concatenate, resetting the index to\n # give back a flat data array\n list_of_df = [results_dict[x][output_name].as_df() for x in model_run_names]\n names_of_df = [x for x in results_dict.keys()]\n\n formatted_frames.append(\n pd.concat(\n list_of_df, keys=names_of_df, names=[\"model_run\"]\n ).reset_index()\n )\n\n # Append the other output columns to the first data frame\n formatted_frame = formatted_frames.pop(0)\n output_names.pop(0)\n\n for other_frame, output_name in zip(formatted_frames, output_names):\n 
assert (formatted_frame[\"model_run\"] == other_frame[\"model_run\"]).all()\n assert (\n formatted_frame[\"timestep_decision\"] == other_frame[\"timestep_decision\"]\n ).all()\n formatted_frame[output_name] = other_frame[output_name]\n\n # Unpack the timestep_decision tuples into individual columns and drop the combined\n formatted_frame[[\"timestep\", \"decision\"]] = pd.DataFrame(\n formatted_frame[\"timestep_decision\"].tolist(), index=formatted_frame.index\n )\n\n formatted_frame = formatted_frame.drop(columns=[\"timestep_decision\"])\n\n # Now reorder the columns. Want model_run then timestep then decision\n cols = formatted_frame.columns.tolist()\n\n assert cols[0] == \"model_run\"\n cols.insert(1, cols.pop(cols.index(\"timestep\")))\n cols.insert(2, cols.pop(cols.index(\"decision\")))\n assert cols[0:3] == [\"model_run\", \"timestep\", \"decision\"]\n\n return formatted_frame[cols]", "def concat_all_dfs(dflist):\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n # reduced\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n # add col for function name\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n # reorder cols\n dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',\n 'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]\n\n # complete runs only:\n return dfa[dfa['Actions'].values > 0]", "def process_data(self):\n structure_data = self.parse_root(self.root)\n\n dict_data = {}\n for d in structure_data:\n dict_data = {**dict_data, **d}\n df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T\n\n return df", "def dataframe_for_domain(domain, algorithm):\n df = []\n files = glob.glob(os.path.join(outdir, algorithm, domain, '*', '*.txt'))\n for filename in files:\n query, reference = os.path.basename(filename).replace('.txt', '').split('_vs_')\n try:\n _df = pd.read_csv(filename, comment='#', sep='\\t')\n except pd.errors.EmptyDataError:\n _df = pd.DataFrame([dict(value=np.nan)])\n\n _df['query'] = query\n _df['reference'] = reference\n df.append(\n _df.iloc[0].to_dict()\n )\n return pd.DataFrame(df)", "def get_updated_dataframe():\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.io import fetch_paginated_data\n records = fetch_paginated_data(constants.API_RECORDS_ENDPOINT.value)\n df = parse_records_to_dataframe(records) # pylint: disable=invalid-name\n return df", "def logreg_results_to_pandas(common_molids_cache=False):\n results = ResultInDisk.collect_results_under_dir(MALARIA_LOGREGS_EXPERIMENT_ROOT,\n factory=malaria_result_factory)\n\n # --- molids cache\n molids_cache = None\n if common_molids_cache:\n rf_lab, rf_amb, rf_unl, rf_scr = malaria_logreg_fpt_providers(None)\n # Labelled molids\n lab_molids = rf_lab.ids()\n amb_molids = rf_amb.ids() # To prioritize confirmatory tests on labelled data\n # Unlabelled molids\n unl_molids = rf_unl.ids()\n scr_molids = rf_scr.ids()\n # Let's avoid the need to reread them...\n molids_cache = {\n 'lab': lab_molids,\n 'amb': amb_molids,\n 'unl': unl_molids,\n 'scr': scr_molids\n }\n\n results_dict_of_dicts = {}\n for result in results:\n if common_molids_cache:\n result.ids_cache = molids_cache # dodgy, rework with a copying constructor\n rdict = copy(result.info())\n rdict['result'] = result\n rdict['class_weight'] = 'uniform' if rdict['class_weight'] is None else rdict['class_weight']\n # Some 
more ad-hoc keys for the model\n rdict['num_present_folds'] = result.num_present_folds()\n rdict['auc_mean'] = result.auc_mean()\n rdict['enrichement5_mean'] = result.enrichement5_mean()\n # Some more ad-hoc keys for the fingerprint folder\n folder = result.fingerprint_folder()\n rdict['folder_seed'] = int(folder.seed) if folder is not None else -1\n rdict['folder_size'] = int(folder.fold_size) if folder is not None else 0\n # Add this result to the data frame\n results_dict_of_dicts[result.root_key()] = rdict\n\n return DataFrame(results_dict_of_dicts).T", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def covidstats_dfs(covidstats_meta_df):\n dfs=[]\n for _,r in covidstats_meta_df.iterrows():\n location=r['LOCATION']\n print('Procesando {}'.format(location))\n data = get_data(r['ID departamento CovidStats'])\n assert data['fecha_inicial']=='2020-01-01T00:00:00-03:00'\n df = covidstats_df(data)\n df['LOCATION']=location\n dfs.append(df)\n return pd.concat(dfs).reset_index().rename(columns={'index':'DATE'})", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))" ]
[ "0.6732909", "0.66638446", "0.6632721", "0.65633905", "0.65257215", "0.6472688", "0.6428347", "0.63872194", "0.6289251", "0.6240572", "0.6240572", "0.62322944", "0.6207928", "0.61921304", "0.61735207", "0.61727184", "0.6156177", "0.61521775", "0.6060341", "0.6056508", "0.6026985", "0.60023385", "0.59613776", "0.5952447", "0.5907167", "0.5876662", "0.5871431", "0.58697796", "0.58522487", "0.5844317", "0.5840883", "0.58384365", "0.58074063", "0.5803908", "0.57890415", "0.5787568", "0.57808155", "0.577452", "0.57726634", "0.5757843", "0.5749198", "0.57377744", "0.5734092", "0.5726888", "0.57208705", "0.5712008", "0.56968224", "0.5696073", "0.569288", "0.5686146", "0.568094", "0.5674263", "0.5671214", "0.56671417", "0.5657621", "0.56568915", "0.5656868", "0.5652091", "0.5650685", "0.565037", "0.5649128", "0.5644381", "0.56346095", "0.5623435", "0.5616887", "0.5600718", "0.55971074", "0.5594253", "0.5591771", "0.558989", "0.55792546", "0.5579124", "0.55723107", "0.55721307", "0.55582595", "0.55571103", "0.5536578", "0.5533405", "0.55319184", "0.552399", "0.5521277", "0.55212486", "0.55061084", "0.5501664", "0.5498262", "0.54910773", "0.5490424", "0.5489799", "0.54874533", "0.54840827", "0.5479956", "0.54796344", "0.5478591", "0.5478134", "0.54765874", "0.54723346", "0.54686433", "0.54682565", "0.5464113", "0.5463321", "0.5462615" ]
0.0
-1
Returns a dictionary with strand orientation as values and geneIDs as keys.
def gather_strand_by_geneID_dict(genome_gtf):
    strand_by_geneID_dict = {}
    with open(genome_gtf) as f:
        for line in f:
            current_line = line.split('\t')
            if current_line[2] == "CDS":
                current_orf = current_line[8].split(';')[2].split()[1].strip('\"')
                current_strand = current_line[6]
                strand_by_geneID_dict[current_orf] = current_strand
    return strand_by_geneID_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict", "def _load_orgs_and_genes(self):\n organisms = {}\n genes = {}\n for gene in self.gene_ids:\n org_file_path = self._get_organisms_file_path(gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])\n with open(org_file_path, \"r\") as orgs:\n org = orgs.read().splitlines()\n genes[gene[self.GENE_NAME_IDX]] = {}\n # we only care about unique organisms\n for o in org:\n if not o.startswith(\">\"):\n continue\n clean_o = o.replace(\">\", \"\", 1).replace(\"_\", \" \").title()\n # I hate to do this but there's a special case for Canis Familiaris\n # EBI does not recognize it but it does recognize Canis Lupus (Canis Lupus Familiaris)\n if \"Canis Familiaris\" in clean_o:\n clean_o = \"Canis lupus\"\n if not organisms.get(clean_o):\n organisms[clean_o] = {self.FREQ_KEY: 1, self.GENE_IDS_KEY: [gene]}\n else:\n organisms[clean_o][self.FREQ_KEY] = organisms[clean_o][self.FREQ_KEY] + 1\n organisms[clean_o][self.GENE_IDS_KEY].append(gene)\n genes[gene[self.GENE_NAME_IDX]][clean_o] = 1\n return organisms, genes", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = 
[],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def genomic_tx_data():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(2053, 2188),\n alt_pos_range=(140439611, 140439746),\n alt_aln_method=\"splign\",\n tx_exon_id=780496,\n alt_exon_id=1927265,\n pos_change=(92, 43),\n alt_pos_change_range=(140439703, 140439703),\n tx_ac=\"NM_004333.4\",\n alt_ac=\"NC_000007.13\"\n )", "def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict", "def inizializzazione(fileInput, geneNames):\n\t\n\tdictTranscript \t= {}\n\tdictGenes \t\t= {}\n\tdictEsoni \t\t= {}\n\tdictIntroni \t= {}\n\tdictGeneChr \t= {}\n\n\t# - Filtraggio file di annotazione in input per 'exon' e per nome gene\n\t# - Calcolo delle coordinate dei geni nei cromosomi\n\t#\n\tlines, dictGeneChr = filtraFileDiAnn(fileInput, geneNames)\n\t\n\t\n\t# Indici all'interno del dizionario degli esoni\n\t#\n\tidx_starts \t= 0\n\tidx_ends \t= 1\n\tidx_strand \t= 2\n\t\n\t# Indici all'interno del dizionario dei Geni\n\t#\n\tidx_transcripts = 2\n\n\n\t# Creazione dei dizionari utili alla risoluzione del problema B\n\t#\n\tfor riga in lines:\n\t\tcromosoma \t\t= riga[0]\n\t\tstart_esone \t= riga[3]\n\t\tend_esone \t\t= riga[4]\n\t\tstrand \t\t\t= riga[6]\n\t\tgeneName \t\t= riga[11]\n\t\ttranscriptName \t= riga[12]\n\t\t\n\t\tTranscriptID \t= riga[9]\n\t\tGeneID \t\t\t= riga[8]\n\t\n\t\t# Creazione del dizionario dei transcritti\n\t\t#\n\t\tdictTranscript[TranscriptID] = [transcriptName, GeneID]\n\t\t\n\t\t# Creazione del dizionario dei geni\n\t\t#\n\t\tif not dictGenes.has_key(GeneID):\t\t\t\t\t\t\t\t\t\t# Se il GeneID non e' presente..\n\t\t\tdictGenes[GeneID] = [geneName, cromosoma, [TranscriptID]]\t\t\t# ..nel dizionario (come key)\n\t\telif TranscriptID not in dictGenes[GeneID][idx_transcripts]:\t\t\t# Se il GeneID e' presente ma non lo e'..\n\t\t\tdictGenes[GeneID][idx_transcripts].append(TranscriptID)\t\t\t\t# ..il TranscriptID questo si aggiunge alla lista\n\t\t\n\t\t# Creazione del dizionario degli esoni\n\t\t#\n\t\tif not dictEsoni.has_key(TranscriptID):\t\t\t\t\t\t \t# Se il TranscriptID non e' presente.. 
\n\t\t\tdictEsoni[TranscriptID] = [[start_esone],[end_esone],strand] \t# ..nel dizionario (come key)\n\t\telse:\n\t\t\tdictEsoni[TranscriptID][idx_starts].append(start_esone)\t\t\t \t# Il TranscriptID e' gia' presente quindi..\n\t\t\tdictEsoni[TranscriptID][idx_ends].append(end_esone)\t\t\t \t# ..si aggiunge l'esone alla lista degli esoni\n\t\t\t\n\t\t\t\n\t# Creazione del dizionario degli introni\n\t#\n\tfor TranscriptID in dictEsoni:\n\t\tesoniPerTranscript = len(dictEsoni[TranscriptID][idx_starts])\t \t# Si valuta il nr di esoni per TranscriptID corrente\n\t\t\n\t\tif int(esoniPerTranscript) > 1:\n\t\t\tstart_introni \t= []\t\t\t\t\t\t\t\t\t\t\t # Si preparano le variabili necessarie\n\t\t\tend_introni \t= []\n\t\t\t\n\t\t\tstart_esoni \t= []\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tend_esoni \t\t= []\n\t\t\t\n\t\t\t# Si considera lo strand relativo al TranscriptID\n\t\t\t#\n\t\t\tif dictEsoni[TranscriptID][idx_strand] == '+':\t\t\t\t\t \t# Strand positivo -> esoni scritti in ordine crescente\n\t\t\t\tstrand = True\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts]\n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends]\n\t\t\t\t\n\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t# Strand negativo -> esoni scritti in ordine inverso..\n\t\t\t\tstrand = False\t\t\t\t\t\t\t\t\t\t\t\t \t# ..e per comodita' sono invertiti in ordine crescente\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts][::-1] \t \n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends][::-1]\n\n\t\t\t# Calcolo delle regioni introniche\n\t\t\t#\n\t\t\ti = 0\n\t\t\twhile i < int(esoniPerTranscript) - 1:\t\t\t\t\t\t\t \t# Per ogni coppia di esoni\n\t\t\t\tif (int(start_esoni[i+1]) - int(end_esoni[i])) > 2:\t\t\t \t# Se la regione tra due esoni consecutivi e' > 2..\n\t\t\t\t\tstart_introni.append(int(end_esoni[i]) + 1)\t\t\t \t# ..(considerando che gli estremi dell'introne sono..\n\t\t\t\t\tend_introni.append(int(start_esoni[i+1]) - 1)\t\t \t \t#..interni a quelli dei due esoni consecutivi correnti)\n\t\t\t\ti += 1\n\t\t\t\n\t\t\tif not strand:\t\t\t\t\t\t\t\t\t\t\t\t \t# Si mantiene traccia del fatto che derivano da un..\n\t\t\t\tstart_introni.reverse()\t\t\t\t\t\t\t\t\t \t# ..TranscriptID con strand negativo..\n\t\t\t\tend_introni.reverse()\t\t\t\t\t\t\t\t\t\t\t# ..(si inverte l'ordine degli introni)\n\t\t\n\t\t\tdictIntroni[TranscriptID] = [start_introni, end_introni]\n\n\n\t# Si eliminano i geni che non presentano regioni introniche:\n\t# \t- dalla lista di tutti i geni si rimuovono quelli che hanno introni;\n\t#\t- dal dizionario si rimuovono quelli rimasti nella lista.\n\t#\n\ttuttiIGeni = geneNames.keys()\n\tfor TranscriptID in dictIntroni:\n\t\tgeneID = dictTranscript[TranscriptID][1]\n\t\tnomeGene = dictGenes[geneID][0]\n\t\t\n\t\tif nomeGene in tuttiIGeni:\n\t\t\ttuttiIGeni.remove(nomeGene)\n\n\n\tfor nomeGene in tuttiIGeni:\n\t\tdel geneNames[nomeGene]\n\t\tprint 'Il gene %s non presenta regioni introniche.' 
% nomeGene\n\n\n\treturn [dictTranscript, dictGenes, dictEsoni, dictIntroni, dictGeneChr]", "def get_gene_transcript_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col).reset_index()\n r = {}\n for gene_id, s in df.groupby('GeneId'):\n r[gene_id] = s.TranscriptId.tolist()\n return r", "def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict", "def get_transcript_gene_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.GeneId)))", "def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD", "def gencode_dic(gencode_file,gene_type_dic):\n gen_dic = {}\n for i in range(1,len(gencode_file)):\n words_gen = gencode_file[i].strip().split('\\t')\n chr_no = words_gen[2]\n trans_id = words_gen[1]\n cds_info = words_gen[13]\n cde_info = words_gen[14]\n gene_type = gene_type_dic[trans_id]\n gene_name = words_gen[12]\n TSS_start = int(words_gen[4])\n TSS_end = int(words_gen[5])\n CDS_start = int(words_gen[6])\n CDS_end = int(words_gen[7])\n strand = words_gen[3]\n start_list = [int(x) for x in words_gen[9].split(',')[:-1]]\n end_list = [int(x) for x in words_gen[10].split(',')[:-1]]\n exon_no = int(words_gen[8])\n# if (chr_no,trans_id) in gen_dic: #Some trans_id are not unique, especially transcripts in chrX and chrY\n# print trans_id\n interval_list = [P.closedopen(start_list[x],end_list[x]) for x in range(0,exon_no)]\n interval_merge = P.empty()\n for i in range(0,len(interval_list)):\n interval_merge = interval_merge | interval_list[i]\n if gene_type == 'protein_coding':\n if (cds_info == 'cmpl') and (cde_info == 'cmpl'):\n # print (interval_merge)\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n else:\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n return gen_dic", "def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier", "def 
genorates_to_dict(store: GenoRates) -> GenoDistribSerialisable:\n to_return = dict()\n num_alleles = store.shape[0]\n\n for gene in range(num_alleles):\n for allele in range(3):\n to_return[(geno_to_str(gene, allele))] = store[gene][allele]\n\n return to_return", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def get_genes_organisms(self):\n path = os.path.join(self.parent_path, \"genes_organisms.txt\")\n with open(path, \"w\") as f:\n f.write(\"Gene,Organisms\\n\")\n for gene in self.genes.keys():\n f.write(\"{},{}\".format(gene, \"/\".join(self.genes.get(gene).keys()) + \"\\n\"))", "def create_gene_dict(self, variants):\n \n # organise the variants into entries for each gene\n genes = {}\n for var in variants:\n # variants (particularly CNVs) can span multiple genes, so we need\n # to check each gene separately, and then collapse duplicates later\n for gene_list in var.get_genes():\n for gene in gene_list:\n if gene not in genes:\n genes[gene] = []\n # add the variant to the gene entry\n genes[gene].append(var)\n \n return genes", "def produce_geneName_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if otherIDtype == 'Gene_Name':\n if uniprotID in swissProtIDs:\n idMap[uniprotID] = otherID.upper()\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def get_snps(self):\n d = {}\n with open(self.snp_file, 'r') as infile:\n for row in infile:\n if row:\n row_split = row.strip().split('\\t')\n chrom = row_split[0]\n pos = row_split[1]\n name = row_split[3].split('|')\n snp_id = name[0]\n gene = name[1]\n ref_allele = name[2]\n alt_alleles = name[3]\n freq = name[4]\n genome = name[5]\n d[snp_id] = {\n 'chrom': chrom,\n 'pos': pos,\n 'ref': ref_allele,\n 'alt': alt_alleles,\n 'gene': gene,\n 'maf': freq,\n 'genome_build': genome\n }\n return d", "def update_strandinfo(self):\n params = ['x','y','rho','theta','spiral','inward','outward']\n infos = {'min':np.min,\n 'max':np.max,\n 'count':lambda x:len(set(x))}\n\n self.strands = {}\n\n for f in ['pwm','channel']:\n self.strands[f] = [ s[f] for s in self.strands_config]\n\n for f in params:\n if f in self.strands_config[0]:\n self.strands[f] = np.array([ s[f] for s in self.strands_config],dtype=np.int16)\n\n for f in ['intensity','last_intensity']:\n self.strands[f] = np.zeros_like(self.strands['x'],dtype=np.int16)\n\n self.strandinfo = { param: { info : None for info in infos} for param in params }\n for p in params:\n for ik,iv in infos.items():\n self.strandinfo[p][ik] = iv(self.strands[p])\n\n print('self.strands:', self.strands)\n print('strandinfo:',self.strandinfo)", "def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data", "def get_gene_biotype_map(db_path, 
table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.GeneId, df.GeneBiotype)))", "def get_keys():\n SCALE_DICT = {\n 'major': [2,2,1,2,2,2,1],\n 'minor':[2,1,2,2,1,2,2],\n 'chrom':[1,1,1,1,1,1,1,1,1,1,1,1],\n 'ionanian':[2,2,1,2,2,2,1],\n 'dorian':[2,1,2,2,2,1,2],\n 'phrygian':[1,2,2,2,1,2,2],\n 'lydian':[2,2,2,1,2,2,1],\n 'mixolydian':[2,2,1,2,2,1,2],\n 'aeolian':[2,1,2,2,1,2,2],\n 'locrian':[1,2,2,1,2,2,2],\n 'minor_pent':[3,2,2,3,2],\n 'major_pent':[2,2,3,2,3],\n 'pent_6':[2,2,3,1,3],\n 'pent_2':[1,3,3,2,3],\n 'pent_3':[2,1,4,2,3],\n 'pent_5':[2,2,2,3,3],\n 'mixo_pent':[2,2,3,3,2],\n 'phryg_pent':[1,2,3,1,3],\n 'dim_pent':[2,1,3,1,3],\n 'blues':[3,2,1,1,3,2],\n 'harmonic_minor':[2,1,2,2,1,3,2],\n 'melodic_mimnor':[2,1,2,2,1,3,2],\n 'whole_tone':[2,2,2,2,2,2],\n 'whole_half':[2,1,2,1,2,1,2,1],\n 'half_whole':[1,2,1,2,1,2,1,2],\n 'lydian_flat7':[2,2,2,1,2,1,2]\n }\n\n return SCALE_DICT", "def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict", "def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}", "def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary", "def produce_isoform_geneName_dict(geneMapFile, isoformFile, outPath):\n isoformData = pd.read_table(isoformFile, sep=\"\\t\")\n with open(geneMapFile, 'rb') as f:\n geneMap = pickle.load(f)\n isoformGeneMap = {}\n isoformData[\"refID\"] = isoformData[\"Isoform\"].apply(lambda x: x if x.find('-') == -1 else x[:x.find('-')])\n for _, row in isoformData.iterrows():\n if row.refID in geneMap:\n isoformGeneMap[row.Isoform] = geneMap[row.refID]\n with open(outPath, 'wb') as fOut:\n pickle.dump(isoformGeneMap, fOut)", "def get_indices_convert_dict(fn):\n pdb_inp = pdb.input(file_name=fn)\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n \n newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms()))\n oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms()))\n \n return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]),\n 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}", "def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, 
genome\n FROM hotel_genome\n WHERE hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes", "def data_from_result():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(1802, 1921),\n alt_pos_range=(140453074, 140453193),\n alt_aln_method=\"splign\",\n tx_exon_id=780494,\n alt_exon_id=1927263\n )", "def tr_nc_dict(dfin):\n\n\ttr_nc_index_dict = OrderedDict()\n\t\n\ttrCount = 0\n\tpreviousTrIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_tr=\"\"\n\n\tfor i in range(len(dfin)):\n# print dfin.loc[i]\n\n\t\tif dfin.loc[i,'feature'] == 'transcript':\n\t\t\ttrdict = parse_mod_entry(dfin.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\n\t\t\tif trCount != 0:\n\t\t\t\tnewTrIndex = i\n\t\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\tpreviousTrIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\ttrCount += 1\n\n\t\t\telse:\n\t\t\t\tnewTrIndex = 0\n\t\t\t\ttrCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\t\n\t\tif i == (len(dfin)-1):\n\t\t\tnewTrIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\ttr_nc_index_dict[current_id] = [previousTrIndex,newTrIndex]\n\t\t\t\n\treturn tr_nc_index_dict", "def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. 
that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions", "def make_tRNA_fasta_dict(tRNAdf):\n\n\n\ttRNA_fasta_outdict = OrderedDict()\n\n\tfor i in tRNAdf.index:\n\n\t\tif tRNAdf.loc[i,'feature'] == 'tRNA':\n\t\t\tchrom = tRNAdf.loc[i,'#chrom']\n\t\t\tchrStart = int(tRNAdf.loc[i,'chromStart'])\n\t\t\tchrEnd = int(tRNAdf.loc[i,'chromEnd'])\n\t\t\tstrand = tRNAdf.loc[i,'strand']\n\t\t\t\n\t\t\tif strand == \"+\":\n\t\t\t\tchrStart = chrStart-1 ### gtf files are 1-based, convert to 0-based\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\t\t\t\n\t\t\telse: # for neg strand\n\t\t\t\tchrStart = chrStart-1\n\t\t\t\ttrSeq = SeqIO.Seq(genome[chrom][chrStart:chrEnd])\n\t\t\t\ttrSeq = trSeq.reverse_complement()\n\t\t\t\ttrdict = parse_entry(tRNAdf.loc[i,'transcript_id'])\n\n\t\t\ttrID = \"tRNA_\"+trdict['gene_id'][0]\n\t\t\tdesc = \"| tRNA | \"+trdict['gene_type'][0] + \" | %s; %s; %s:%s\" % (chrom, strand, chrStart, chrEnd)\n\n\t\t\ttrSeqRec = SeqRecord(trSeq, id=trID, name=trdict['gene_name'][0], description=desc)\n\t\t\ttRNA_fasta_outdict[trID] = trSeqRec\n\t\n\treturn tRNA_fasta_outdict", "def convert_trsp_index(geneDictNonCoding, df, TR_index_dict):\n\n\n\tgeneDictCanon = OrderedDict()\n\t\n\tfor gene in geneDictNonCoding:\n\t\ttrDF = df.iloc[geneDictNonCoding[gene][0]:geneDictNonCoding[gene][1]]\n\t\ttrDFz = trDF.reset_index(drop=True)\n\t\t\n\t\ttrCount = 0\n\t\ttrDictLoc = OrderedDict()\n\t\t\n\t\tfor i in range(len(trDFz)):\n\t\t\tif trDFz.loc[i, 'feature'] == 'transcript':\n\t\t\t\ttr = trDFz.loc[i, 'transcript_id']\n\t\t\t\ttrdict = parse_entry(tr)\n\t\t\t\ttrName = trdict['transcript_id'][0]\n\t\t\t\ttrDictLoc[trName] = [trDFz.loc[i, 'chromStart'], trDFz.loc[i, 'chromEnd']]\n\t\t\t\ttrCount += 1\n\t\t\n\t\tif trCount > 1:\n# print gene, \"more than 1 trsp !!! 
\\n\"\n\t\t\t\n\t\t\trangeDict = OrderedDict() ## store the ranges, and take the longest\n\t\t\tfor key in trDictLoc:\n\t\t\t\ttrRange = len(range(int(trDictLoc[key][0]),int(trDictLoc[key][1])))\n\t\t\t\trangeDict[key] = trRange\n\t\t\t\t\n\t\t\tv=list(rangeDict.values())\n\t\t\tk=list(rangeDict.keys())\n\t\t\ttrOut = k[v.index(max(v))]\n# print trOut\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\t\t\t\n\t\t\t\n\n\t\telse: ## for genes with single transcripts\n\t\t\ttrOut = trDictLoc.keys()[0]\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\treturn geneDictCanon", "def estado2dic(self, estado):\n return {self.vertices[i]: (estado[2 * i], estado[2 * i + 1])\n for i in range(len(self.vertices))}", "def read_geneset_file():\n geneset_dict = {}\n with open(GENESET_FILENAME) as geneset_file:\n lines = geneset_file.readlines()\n i = 0\n for l in lines:\n i += 1\n entries = l.split()\n geneset_name = entries[0]\n geneset = set(entries[2:])\n geneset_dict[geneset_name] = geneset\n\n return geneset_dict", "def get_organisms_genes(self):\n path = os.path.join(self.parent_path, \"organisms_genes.txt\")\n with open(path, \"w\") as freqs:\n freqs.write(\"Organism,Genes\\n\")\n for org, data in self.organisms.items():\n genes = \"\"\n for gene in data.get(self.GENE_IDS_KEY):\n genes = genes + \"{} \".format(gene[self.GENE_NAME_IDX])\n freqs.write(\"{},{}\\n\".format(org, genes))", "def to_dict(self) -> dict:\n\n return {\n LEFT_PES: self.left_pes,\n RIGHT_PES: self.right_pes,\n LEFT_MANUS: self.left_manus,\n RIGHT_MANUS: self.right_manus\n }", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def segments_to_dict(segments):\n seg_dict = {}\n for chrom in segments.chromosome.unique():\n seg_dict[chrom] = segments[segments['chromosome'] == chrom]\n return(seg_dict)", "def get_patients_dict(table):\n\tf = open(table)\n\tpatients = f.readline().strip().split(\"\\t\")[1:]\n\t\t \n\tpatients_dict = {}\n\tfor i in patients:\n\t\tpatients_dict[i.replace('\"', '')] = {}\n\t\t \n\tfor i in f:\n\t\tl = i.strip().split(\"\\t\")\n\t\tgene = l[0]\n\n\t\tfor j in range(len(l[1:])):\n\t\t\tpatients_dict[patients[j]][gene] = int(l[1:][j])\n\treturn patients_dict", "def readSoft2Dict(softFileName,index=11):\n import gzip\n probe2Entrez = {}\n Flag = False\n if softFileName[-2:] == \"gz\":\n softHandle = gzip.open(softFileName,\"rt\")\n else:\n softHandle = open(softFileName,\"r\")\n softMatrix = softHandle.readlines()\n for line in softMatrix:\n line = line.split(\"\\t\")\n #if len(line[0]) <5 :\n # print(line[0].lower())\n if len(line) <= index:\n continue\n if Flag:\n #print(line)\n if line[0] in probe2Entrez.keys():\n probe2Entrez[line[0]].append(line)\n else:\n probe2Entrez[line[0]] = [line]\n if line[0].lower() == 'id':\n Flag = True\n multipleKeyList = []\n for key in probe2Entrez: #discard probs refer to multiple genes\n if len(probe2Entrez[key]) > 1:\n multipleKeyList.append(key)\n for key in multipleKeyList: #can't del keys of dictionary when iterating it\n del probe2Entrez[key]\n return probe2Entrez", "def getSHSIDDict():\n m = {}\n fin = 
open(\"SHSDataset/Chromas/msd_keys_mapping.cly\")\n for l in fin.readlines():\n l = l.rstrip()\n f = l.split(\",\")\n m[f[0]] = int(f[1])\n fin.close()\n return m", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def tallying_genes():\n #Creating a tallying Mechanism of genes with multiple sequences in file and\n # an output file for future alignment of sequences \n blast_hit_results = open('blast_hits_report.txt', 'r')\n gene_dict={}\n\n for line in blast_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n #Test to see if organism in dictionary\n verdict = gene_dict.get(data[6])\n \n if str(verdict) == \"None\":\n #creating new entry\n key = data[6]\n seq_info=str(data[0])+\"|\"+str(data[1])\n counter = 1\n #Value[Counts, Trimmed_Length, Blast Length, Blast_Score, Blast_Percent_Identity]\n value=[data[5], counter, [seq_info]]\n gene_dict.update({key:value})\n else:\n #Fills dictionary based on organism name\n seq_info=str(data[0])+\"|\"+str(data[1])\n gene_dict[data[6]][1]+=1\n gene_dict[data[6]][2].append(seq_info)\n blast_hit_results.close()\n return(gene_dict)", "def processFile(filename):\n geneDict = {}\n pat = re.compile(r'gene_index (\\d+)')\n gf = open(filename)\n for line in gf:\n line = line.strip()\n if line.startswith('#'):\n continue\n t = line.split('\\t')\n if len(t) != 9 and len(t) !=8:\n continue\n if t[2] not in ['CDS', 'UTR']:\n continue\n s = t[8].split(';')\n m = re.search(pat, s[1])\n if m is None:\n raise RuntimeError('bad regex, unable to pull out gene index for line %s' % line)\n geneIndex = m.group(1)\n if geneIndex not in geneDict:\n g = Gene()\n geneDict[geneIndex] = g\n else:\n g = geneDict[geneIndex]\n if t[2] == 'CDS':\n g.numCds += 1\n elif t[2] == 'UTR':\n g.numUtr += 1\n if g.left > int(t[3]):\n g.left = int(t[3])\n g.updateLength()\n if g.right < int(t[4]):\n g.right = int(t[4])\n g.updateLength()\n return geneDict", "def instruments():\n instr_dict = {}\n #\n instr_dict['LRISr'] = 2**0\n instr_dict['LRISb'] = 2**1\n instr_dict['Kastb'] = 2**2\n instr_dict['shane_kast_red'] = 2**3\n instr_dict['shane_kast_red_ret'] = 2**3\n instr_dict['DEIMOS'] = 2**4\n instr_dict['NIRSPEC'] = 2**5\n instr_dict['GMOS'] = 2**6\n instr_dict['DBSP'] = 2**7\n #\n return instr_dict", "def readGenes(gtf, tid=False):\n gs = {}\n #get all genes information\n print(\"reading annotaions from %s\" % gtf)\n for line in tqdm(open(gtf).read().split(\"\\n\")[:-1]):\n if line.startswith(\"#\"):\n continue\n line = 
line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != \"exon\":\n continue\n e = parseGtfLine(line, tid)\n if e.name not in gs:\n g = Gene()\n g.chrom = e.chrom\n g.start = e.start\n g.end = e.end\n g.strand = e.strand\n g.name = e.name\n g.id = e.id\n g.exons = {(e.start, e.end): e}\n gs[g.name] = g\n else:\n #same position exons\n if (e.start, e.end) in gs[e.name].exons:\n continue\n else:\n g = gs[e.name]\n if e.start < g.start:\n g.start = e.start\n if e.end > g.end:\n g.end = e.end\n g.exons[(e.start, e.end)] = e\n #get all genes information\n ngs = {} #key is chromosome\n for k, g in gs.items():\n if g.chrom not in ngs:\n ngs[g.chrom] = {}\n if g.strand == \"+\":\n tss = g.start\n else:\n tss = g.end\n #tss position is key, other information is value, for following search\n if tss not in ngs[g.chrom]:\n ngs[g.chrom][tss] = g\n return ngs", "def gather_term_into_dict(rgi_df, genome_ids, column):\n data_dict = {x: [] for x in genome_ids}\n for row in rgi_df.fillna('').iterrows():\n data_dict[row[1]['Sample']] += row[1][column].split(';')\n return data_dict", "def getGeneCodesToIdDict(conn, tuple_of_gene_codes):\n gene_code_to_id_dict = conn.db_connection.convertGeneCodeToId(tuple_of_gene_codes)\n\n return gene_code_to_id_dict", "def read_fasta_to_dictionary(genome_file):\n filename = genome_file\n dct = {}\n\n id_name = \"\"\n sequence = \"\"\n first_pass = 1\n\n read_fh = open(filename, 'r')\n for i, line in enumerate(read_fh):\n line = line.rstrip()\n if re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(\\s+)(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r',', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n\n elif re.search(r'^>(\\S+)(.*)', line):\n\n match_obj = re.search(r'^>(\\S+)(.*)', line)\n if not first_pass:\n dct[id_name] = sequence\n id_name = match_obj.group(1)\n id_name = re.sub(r'(\\d+)_', \"\", id_name)\n id_name = re.sub(r'.*\\|', \"\", id_name)\n first_pass = 0\n sequence = \"\"\n else:\n sequence += line\n dct[id_name] = sequence\n\n return dct", "def build_df_dict(df, geneDictCanon):\n\n\toutDict = OrderedDict()\n\n\tfor tr in geneDictCanon:\n\t\toutDict[geneDictCanon[tr][0]] = df.iloc[geneDictCanon[tr][1][0]:geneDictCanon[tr][1][1]]\n\n\treturn outDict", "def nomenclatura():\n df = pd.read_csv(\"Data/nomenclatura_1.csv\", encoding = \"latin1\")\n #dict_axis = df.set_index('id').T.to_dict('list')\n dict_axis = dict( [ (i, [a,b]) for i, a,b in zip(df.id, df.latitude, df.longitude) ] )\n\n return dict_axis", "def get_gene_disease_pairs(gene_disease_filename, do_mesh_filename):\n random.seed(100) # reproducibility\n gene_disease_df = pd.read_csv(gene_disease_filename, sep=\"\\t\")\n do_mesh_df = pd.read_csv(do_mesh_filename, sep=\"\\t\")\n\n # create doid-mesh list\n do_mesh_pairs = dict(zip(do_mesh_df.doid_code, \"MESH:\" + do_mesh_df.mesh_id))\n gene_disease_df[\"mesh_id\"] = gene_disease_df[\"doid_id\"].replace(do_mesh_pairs)\n # remove rows that don't have a DOID-MESH id mapping\n # gene_disease_df = gene_disease_df.query(\"~mesh_id.str.contains('DOID:')\")\n gene_disease_df = gene_disease_df[~gene_disease_df.mesh_id.str.contains(\"DOID:\")]\n # get positive pairs\n positive_pairs = gene_disease_df[[\"mesh_id\", \"entrez_gene_id\"]].values.tolist()\n\n # randomize pairings to create negative pairs\n gene_disease_df[\"random_gene\"] = random.sample(\n 
gene_disease_df[\"entrez_gene_id\"].values.tolist(),\n len(gene_disease_df[\"entrez_gene_id\"].values.tolist()),\n )\n randomized_pairs = gene_disease_df[[\"mesh_id\", \"random_gene\"]].values.tolist()\n negative_pairs = []\n for pair in random.sample(randomized_pairs, len(randomized_pairs)):\n if pair not in positive_pairs:\n negative_pairs.append(pair)\n\n # append class to each pair\n for pair in positive_pairs:\n pair.append(1)\n for pair in negative_pairs:\n pair.append(0)\n gene_disease_pairs = positive_pairs + negative_pairs\n\n return gene_disease_pairs", "def set_chrom_dict():\n chrom_dict = {\n str(i):'chr' + str(i) for i in range(1, MAXCHROM)\n }\n chrom_dict.update({\n 'X':'chr23',\n 'Y':'chr24',\n 'XY':'chr25',\n 'M':'chr26',\n 'MT':'chr26',\n 'chrX':'chr23',\n 'chrY':'chr24',\n 'chrXY':'chr25',\n 'chrM':'chr26',\n 'chrMT':'chr26'\n })\n return chrom_dict, MAXCHROM", "def gen_dict(self):\n stimuli_dict = dict()\n for i, stim in enumerate(STIMULI):\n stimuli_dict[stim.name] = dict(stimulus_path=stim.value)\n rel_df = self.df.iloc[:, i * 2 : i * 2 + 2]\n stimuli_dict[stim.name][\"responses\"] = rel_df\n return stimuli_dict", "def build_transcript_indexes(geneDictCoding, df):\n\n\tTR_index_dict = OrderedDict()\n\n\tfor gene in geneDictCoding:\n\n\t\ttrDF = df.iloc[geneDictCoding[gene][0]:geneDictCoding[gene][1]]\n\t\n\t\ttrPrev = -1\n\t\ttrNamePrev = \"\"\n\t\t\n\t\t### iterate through a slice of the data frame for each gene\n\t\t### search for transcripts over that slice\n\t\t### find transcript slices\n\t\tfor i in range(geneDictCoding[gene][0], geneDictCoding[gene][1]):\n\t\t\tif trDF.loc[i,'feature'] == 'transcript':\n\t\t\t\ttrdict = parse_entry(trDF.loc[i,'transcript_id'])\n\t\t\t\ttrCur = i\n\t\t\t\ttrNameCur = trdict['transcript_id'][0]\n\t\t\t\t\n\t\t\t\tif trPrev != -1: # do not make an entry for the first transcript\n\t\t\t\t\tTR_index_dict[trNamePrev] = [trPrev, trCur]\n\n\t\t\t\ttrPrev = trCur\n\t\t\t\ttrNamePrev = trNameCur\n\t\t\t\n\t\t\t### for the final transcript\n\t\t\tif i == geneDictCoding[gene][1]-1:\n\t\t\t\ttrdict = parse_entry(trDF.loc[i,'transcript_id'])\n\t\t\t\tTR_index_dict[trdict['transcript_id'][0]] = [trCur, i+1]\n\treturn TR_index_dict", "def create_metadata_indices(meta_file):\n logging.info('Loading gene metadata...')\n # genes\n header = {}\n #gene_idx = {}\n idx = collections.defaultdict(dict)\n tax_levs = ['domain','phylum','class','order','family','genus','species']\n with open(meta_file) as inF:\n for i,line in enumerate(inF):\n line = line.rstrip().split('\\t')\n if i == 0:\n header = {x.lower():i for i,x in enumerate(line)}\n continue\n # genes\n cluster_id = line[header['annotation']]\n gene_uuid = line[header['gene_uuid']]\n genomeID = line[header['genomeid']]\n genome_len = line[header['genome_len']]\n tax = tuple([line[header[x]] for x in tax_levs])\n try:\n idx[cluster_id][tax][genomeID]['gene_ids'].append(gene_uuid)\n except KeyError:\n idx[cluster_id][tax] = {genomeID : {'gene_ids' : [gene_uuid],\n 'genome_len' : genome_len}}\n metadata_summary(idx)\n return idx", "def map_detector_to_basis_dict(detector):\n\n root = detector.hierarchy()\n\n d = 0 # only allow one detector for now\n metro = {(d,):basis(panelgroup=root)}\n\n for q, quad in enumerate(root):\n metro[(d,q)] = basis(panelgroup=quad)\n for s, sensor in enumerate(quad):\n metro[(d,q,s)] = basis(panelgroup=sensor)\n for a, asic in enumerate(sensor):\n # at the asic level, need to subtract off the d0 vector so this asic's basis does not include\n # the shift from the asic 
center to the asic corner. Also need to flip the Y back around\n # to be consistent with how it was originally stored\n d_mat = asic.get_local_d_matrix()\n fast = matrix.col((d_mat[0],d_mat[3],d_mat[6])).normalize()\n slow = matrix.col((d_mat[1],d_mat[4],d_mat[7])).normalize()\n orig = matrix.col((d_mat[2],d_mat[5],d_mat[8]))\n\n v3 = fast.cross(slow).normalize()\n\n r3 = matrix.sqr((fast[0],slow[0],v3[0],\n fast[1],slow[1],v3[1],\n fast[2],slow[2],v3[2]))\n\n transform = matrix.sqr((1, 0, 0,\n 0,-1, 0,\n 0, 0,-1))\n\n pix_size = asic.get_pixel_size()\n img_size = asic.get_image_size()\n\n offset = matrix.col((-pix_size[0]*(img_size[0])/2,\n +pix_size[1]*(img_size[1])/2,0))\n\n metro[(d,q,s,a)] = basis(orientation=(r3*transform).r3_rotation_matrix_as_unit_quaternion(),\n translation=orig-offset)\n\n return metro", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def read_in_DRMs(drm_file):\n import pandas as pd\n import numpy as np\n\n DRMs = {}\n drmPositions = []\n\n df = pd.read_csv(drm_file, sep='\\t')\n for mi, m in df.iterrows():\n pos = m.GENOMIC_POSITION-1 #put in python numbering\n drmPositions.append(pos)\n\n if pos in DRMs:\n DRMs[pos]['base'].append(m.ALT_BASE)\n DRMs[pos]['AA'].append(m.SUBSTITUTION)\n else:\n DRMs[pos] = {}\n DRMs[pos]['base'] = [m.ALT_BASE]\n DRMs[pos]['drug'] = m.DRUG\n DRMs[pos]['AA'] = [m.SUBSTITUTION]\n DRMs[pos]['gene'] = m.GENE\n\n drmPositions = np.array(drmPositions)\n drmPositions = np.unique(drmPositions)\n drmPositions = np.sort(drmPositions)\n\n DRM_info = {'DRMs': DRMs,\n 'drmPositions': drmPositions}\n\n return DRM_info", "def get_all_sequences(self):\n seqs_dict = {}\n\n all_seqs = Sequences.objects.all().values('code_id',\n 'gene_code',\n 'sequences').order_by('code_id')\n for seq in all_seqs:\n code = seq['code_id']\n gene_code = seq['gene_code']\n\n if code in self.voucher_codes and gene_code in self.gene_codes:\n if code not in seqs_dict:\n seqs_dict[code] = {gene_code: ''}\n seqs_dict[code][gene_code] = seq\n return seqs_dict", "def count_nucleotides(strand: str) -> dict:\n return dict(Counter(strand))", "def produce_rnaToProtein_refseqID_dict (inPath, outPath):\n idMap = {}\n with open(inPath, 'r') as f:\n next(f)\n for line in f:\n tax_id, gene_id, symbol, rsg, lrg, rna, t, protein, p, category = line.strip().split('\\t')\n if (len(rna) > 0) 
and (len(protein) > 0):\n idMap[rna] = protein\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def build_mat_scikit_strandOriented(sralist, scikit_data):\n\n scikit_mat = {}\n seq_codons = {}\n seq_aa = {}\n\n for geneID in scikit_data[sralist[0]][0].keys():\n for ix, dataset in enumerate(sralist):\n\n if geneID in scikit_data[dataset][0].keys():\n current_profile = scikit_data[dataset][0].get(geneID, np.nan)\n current_ribo = current_profile[0]\n current_ribo = current_ribo[8:-8]\n N = len(sralist)\n M = len(current_ribo)\n print(geneID, M)\n\n if ix == 0:\n current_matrix = np.zeros((N,M)) * np.nan\n\n current_seq_codons = current_profile[1]\n current_seq_codons = current_seq_codons[8:-8]\n\n current_seq_aa = current_profile[2]\n current_seq_aa = current_seq_aa[8:-8]\n\n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n seq_codons[geneID] = current_seq_codons\n seq_aa[geneID] = current_seq_aa\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n seq_codons[geneID] = current_seq_codons[::-1]\n seq_aa[geneID] = current_seq_aa[::-1]\n \n \n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n current_matrix[ix,:] = current_ribo\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n current_matrix[ix,:] = current_ribo[::-1]\n \n if np.sum(current_matrix) > 0: \n scikit_mat[geneID] = current_matrix\n\n# scikit_df = pd.DataFrame(values_list, columns=columns_list)\n\n return scikit_mat, seq_codons, seq_aa", "def get_dict(self):\n subt_map = {}\n for seqkey,seqs in self._seqdict.iteritems():\n for seq,seqentry in seqs.iteritems():\n subt_map[seqentry['name']] = {\n 'subtype': seqentry['subtype'],\n 'accessions': seqentry['accessions'],\n 'loci': seqentry['loci']\n }\n\n return subt_map", "def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set", "def toDict(self):\n \n d = {}\n d['sp'] = self.species\n d['gns'] = self.genera\n d['fam'] = self.families\n d['ord'] = self.orders\n d['cls'] = self.classes\n d['phy'] = self.phyla\n d['kng'] = self.kingdoms\n \n return d", "def antenna_loc_todict():\n\n r = {}\n f = open(\"data/location_withgps.txt\",\"rb\")\n for line in f:\n a = line.split(\"\\t\")\n r[a[0]] = (a[3],a[4])\n dill.dump(r,open(os.path.join(output_path_files,\"mobility\",\"antenna_loc.dill\"),\"wb\"))\n return r", "def anno_gene_stats(anno_gene, loc_file, gene_file, isConvert):\r\n LocationNum = collections.Counter()\r\n LocationGene = collections.defaultdict(list)\r\n\r\n\r\n GeneCatSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n CatGeneSample = collections.defaultdict(lambda: collections.defaultdict(list))\r\n\r\n allLocations = set()\r\n anno_h = open(anno_gene, \"r\")\r\n for line in anno_h:\r\n lines = line.strip().split(\"\\t\")\r\n sample, location, number, gene = lines[:4]\r\n number = int(number)\r\n\r\n ### whether convert the category to \"Exon\" or \"Intron\"\r\n if isConvert == \"True\":\r\n if location == \"Intron\":\r\n newLoc = \"Intron\"\r\n else:\r\n newLoc = \"Exon\"\r\n elif isConvert == \"False\":\r\n newLoc = location\r\n else:\r\n print(\"Please check whether convert the original category to 'Intron' or 'Exon' based on True of False.\")\r\n sys.exit(1)\r\n\r\n allLocations.add(newLoc)\r\n ### get the dict of gene -> location -> sample\r\n genes 
= gene.split(\",\")\r\n for g in genes:\r\n GeneCatSample[g][newLoc].append(sample)\r\n\r\n ### get the location -> gene -> sample\r\n CatGeneSample[newLoc][g].append(sample)\r\n anno_h.close()\r\n\r\n\r\n ## output gene and number in samples\r\n ### sort all locations\r\n sortedAllLocation = sorted(list(allLocations))\r\n\r\n gene_h = open(gene_file, \"w\")\r\n\r\n headerSample = [l + \"_samples\" for l in sortedAllLocation]\r\n gene_h.write(\"Gene\\tTotal\\t%s\\t%s\\n\" % (\"\\t\".join(sortedAllLocation), \"\\t\".join(headerSample)))\r\n\r\n GeneRecord = {}\r\n GeneNumber = {}\r\n\r\n allGenes = sorted(list(GeneCatSample.keys()))\r\n for ge in allGenes:\r\n ### get the number and samples for each location of each gene\r\n GeneNum = []\r\n GeneSample = []\r\n\r\n for loc in sortedAllLocation:\r\n if loc in GeneCatSample[ge]:\r\n samples = GeneCatSample[ge][loc]\r\n ##############################\r\n ####### unique for samples\r\n samples = sorted(list(set(samples)))\r\n sampleNum = len(samples)\r\n else:\r\n sampleNum = 0\r\n samples = [\"-\"]\r\n\r\n GeneNum.append(sampleNum)\r\n GeneSample.append(samples)\r\n\r\n GeneNumSum = sum(GeneNum)\r\n CatNumOut = \"\\t\".join([str(g) for g in GeneNum])\r\n CatSampleOut = \"\\t\".join([\",\".join(s) for s in GeneSample])\r\n\r\n record = \"%s\\t%d\\t%s\\t%s\\t\" % (ge, GeneNumSum, CatNumOut, CatSampleOut)\r\n GeneNumber[ge] = GeneNumSum\r\n GeneRecord[ge] = record\r\n \r\n ### output\r\n GeneNumSorted = sort_dict_value(GeneNumber)\r\n for g, n in GeneNumSorted:\r\n r = GeneRecord[g]\r\n gene_h.write(\"%s\\n\" % r)\r\n\r\n gene_h.close() \r\n\r\n\r\n ### location and genes\r\n loc_h = open(loc_file, \"w\")\r\n loc_h.write(\"Location\\tGeneNumber\\tGenes\\tSampleNumber\\tSamples\\n\")\r\n for loc in sortedAllLocation:\r\n geneSample = CatGeneSample[loc]\r\n genes = sorted(list(geneSample.keys()))\r\n geneNum = len(genes)\r\n samNum = 0\r\n samList = []\r\n for ge in geneSample:\r\n sam = geneSample[ge]\r\n samList.append(sam)\r\n samNum += len(sam)\r\n samOut = \";\".join([\",\".join(s) for s in samList])\r\n loc_h.write(\"%s\\t%d\\t%s\\t%d\\t%s\\n\" % (loc, geneNum, \",\".join(genes), samNum, samOut))\r\n loc_h.close()", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = '-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = 
str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)", "def Extract_gene_type(gtf_file):\n gene_type_dic = {}\n for i in range(0,len(gtf_file)):\n if '##' not in gtf_file[i]:\n row = gtf_file[i].strip().split('\\t')\n if row[2] == 'transcript':\n trans_id = row[8].split('transcript_id \"')[1].split('\";')[0]\n #print trans_id\n gene_type_dic[trans_id] = row[8].split('transcript_type \"')[1].split('\";')[0]\n return gene_type_dic", "def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets", "def covariate_to_index(self):\n covariate_df = self.dismod_file.covariate\n return dict(covariate_df[[\"covariate_name\", \"covariate_id\"]].to_records(index=False))", "def get_species_list() -> list:\n c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}\n ch4_xyz = {'symbols': ('C', 'H', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),\n (-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}\n co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}\n co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}\n f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}\n ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),\n 'coords': ((0.0, 
0.0, 0.674622), (0.0, 0.0, -0.529707),\n (0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}\n h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}\n h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}\n hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),\n 'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}\n hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}\n n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), (0.0, 0.0, 2.30761092))}\n n2_xyz = {'symbols': ('N', 'N'), 'isotopes': (14, 14), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.09710935))}\n nh3_xyz = {'symbols': ('N', 'H', 'H', 'H'), 'isotopes': (14, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.11289), (0.0, 0.938024, -0.263409),\n (0.812353, -0.469012, -0.263409), (-0.812353, -0.469012, -0.263409))}\n oh_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.967))}\n cl2_xyz = {'symbols': ('Cl', 'Cl'), 'isotopes': (35, 35), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1))}\n\n c2h2 = ARCSpecies(label='C2H2', smiles='C#C', multiplicity=1, charge=0)\n c2h2.initial_xyz = c2h2_xyz\n\n ch4 = ARCSpecies(label='CH4', smiles='C', multiplicity=1, charge=0)\n ch4.initial_xyz = ch4_xyz\n\n co2 = ARCSpecies(label='CO2', smiles='O=C=O', multiplicity=1, charge=0)\n co2.initial_xyz = co2_xyz\n\n co = ARCSpecies(label='CO', smiles='[C-]#[O+]', multiplicity=1, charge=0)\n co.initial_xyz = co_xyz\n\n f2 = ARCSpecies(label='F2', smiles='[F][F]', multiplicity=1, charge=0)\n f2.initial_xyz = f2_xyz\n\n ch2o = ARCSpecies(label='CH2O', smiles='C=O', multiplicity=1, charge=0)\n ch2o.initial_xyz = ch2o_xyz\n\n h2o = ARCSpecies(label='H2O', smiles='O', multiplicity=1, charge=0)\n h2o.initial_xyz = h2o_xyz\n\n h2 = ARCSpecies(label='H2', smiles='[H][H]', multiplicity=1, charge=0)\n h2.initial_xyz = h2_xyz\n\n hcn = ARCSpecies(label='HCN', smiles='C#N', multiplicity=1, charge=0)\n hcn.initial_xyz = hcn_xyz\n\n hf = ARCSpecies(label='HF', smiles='F', multiplicity=1, charge=0)\n hf.initial_xyz = hf_xyz\n\n n2o = ARCSpecies(label='N2O', smiles='[N-]=[N+]=O', multiplicity=1, charge=0)\n n2o.initial_xyz = n2o_xyz\n\n n2 = ARCSpecies(label='N2', smiles='N#N', multiplicity=1, charge=0)\n n2.initial_xyz = n2_xyz\n\n nh3 = ARCSpecies(label='NH3', smiles='N', multiplicity=1, charge=0)\n nh3.initial_xyz = nh3_xyz\n\n oh = ARCSpecies(label='OH', smiles='[OH]', multiplicity=2, charge=0)\n oh.initial_xyz = oh_xyz\n\n cl2 = ARCSpecies(label='Cl2', smiles='[Cl][Cl]', multiplicity=1, charge=0)\n cl2.initial_xyz = cl2_xyz\n\n species_list = [c2h2, ch4, co2, co, f2, ch2o, h2o, h2, hcn, hf, n2o, n2, nh3, oh, cl2]\n\n return species_list", "def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }", "def build_dict(infile):\n\n coords = {}\n sizes = {}\n\n for line in infile:\n fields = 
line.split()\n ref_st, ref_end, qry_st, qry_end = map(int, fields[0:4])\n qry_chr, qry_size = fields[14], int(fields[8])\n if qry_chr not in coords:\n coords[qry_chr] = {0:[], 1:[]} # 0=ref; 1=qry\n sizes[qry_chr] = qry_size\n coords[qry_chr][0].append([ref_st, ref_end])\n coords[qry_chr][1].append(sorted([qry_st, qry_end]))\n \n return coords, sizes", "def build_mim2entrez_dict(mim2gene_file):\n mim2entrez_dict = {}\n\n mim2gene_fh = open(mim2gene_file, 'r')\n\n for line in mim2gene_fh: # Loop based on loop from Dima @ Princeton\n toks = line.split('\\t')\n\n try: # This is to catch lines that are not in the format we want.\n mim = toks[0]\n mim_type = toks[1]\n entrez_gid = toks[2]\n except IndexError:\n continue\n\n if mim_type in TYPE_FILTER:\n if entrez_gid == '':\n logger.warning(\"Gene Entrez ID was blank for MIM ID '%s' in %s\"\n \" mim-to-gene mapping file\", mim, mim2gene_file)\n continue\n if mim in mim2entrez_dict:\n logger.warning(\"MIM already exists in mim2entrez_dict: %s\", mim)\n mim2entrez_dict[mim] = entrez_gid\n return mim2entrez_dict", "def gene_symbol_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene Symbol\"]\n resD[keyI] = valueI\n\n return resD", "def smp_dict():\n out = base_dict()\n out['mro']['current'] = ['Sample']\n out['name']['current'] = 'Sample'\n ao(out, 'idx', 'Integer', attr=['Hidden'])\n ao(out, 'ii', 'Integer', attr=['Hidden'])\n ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')\n return out", "def asdict(self):\n return _osgAnimation.mapVertexInfluence_asdict(self)", "def _get_GPS_data(self, gpsdata: dict) -> dict:\r\n ans = {}\r\n\r\n # Replace all exif tags with english titles\r\n for i in gpsdata:\r\n ans[ExifTags.GPSTAGS[i]] = gpsdata[i]\r\n \r\n return ans", "def network_nodes_species(self):\n G, mapping = self.network()\n waste, resources, intmed_products = self.amenities()\n\n node_dict = {}\n\n for nd in G:\n # print(nd)\n if isinstance(nd, int):\n node_dict[nd] = \"r\"\n elif nd in self.commodity:\n node_dict[nd] = \"Xc\"\n elif nd in waste:\n node_dict[nd] = \"w\"\n elif nd in resources:\n node_dict[nd] = \"Xr\"\n elif nd in intmed_products:\n node_dict[nd] = \"InPr\"\n\n return node_dict", "def get_identifier_map(self) -> None:\n id_mapping_dict = self._get_identifiers_from_kbs()\n id_mapping_dict = self._add_uniprot_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_chebi_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_bridge_db_identifiers(id_mapping_dict)\n\n print(\"merging similar...\")\n id_mapping_dict = pathway_utils.merge_similar(id_mapping_dict)\n\n print(\"generating local identifiers...\")\n self.forward_map, self.backward_map = self._generate_local_identifiers(id_mapping_dict)\n self.save_id_dict()", "def make_homologues_mirnas(phylogenetic_tree, mirna_seqs):\n species = [leaf.taxon.label for leaf in phylogenetic_tree.leaf_iter()]\n mirhomologues = pd.DataFrame({sp: {mirid: mirna_seqs[mirid][:21]\n for mirid in mirna_seqs.keys()}\n for sp in species}).transpose()\n return mirhomologues", "def metadata(self):\n return {\n \"wildtype\" : self.wildtype,\n \"genotypes\" : self.genotypes,\n \"phenotypes\" : self.Raw.phenotypes,\n \"stdeviations\" : self.stdeviations,\n \"n_replicates\" : self.n_replicates,\n \"mutations\" : self.mutations,\n \"log_transform\" : self.log_transform,\n \"order\" : self.order,\n \"epistasis\" : {\n \"keys\" : self.epistasis.keys,\n \"values\" : 
self.epistasis.values,\n }\n }", "def caricaReadsEsIn(fileInput):\n\n\tidx_gene \t= 4 \n\tidx_chrom \t= 0\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 6\n\n\tdictReadsEsIn = {}\n\n\tlines = [x.strip('\\n').split('\\t') for x in open(fileInput)]\n\t\n\tfor riga in lines:\n\t\tgeneName \t= riga[idx_gene]\n\t\tchrom\t\t= riga[idx_chrom]\n\t\tstart\t\t= riga[idx_start]\n\t\tend\t\t\t= riga[idx_end]\n\t\treads\t\t= riga[idx_reads]\n\n\t\tif not geneName in dictReadsEsIn:\n\t\t\tdictReadsEsIn[geneName] = {}\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\t# Il primo campo indica se il cromosoma ha almeno..\n\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..una regione con reads\n\t\telif chrom not in dictReadsEsIn[geneName]:\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\n\t\telse:\n\t\t\tdictReadsEsIn[geneName][chrom][idx_start].append(start)\n\t\t\tdictReadsEsIn[geneName][chrom][idx_end].append(end)\n\t\t\tdictReadsEsIn[geneName][chrom][3].append(reads)\n\n\t\ti = len(dictReadsEsIn[geneName][chrom][3])\n\t\tif int(dictReadsEsIn[geneName][chrom][3][i-1]) != 0:\n\t\t\tdictReadsEsIn[geneName][chrom][0] = True\t\t\t\t\t\t\t# Indica se c'e' almeno una regione esonica/intronica\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# che mappa delle reads\n\n\t# Si eliminano i cromosomi che non hanno mappato reads ne' su introni\n\t# ne' su esoni (primo value del dizionario = FALSE)\n\t#\n\tgeneKeys = dictReadsEsIn.keys()\n\tfor geneName in geneKeys:\n\t\tchromKeys = dictReadsEsIn[geneName].keys()\n\t\tfor chrom in chromKeys:\n\t\t\tif not dictReadsEsIn[geneName][chrom][0]:\n\t\t\t\tdel dictReadsEsIn[geneName][chrom]\n\t\t\t\t# Si eliminano i geni che non hanno piu' cromosomi\n\t\t\t\t#\n\t\t\t\tif not dictReadsEsIn[geneName]:\n\t\t\t\t\tdel dictReadsEsIn[geneName]\n\t\t\t\t\tprint 'Il gene %s non presenta cromosomi con reads mappanti.\\n' % geneName,\n\n\treturn dictReadsEsIn", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T,\n 'C': self.C.to_dictionary(), 'D': self.D.to_dictionary(), 'sigma': self.sigma.to_dictionary()}", "def extract_gene_args(self, line):\n result = {'seq_name': line[0], 'source': line[1], \\\n 'indices': [int(line[3]), int(line[4])], 'strand': line[6]}\n attribs = self.parse_attributes(line[8])\n\n if not attribs:\n return None\n\n result.update(attribs)\n return result", "def dict() -> Dict[str, Pin]:", "def generate_bed_dict(line, bed_header):\n out_dict = dict((key, value) for key, value in izip(bed_header, line))\n return(out_dict)", "def get_chromosome_object(agp):\n\n chr = {}\n\n agp = agp.split('\\n')\n\n for i, line in enumerate(agp):\n if len(line) == 0 or line[0] == '#':\n continue\n tabs = line.split(\"\\t\")\n acc = tabs[0]\n start = int(tabs[1])\n stop = int(tabs[2])\n comp_type = tabs[6]\n if 'acc' not in chr:\n chr['accession'] = acc\n chr['type'] = 'nuclear'\n if comp_type == 'centromere':\n chr['centromere'] = {\n 'start': start,\n 'length': stop - start\n }\n if i == len(agp) - 2:\n chr['length'] = stop\n return chr", "def Genes_Per_Genome(Input, Gene_Separator,Contig_Separator):\n Number_Genes = {}\n Gene_Length = {}\n with open(Input) as FastAInput:\n for line in FastAInput:\n if \">\" in line:\n Gene = line.split()[0].replace(\">\",\"\")\n Gene_Length[Gene] = 0\n Genome = Gene.split(Contig_Separator)\n Genome = Contig_Separator.join(Genome[:-1])\n Number_Genes[Genome] = Number_Genes.get(Genome, 0) + 1\n else:\n line = line.strip()\n Gene_Length[Gene] += len(line)\n return 
(Number_Genes, Gene_Length)", "def refseqTSS():\n refSeqs=fetchRefSeq()\n output={}\n for chr in genomelib.chr_names:\n output[chr]=[]\n for strand in ['+','-']:\n for k in refSeqs[chr][strand]:\n v=refSeqs[chr][strand][k]\n if v['strand'] == \"+\":\n tss=v['txStart']\n elif v['strand'] == \"-\":\n tss=v['txEnd']\n tssInfo=(v['name'],v['chrom'],int(tss),v['strand'])\n output[chr].append(tssInfo)\n output[chr].sort(lambda x,y:cmp(x[2],y[2]))\n return output" ]
[ "0.6775984", "0.63313013", "0.6185268", "0.6166058", "0.61002773", "0.5993038", "0.5952857", "0.5945249", "0.5915649", "0.59024686", "0.5877856", "0.5875492", "0.5870065", "0.5817717", "0.57851946", "0.57501155", "0.57454574", "0.5728968", "0.57258415", "0.56919414", "0.5636224", "0.5629619", "0.5620638", "0.5593645", "0.5579922", "0.5567293", "0.5565742", "0.5550464", "0.5533519", "0.55269367", "0.5522447", "0.5515292", "0.55075634", "0.5506308", "0.5492099", "0.54906213", "0.5479611", "0.5474655", "0.5468882", "0.5461308", "0.5443919", "0.5433991", "0.5426283", "0.54206896", "0.5416218", "0.5414628", "0.5408926", "0.5408014", "0.5407047", "0.5341424", "0.5324042", "0.53217983", "0.53206414", "0.5317002", "0.5311696", "0.53023165", "0.5301727", "0.52982706", "0.52929693", "0.5287297", "0.52868915", "0.5284912", "0.527357", "0.5272986", "0.5258881", "0.52478456", "0.52427447", "0.52337176", "0.52248275", "0.51973265", "0.5190615", "0.5187654", "0.51818776", "0.51725554", "0.5167313", "0.5157603", "0.5157603", "0.51470643", "0.5143163", "0.513783", "0.5137368", "0.5134111", "0.51299745", "0.5126763", "0.51225084", "0.51127446", "0.51088244", "0.51067996", "0.50946504", "0.50849766", "0.5079652", "0.5077797", "0.50720096", "0.5070462", "0.5066514", "0.50649095", "0.5059925", "0.5051603", "0.50500375", "0.5047195" ]
0.6580928
1
Import results from scikit pipeline for all datasets contained in datasets_names.
def import_scikit_data(sralist):
    scikit_data_dict = {}
    for dataset in sralist:
        with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:
            scikit_data_dict[dataset] = [json.load(scikit_data)]
    return scikit_data_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_processed_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n path = os.path.join('downloads', name)\n path_processed = os.path.join(path, 'processed')\n\n if name == 'iris':\n return pd.read_csv(os.path.join(path_processed, 'iris.csv'))\n\n elif name == 'wine':\n return pd.read_csv(os.path.join(path_processed, 'wine.csv'))\n\n elif name == 'titanic':\n return pd.read_csv(os.path.join(path_processed, 'titanic.csv'))\n\n elif name == 'lanl':\n with open(os.path.join(path_processed, 'train_data.pkl'), 'rb') as f:\n x = pkl.load(f)\n with open(os.path.join(path_processed, 'train_targets.pkl'), 'rb') as f:\n y = pkl.load(f)\n return x, y\n\n elif name == 'MNIST' or name == 'FashionMNIST':\n training = torch.load(os.path.join(path_processed, 'training.pt'))\n test = torch.load(os.path.join(path_processed, 'test.pt'))\n return training, test", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def import_func(path_):\n\n datasets_dic = {}\n\n for dataset_path in path_:\n # Parse labels from filenames\n dataset_label = os.path.split(dataset_path)[1].split('.')[0]\n\n # Read from csv to Pandas\n dataset = pd.read_csv(dataset_path)\n\n # insert dataset label to the dataframes\n dataset.insert(0, 'trial', dataset_label)\n dataset.insert(0, 'maneuver', dataset_label.split('_')[0])\n\n # Datasets are stored in a dictionary\n datasets_dic.update({dataset_label: dataset})\n\n # list of imported maneuvers\n dataset_names = list(datasets_dic.keys())\n\n return datasets_dic, dataset_names", "def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = 
[self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n 
datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def datasets(self):\n pass", "def getDatasets(self, dirname, dataset_list):\r\n \r\n files = self.loadDirectory(dirname)\r\n \r\n result = []\r\n for dataset_name in dataset_list:\r\n arr = np.concatenate([f[dataset_name] for f in files])\r\n result.append(arr)\r\n \r\n return result", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_datasets():\n idx, data_paths, data_names, desc_paths, descrips, sql_paths, \\\n sql_names, loaded, table_size, \\\n loaded_names = mgr.build_datasets_table()\n return render_template('load_datasets.html',\n zip=zip(idx, data_paths, data_names, desc_paths,\n descrips, sql_paths, sql_names, loaded,\n table_size),\n data_names=loaded_names)", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in 
['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test 
images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', 
'*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n 
root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n 
download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = 
remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and 
tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type 
=traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def process_datasets(size, counts):\n global FUNCTION_LOGS\n FUNCTION_LOGS.append((f\"-----> Processing size {size}\", counts))\n # process small data sets\n counts = import_data('data',\n f'products_{size}.csv',\n f'customers_{size}.csv',\n f'rentals_{size}.csv')\n logging.info('Imported %d products, %d customers, and %d rentals', *counts)\n\n show_available_products()\n show_rentals('prd0000')\n\n drop_data()", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = 
{'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif 
load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def Get_datasets(**kwargs):\n from .utils import option_printer, get_conn, get_param_dict, get_logger_instance\n from .cohort_tables import make_target_comp_tables\n from .table2rawseq import table_to_rawseq\n from .rawseq2multihot import rawseq_to_multihot\n from .multihot2datasets import multihot_to_datasets\n import os, logging\n from importlib import reload\n \n ## get params\n param_dict = get_param_dict(kwargs['DS_PARAMS_FILE_NAME'], kwargs['CONFIG_FOLDER_PATH'])\n param_dict.update(kwargs)\n if not os.path.exists(param_dict['DATA_FOLDER_PATH']): os.makedirs(param_dict['DATA_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['CDM_DB']\n \n param_dict['DUMPING_PATH'] = os.path.join(param_dict['RESULT_FOLDER_PATH'], \n param_dict['PROJECT_NAME'], \n param_dict['CDM_DB_NAME'])\n if not os.path.exists(param_dict['DUMPING_PATH']): \n os.makedirs(param_dict['DUMPING_PATH'])\n \n if param_dict['PIPELINE_START_LEVEL']<3:\n param_dict['DB_CONN'], CDM_DB_NAME, RESULT_DB_NAME = get_conn(param_dict['DB_CONN_FILENAME'], \n param_dict['CONFIG_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = CDM_DB_NAME\n param_dict['RESULT_DB_NAME'] = RESULT_DB_NAME\n else:\n param_dict['RESULT_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['RESULT_DB']\n \n ## logger\n logging.shutdown()\n reload(logging)\n main_logger = get_logger_instance(logger_name='ds_pipeline', \n DUMPING_PATH=param_dict['DUMPING_PATH'], \n parent_name=False,\n stream=True)\n \n ## print params\n main_logger.info(\"\\n (params) \\n\")\n try: option_printer(main_logger, param_dict['DB_CONN'], **param_dict)\n except: pass\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [1] Make_target_comp_tables\n if param_dict['PIPELINE_START_LEVEL']<=1:\n main_logger.info(\"\\n[Level 1] Make_TARGET_COMP_tables\\n\")\n make_target_comp_tables(**param_dict)\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [2] Table to rawSeq\n if param_dict['PIPELINE_START_LEVEL']<=2:\n main_logger.info(\"\\n[Level 2] Table to rawSeq\\n\")\n table_to_rawseq(param_dict['DUMPING_PATH'], \n param_dict['DB_CONN'], param_dict['CDM_DB_NAME'], \n param_dict['DATA_FOLDER_PATH'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [3] rawSeq to multihot\n if param_dict['PIPELINE_START_LEVEL']<=3:\n main_logger.info(\"\\n[Level 3] Convert to multihot\\n\")\n rawseq_to_multihot(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['MAX_TIME_STEP'], \n param_dict['DX_ONLY'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [4] Multihot to Dataset\n if param_dict['PIPELINE_START_LEVEL']<=4:\n main_logger.info(\"\\n[Level 4] Multihot to Dataset\\n\")\n datasets = multihot_to_datasets(param_dict['DUMPING_PATH'], \n 
param_dict['DATA_FOLDER_PATH'], param_dict['TR_RATIO'])\n \n #add info\n if param_dict['PIPELINE_START_LEVEL']<3: \n datasets.info['DB_CONN'] = param_dict['DB_CONN']\n datasets.info['CONFIG_FOLDER_PATH'] = param_dict['CONFIG_FOLDER_PATH']\n datasets.info['DATA_FOLDER_PATH'] = param_dict['DATA_FOLDER_PATH']\n datasets.info['RESULT_FOLDER_PATH'] = param_dict['RESULT_FOLDER_PATH']\n datasets.info['DB_CONN_FILENAME'] = param_dict['DB_CONN_FILENAME']\n datasets.info['DS_PARAMS_FILE_NAME'] = param_dict['DS_PARAMS_FILE_NAME']\n datasets.info['CDM_DB_NAME'] = param_dict['CDM_DB_NAME']\n datasets.info['RESULT_DB_NAME'] = param_dict['RESULT_DB_NAME']\n \n main_logger.info(\"\\n[Datasets Info.]\\n\")\n main_logger.info(\"{0:>26} {1:}\".format('[OPTION]', '[VALUE]'))\n for k in sorted(datasets.info.keys()):\n main_logger.info(\" {0:>23}: {1:}\".format(k, datasets.info[k]))\n \n #print(\"\\nALL DONE!!\")\n main_logger.info(\"\\n[ALL DONE!!]\\n\\n\")\n for h in list(main_logger.handlers):\n main_logger.removeHandler(h)\n h.flush()\n h.close()\n return datasets", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def load_datasets(path_sets, path_images):\n dataset_files = tuple(path_set_file.name \n for path_set_file in path_sets.glob('*.csv'))\n\n set_names = [dataset_file[: dataset_file.find('_')]\n for dataset_file in dataset_files]\n \n if len(dataset_files) == 3:\n name_order = ['training', 'validation', 'test']\n set_order = tuple(dataset_files.index(f'{name}_set.csv')\n for name in name_order)\n num_sets = 3\n else:\n training_index = dataset_files.index('training_set.csv')\n set_order = (training_index, 1 - training_index)\n num_sets = 2\n\n images_and_labels = [None] * num_sets * 2\n \n for k in range(num_sets):\n path_dataset_file = path_sets.joinpath(dataset_files[set_order[k]])\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n dataset = list(csv_reader)\n\n path_dataset_images = [path_images.joinpath(f'label_{row[1]}', row[0])\n for row in dataset]\n\n images_and_labels[k] = np.array([np.fromfile(path_image, np.float64)\n for path_image\n in path_dataset_images])\n\n images_and_labels[k+num_sets] = [row[1] for row in dataset]\n\n return images_and_labels", "def __loadDataset(self, parameters):\n # self.localConfigured = Settings.instance().readValue( key = 'Common/local-repo' )\n for pr in parameters:\n if pr['type'] == 'dataset':\n if pr['value'].startswith('undefined:/'):\n fileName = pr['value'].split('undefined:/')[1]\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n\n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"undefined:/%s\" % doc.getRaw()\n elif pr['value'].startswith('local-tests:/'):\n fileName = pr['value'].split('local-tests:/')[1]\n\n if not os.path.exists( fileName ):\n raise Exception(\"the following test data file is missing: %s \" % fileName)\n \n doc = FileModelTestData.DataModel()\n res = doc.load( absPath = fileName )\n pr['value'] = \"local-tests:/%s\" % doc.getRaw()\n else:\n pass", "def get_datasets(data):\n train_dataset, test_dataset = None, None\n data_dir = '../data'\n\n if data == 'fmnist':\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.2860], std=[0.3530])])\n train_dataset = 
datasets.FashionMNIST(data_dir, train=True, download=True, transform=transform)\n test_dataset = datasets.FashionMNIST(data_dir, train=False, download=True, transform=transform)\n \n elif data == 'fedemnist':\n train_dir = '../data/Fed_EMNIST/fed_emnist_all_trainset.pt'\n test_dir = '../data/Fed_EMNIST/fed_emnist_all_valset.pt'\n train_dataset = torch.load(train_dir)\n test_dataset = torch.load(test_dir) \n \n elif data == 'cifar10':\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True, transform=transform_train)\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True, transform=transform_test)\n train_dataset.targets, test_dataset.targets = torch.LongTensor(train_dataset.targets), torch.LongTensor(test_dataset.targets) \n \n return train_dataset, test_dataset", "def get_datasets(sim_args):\n if len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'all':\n data_tags = [\n 'Webscope_C14_Set1',\n 'Webscope_C14_Set2',\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'CIKM2017':\n data_tags = [\n 'MSLR-WEB10k',\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n 'MQ2007',\n 'MQ2008',\n 'OHSUMED',\n ]\n elif len(sim_args.data_folders) == 1 and sim_args.data_folders[0] == 'letor64':\n data_tags = [\n 'NP2003',\n 'NP2004',\n 'HP2003',\n 'HP2004',\n 'TD2003',\n 'TD2004',\n ]\n # random.shuffle(data_tags)\n else:\n data_tags = sim_args.data_folders\n for data_tag in data_tags:\n assert data_tag in DATASET_COLLECTION, 'Command line input is currently not supported.'\n yield DATASET_COLLECTION[data_tag]", "def get_data_parascans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'gt': images_gt, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = 
self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. 
Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, 
types, index))", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def read_datasets(data_string):\n if type(data_string) is dict:\n features_file = data_string[\"features\"]\n target_file = data_string[\"meta\"]\n if data_string.get(\"target_col\"):\n target_col = data_string.get(\"target_col\")\n else:\n target_col = \"target\"\n if data_string.get(\"train_test_col\"):\n train_test_col = data_string.get(\"train_test_col\")\n else:\n train_test_col = \"group\"\n elif type(data_string) is tuple:\n features_file = data_string[0]\n target_file = data_string[1]\n target_col = \"target\"\n train_test_col = \"group\"\n\n else:\n raise Exception(\n \"Data has to be expressed in either a tuple (features,target) or dictionary {\\\"features\\\":\\\"your_features\\\",\" +\n \"\\\"target\\\":\\\"your_target\\\"\")\n # opening data\n data_directory = os.path.join(project_dir,\"data/processed/\")\n try:\n X = pd.read_csv(data_directory + features_file, index_col=0)\n y = pd.read_csv(data_directory + target_file, index_col=0, encoding=\"ISO-8859-1\")\n except FileNotFoundError:\n print(\"Files not in data/preprocessed, searching for them in the application's directory. You should run the\" +\n \" program from its directory: python program.py instead of python /somewhere/else/program.py\")\n X = pd.read_csv(features_file, index_col=0)\n y = pd.read_csv(target_file, index_col=0, encoding=\"ISO-8859-1\")\n except pd.errors.ParserError as e:\n print(\"Pandas seams to be unable to read this file. 
Make sure it's a csv\")\n raise e\n except UnicodeDecodeError as e:\n print(\"The encoding of either the features or the targets is not encoded using UTF-8 or ISO-8859-1\")\n raise e\n # Check to see if columns exist and return them\n target_col = checking_columns(y, target_col, x=target_col)\n\n # Get group column\n train_test_col = checking_columns(y, train_test_col, x=train_test_col, handle=lambda x: target_col)\n\n return features_file, target_file, X, y, target_col, train_test_col", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def detect_prepared_datasets(self):\n if utils.find('*target_ds_preprocessed.pkl', self.prepared_data_dir) and \\\n utils.find('*rf_ds_preprocessed.pkl', self.prepared_data_dir) and \\\n utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):\n print('Pickles (preprocessed) found.')\n for pkl in utils.find('*preprocessed.pkl', self.prepared_data_dir):\n if \"target_ds\" in pkl: self.target_ds_preprocessed_path = pkl\n elif \"rf_ds\" in pkl: self.rf_ds_preprocessed_path = pkl\n \n 
LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path))\n\n for pkl in utils.find('*standardized_stacked_arr.pkl', self.prepared_data_dir):\n self.standardized_stacked_arr_path = pkl\n else:\n print('Pickles of pre-processed data incomplete. Proceeding to load & process raw dataset pickles.')\n self.target_ds_preprocessed_path, self.rf_ds_preprocessed_path = prepare.preprocess_time_series(self, self.prepared_data_dir, self.ALPHAs)\n\n LocalModelParams(self, utils.open_pickle(self.target_ds_preprocessed_path)) # generate new local model params\n\n self.standardized_stacked_arr_path = prepare.flatten_and_standardize_dataset(self, self.prepared_data_dir)\n print(f'--> Months for this dataset are: {self.month_names}')", "def get_data_heuristics(rootdir, img_path, datasetnames, heuristicnames):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Heuristic model images (predictions of models)\n images_models = {}\n for model in heuristicnames:\n image = load_data_pickle(img_path, dataset=dataset, filename=model)\n images_models.update({model: image})\n\n # Save images in datasets dictionary\n datasets.update({dataset: {'gt':images_gt, 'models':images_models}})\n\n print(\"dataset created\")\n return datasets", "def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n \"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update 
emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass", "def LoadTroikaDataset():\n data_dir = \"./datasets/troika/training_data\"\n data_fls = sorted(glob.glob(data_dir + \"/DATA_*.mat\"))\n ref_fls = sorted(glob.glob(data_dir + \"/REF_*.mat\"))\n return data_fls, ref_fls", "def load(name):\n if name in datasets:\n\n return pd.read_csv(os.path.join(datasets_path, \"%s.csv\" % name))\n else:\n raise ValueError(\"Dataset not found!\")", "def dataset_difficulty():\n results = []\n datasets = [ data_2007, data_2012, data_indoor, data_easy ] \n \n for data in datasets:\n \n #Let the user know where we are\n print data\n X,Y = load_csv(data)\n \n # Training/testing split + LDA fit\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n lda = LDA()\n lda.fit(X_train, Y_train)\n \n # Use linear SVC\n clf = svm.SVC(kernel=\"linear\")\n clf.fit(lda.transform(X_train), Y_train)\n \n # Predictions\n train_predict = clf.predict(lda.transform(X_train))\n test_predict = clf.predict(lda.transform(X_test))\n \n #Compute accuracy\n train_acc = 1.*sum(train_predict == Y_train)/len(train_predict)\n test_acc = 1.*sum(test_predict == Y_test)/len(test_predict)\n \n # Append results for that dataset\n results += [ [ data, train_acc, test_acc, clf, lda ] ]\n \n return results", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def data_loaders(dataset_path):\n dataset_path = dataset_path\n news_stock_dataset = NewsStockDataLoader(dataset_path)\n \n dataset_size = len(news_stock_dataset)\n indices = list(range(dataset_size))\n training_split = int(0.8 * dataset_size)\n validation_split = int(0.9 * dataset_size)\n\n np.random.seed(96)\n np.random.shuffle(indices)\n\n train_indices = indices[:training_split]\n valid_indices = indices[training_split:validation_split]\n test_indices = indices[validation_split:]\n\n train_sampler = SubsetRandomSampler(train_indices)\n valid_sampler = SubsetRandomSampler(valid_indices)\n test_sampler = SubsetRandomSampler(test_indices)\n \n collate = PadSequence()\n\n training_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"training_batch_size\"),\n sampler = train_sampler,\n collate_fn = collate)\n\n validation_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"validation_batch_size\"),\n sampler = valid_sampler,\n collate_fn = collate)\n\n testing_loader = DataLoader(news_stock_dataset,\n num_workers = 1,\n batch_size = Config.get(\"testing_batch_size\"),\n sampler= test_sampler,\n collate_fn = collate)\n \n return training_loader, validation_loader, testing_loader", "def load_raw_dataset(name):\n assert name in VALID_NAMES, 'Invalid data set requested. 
Please make sure name is one of ' + ', '.join(VALID_NAMES) + '.'\n\n os.makedirs('downloads', exist_ok=True)\n path = os.path.join('downloads', name)\n path_raw = os.path.join(path, 'raw')\n\n if name == 'iris':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'iris.data'), names=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'species'])\n\n elif name == 'wine':\n prep_path(path)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', path_raw)\n safe_dl('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names', path_raw)\n return pd.read_csv(os.path.join(path_raw, 'wine.data'), names=['class',\n 'alcohol',\n 'malic_acid',\n 'ash',\n 'alkalinity',\n 'magnesium',\n 'phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'dilution',\n 'proline'])\n\n elif name == 'titanic':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path_raw)) == 0:\n kaggle.api.competition_download_files('titanic', path_raw)\n titanic = pd.read_csv(os.path.join(path_raw, 'train.csv'))\n titanic_test = pd.read_csv(os.path.join(path_raw, 'test.csv'))\n return titanic, titanic_test\n\n elif name == 'lanl':\n import kaggle; kaggle.api.authenticate()\n prep_path(path)\n if len(os.listdir(path)) == 0:\n kaggle.api.competition_download_files('LANL-Earthquake-Prediction', path_raw)\n if not os.path.exists(os.path.join(path_raw, 'test')):\n zip_ref = zipfile.ZipFile(os.path.join(path_raw, 'test.zip'), 'r')\n zip_ref.extractall(os.path.join(path_raw, 'test'))\n zip_ref.close()\n return pd.read_csv(os.path.join(path_raw, 'train.csv.zip'))\n\n elif name == 'MNIST':\n mnist = torchvision.datasets.MNIST('downloads', train=True, download=True)\n mnist_test = torchvision.datasets.MNIST('downloads', train=False, download=True)\n return mnist, mnist_test\n\n elif name == 'FashionMNIST':\n fmnist = torchvision.datasets.FashionMNIST('downloads', train=True, download=True)\n fmnist_test = torchvision.datasets.FashionMNIST('downloads', train=False, download=True)\n return fmnist, fmnist_test", "def get_available_datasets():\n files = [file for file in glob.glob(os.path.join(MODULE_ROOT, \"datasets/*.json\"))]\n datasets = []\n for file in files:\n with open(file, \"r\") as f:\n dataset_info = json.load(f)\n datasets.append(dataset_info)\n return datasets", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def datasets(self, datasets):\n\n self._datasets = datasets", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n 
std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def get_pipelines() -> Iterable[DataPipeline]:\n for pipeline_name in get_pipeline_names():\n yield DataPipeline.load(pipeline_name)", "def preprocessing(dataset):\r\n # upload the processed time series data to its distinct numpy arrays\r\n print('')\r\n training_input = []\r\n training_output = []\r\n validation_input = []\r\n validation_output = []\r\n loop = tqdm.tqdm(total = len(dataset), position = 0, leave = False)\r\n for d in range(len(dataset)):\r\n loop.set_description('Packaging all processed time series data... 
' .format(len(dataset)))\r\n time_series = dataset[d]\r\n if time_series.get_dataset_label() == \"TRAINING\":\r\n training_input.append(time_series.sampled_matrix())\r\n training_output.append(time_series.get_close_value())\r\n else:\r\n validation_input.append(time_series.sampled_matrix())\r\n validation_output.append(time_series.get_close_value())\r\n loop.update(1)\r\n\r\n training_input, training_output = np.array(training_input), np.array(training_output)\r\n training_input = np.reshape(training_input, (training_input.shape[0], training_input.shape[1], 1))\r\n validation_input, validation_output = np.array(validation_input), np.array(validation_output)\r\n validation_input = np.reshape(validation_input, (validation_input.shape[0], validation_input.shape[1], 1))\r\n print('\\n')\r\n loop.close()\r\n return training_input, training_output, validation_input, validation_output", "def loadSets(self, indir=\"\"):\n\n if indir==\"\":\n print(\"specify folder\")\n return -1\n\n self.train = pd.read_pickle(\"{}/train.pkl\".format(indir))\n self.valid = pd.read_pickle(\"{}/valid.pkl\".format(indir))\n self.test = pd.read_pickle(\"{}/test.pkl\".format(indir))\n\n print(\"sets loaded\")", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def download_all_datasets():\n print(\"Downloading all datasets ...\")\n for dataset in get_available_datasets():\n download_dataset(dataset)", "def perform_extraction(self) -> None:\n\n self._process_datasets_all_frames()", "def collect_datset(self):\n response = requests.get(self.url)\n lines = response.text.splitlines()\n data = []\n for item in lines:\n item = item.split(\",\")\n data.append(item)\n data.pop(0) # to remove labels from list\n dataset = np.matrix(data)\n return dataset", "def import_datasets(snli_path):\n print('extract data from snli directory..')\n train = dict(); dev = dict(); test = dict()\n gold_labels = {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n\n for file_type in ['train', 'dev', 'test']:\n path = os.path.join(snli_path, 'snli_1.0_{}.jsonl'.format(file_type))\n with open(path) as file:\n data = [json.loads(line) for line in file]\n eval(file_type)['premise'] = [entry['sentence1'] for entry in data if entry['gold_label'] != '-']\n eval(file_type)['hypothesis'] = [entry['sentence2'] for entry in data if entry['gold_label'] != '-']\n g_labels = np.array([gold_labels[entry['gold_label']] for entry in data if entry['gold_label'] != '-'])\n 
eval(file_type)['label'] = g_labels\n print('extraction process was finished successfully!')\n return train, dev, test", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def loadBootstrapData(dirName):\n import glob\n bsSetNames = glob.glob(dirName + \"*.csv\")\n\n bsSets = []\n\n # Load each bootstrap dataset's CSV\n for bsn in bsSetNames:\n bsSets.append(np.loadtxt(bsn, dtype=\"float\"))\n\n return bsSets", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def get_data_scans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = self.dataset_class(path)", "def get_datasets() -> List[Dataset]:\n\n amzn = Dataset(\n id='amzn', name='Amazon Reviews', language='en',\n description=\"This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.\")\n\n cnn = Dataset(\n id='cnn_dailymail', name='CNN/ DailyMail', language='en',\n description='The well-known CNN/ DailyMail data set for text summarization (version 3.0.0). The data has been fetched via HuggingFace Datasets')\n\n swisstext = Dataset(\n id='swisstext', name='SwissText 2019', language='de',\n description='The dataset was published for the SwissText conference 2019. 
')\n\n return [amzn, cnn, swisstext]", "def load(datasets, treemakers='Basics', force_reload=False):\n if isinstance(datasets, str):\n datasets = [datasets]\n if isinstance(treemakers, (type, str)):\n treemakers = [treemakers]\n\n combined_dataframes = []\n\n for treemaker in treemakers:\n\n dataframes = []\n for dataset in datasets:\n minitree_path = get(dataset, treemaker, force_reload=force_reload)\n new_df = pd.DataFrame.from_records(root_numpy.root2array(minitree_path).view(np.recarray)) \n dataframes.append(new_df)\n\n # Concatenate mini-trees of this type for all datasets\n combined_dataframes.append(pd.concat(dataframes))\n\n # Concatenate mini-trees of all types\n if not len(combined_dataframes):\n raise RuntimeError(\"No data was extracted? What's going on??\")\n return pd.concat(combined_dataframes, axis=1)", "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def fetch_dataset(data_root_dir):\n pattern = \"winemag_dataset_*.csv\"\n\n file_list = glob.glob(os.path.join(data_root_dir, pattern))\n\n df_list = [pd.read_csv(fname) for fname in file_list]\n\n full_df = pd.concat(df_list)\n\n # give unique row names to all\n full_df.index = range(full_df.shape[0])\n\n print(\"Dataset fetched.\")\n return full_df", "def _get_setup(self, dataset_name):\n for potential_setup in self.setup:\n for dataset in potential_setup[\"datasets\"]:\n if dataset_name in dataset:\n test_setup = potential_setup\n self.io_args.color = os.path.join(self.io_args.input_root, dataset)\n return test_setup", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' 
% (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def load_all(self, root_dir, file_list=None, pattern=None):\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(PMUData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(PMUData.load_single(path) for path in input_paths)\n\n return all_df", "def get_datasets(request):\n from seed.models import obj_to_dict\n org = Organization.objects.get(pk=request.GET.get('organization_id'))\n datasets = []\n for d in ImportRecord.objects.filter(super_organization=org):\n importfiles = [obj_to_dict(f) for f in d.files]\n dataset = obj_to_dict(d)\n dataset['importfiles'] = importfiles\n if d.last_modified_by:\n dataset['last_modified_by'] = d.last_modified_by.email\n dataset['number_of_buildings'] = BuildingSnapshot.objects.filter(\n import_file__in=d.files,\n canonicalbuilding__active=True,\n ).count()\n dataset['updated_at'] = convert_to_js_timestamp(d.updated_at)\n datasets.append(dataset)\n\n return {\n 'status': 'success',\n 'datasets': datasets,\n }", "def _init_dataset_list(self, datasets):\n lst = []\n for ds in datasets:\n if isinstance(ds, basestring):\n base_path = Storage.gt_path(ds)\n for img in self._storage[base_path].keys():\n lst += self._list_image_patches(ds, img)\n else:\n lst += self._list_image_patches(ds[0],\n Storage.normalize_name(ds[1]))\n return lst", "def get_datasets(business_data_file, enter_data_file, politics_data_file, sport_data_file, tech_data_file):\n # Load data from files\n business_examples = list(open(business_data_file, \"r\").readlines())\n business_examples = [s.strip() for s in business_examples]\n enter_examples = list(open(enter_data_file, \"r\").readlines())\n enter_examples = [s.strip() for s in enter_examples]\n politics_examples = list(open(politics_data_file, \"r\").readlines())\n politics_examples = [s.strip() for s in politics_examples]\n sport_examples = list(open(sport_data_file, \"r\").readlines())\n sport_examples = [s.strip() for s in sport_examples]\n tech_examples = list(open(tech_data_file, \"r\").readlines())\n 
tech_examples = [s.strip() for s in tech_examples]\n\n datasets = dict()\n datasets['data'] = business_examples + enter_examples + politics_examples + sport_examples + tech_examples\n target = [0 for x in business_examples] + [1 for x in enter_examples] + [2 for x in politics_examples] + [3 for x in sport_examples] + [4 for x in tech_examples]\n datasets['target'] = target\n datasets['target_names'] = ['business_examples', 'enter_examples', 'politics_examples', 'sport_examples', 'tech_examples']\n return datasets", "def load_data(self, inventory, group_names = None, site_names = None, dataset_names = None):\n\n raise NotImplementedError('load_data')", "def get_datasets(process,path):\n datasets = []\n if hasattr(process,\"datasets\"):\n for dataset_name in process.datasets.parameterNames_():\n dataset_paths = getattr(process.datasets,dataset_name)\n if path.label() in dataset_paths:\n datasets.append(dataset_name)\n return datasets", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def _get_existing_datasets(self):\n self._h5_guess = USIDataset(self.h5_results_grp['Guess'])\n\n try:\n self._h5_status_dset = self.h5_results_grp[self._status_dset_name]\n except KeyError:\n warn('status dataset not created yet')\n self._h5_status_dset = None\n\n try:\n self._h5_fit = self.h5_results_grp['Fit']\n self._h5_fit = USIDataset(self._h5_fit)\n except KeyError:\n self._h5_fit = None\n if not self._is_guess:\n self._create_fit_datasets()", "def datasets(self):\n return [Dataset.GWAS_CATALOG, Dataset.CLINVAR, Dataset.EFO]", "def find_products(self):\n return find_datasets(self.input_dir, self.recursive, **self.kwargs)", "def get_data_loaders(opt):\n return find_dataloader_using_name(opt.dataloader)(opt).load_data()", "def KittiTestDataset(test_root_path):\n \n names = os.listdir(test_root_path)\n dataset = [[os.path.join(test_root_path, name)] for name in names]\n \n return dataset", "def prepare_multisubjectdataset(args, load_training=True, load_validation=True,\n load_testing=True,\n log_level=logging.root.level):\n with Timer(\"\\nPreparing datasets\", newline=True, color='blue'):\n dataset = MultiSubjectDataset(\n args.hdf5_file, lazy=args.lazy, cache_size=args.cache_size,\n log_level=log_level)\n dataset.load_data(load_training, load_validation, load_testing)\n\n logging.info(\"Number of subjects loaded: \\n\"\n \" Training: {}\\n\"\n \" Validation: {}\\n\"\n \" Testing: {}\"\n 
.format(dataset.training_set.nb_subjects,\n dataset.validation_set.nb_subjects,\n dataset.testing_set.nb_subjects))\n\n return dataset", "def get_datasets(recipe):\n # \"datasets\"\n return {dataset: get_instance(**par) for dataset, par in recipe.items()}", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = PointCloudDataset(\n self.dataset_class(path), self._point_cloud_size, self._sample_size\n )", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def identify_datasets(self, language_objects, context):\n\n datasets, new_sen = self.extractor.extract_all_templates(\n language_objects, context\n )\n context[\"datasets\"] = datasets\n return {'type': 'result', 'result': (new_sen, context[\"datasets\"])}", "def dataset(options):\n pass", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def getDatasets(fileinformation):\n filedata = pd.read_csv(fileinformation, sep=\"\\t\", header=None)\n datalabels = list(filedata.iloc[:,0].values)\n 
datafiles = list(filedata.iloc[:,1].values)\n return datalabels, datafiles", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def load_data(self, modalities, args):\n print(\"Loading data...\")\n data_dir = os.path.abspath(args.data_dir)\n train_data = SubtitlesDataset(modalities, data_dir, mode='train',\n truncate=True, item_as_dict=True)\n test_data = SubtitlesDataset(modalities, data_dir, mode='test',\n truncate=True, item_as_dict=True)\n print(\"Done.\")\n if len(args.normalize) > 0:\n print(\"Normalizing \", args.normalize, \"...\")\n # Normalize test data using training data as reference\n test_data.normalize_(modalities=args.normalize,\n ref_data=train_data)\n # Normalize training data in-place\n train_data.normalize_(modalities=args.normalize)\n return train_data, test_data", "def load_all_dfs(clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n print('loading data')\n first_clf = clf_list[0]\n df = pd.read_csv('Pikki'+first_clf+'.csv')\n df['df'] = first_clf\n\n df = df.set_index(['id','df'])\n\n for clf in clf_list[1:]:\n file_name = 'Pikki' + clf + '.csv'\n df_tmp = pd.read_csv(file_name)\n df_tmp['df'] = clf\n\n df_tmp = df_tmp.set_index(['id','df'])\n\n df = pd.concat([df,df_tmp])\n\n \n df['std'] = df.apply(np.std,axis=1,raw = True)\n end = time.clock()\n print(end-start)\n return df#.swaplevel(0,1)", "def load_raw_data(self, input_files):\n\n log.debug(f\"Loading dataset {input_files}\") \n print(f\"Loading 
dataset\")\n\n # Load stroke information from XML files\n for file in input_files:\n new_strokeset = strokeset.StrokeSet(file)\n self.strokesets.append(new_strokeset)\n self.stroke_matrix.append(new_strokeset.as_delta_array())\n self.stroke_ascii.append(new_strokeset.get_text())\n\n done_msg = \"Finished parsing dataset. Imported {} lines\".format(len(self.get_strokesets()))\n print (done_msg)\n log.info(done_msg)", "def get_data_loaders(cap_files, visual_feats, vocab, bow2vec, batch_size=100, num_workers=2, n_caption=2, video2frames=None, video2frames_target=None, visual_feats_target=None, caption_file_target=None, multi_flag=0):\n if video2frames_target!=None and visual_feats_target!=None:\n if multi_flag == 0:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train'], video2frames_target=video2frames_target['train'], visual_feat_target=visual_feats_target['train'], caption_file_target=caption_file_target, visual_feat_source2=visual_feats['train2'], video2frames_source2=video2frames['train2'], caption_file_source2=cap_files['train2']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n\n\n else:\n dset = {'train': Dataset4DualEncoding(cap_files['train'], visual_feats['train'], bow2vec, vocab, video2frames=video2frames['train']),\n 'val': Dataset4DualEncoding(cap_files['val'], visual_feats['val'], bow2vec, vocab, n_caption, video2frames=video2frames['val']),\n 'test': Dataset4DualEncoding(cap_files['test'], visual_feats['test'], bow2vec, vocab, n_caption, video2frames=video2frames['test'])}\n\n data_loaders = {x: torch.utils.data.DataLoader(dataset=dset[x],\n batch_size=batch_size,\n shuffle=(x=='train'),\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate_frame_gru_fn)\n for x in ['train', 'val', 'test']}\n return data_loaders", "def create_dataset(dataset_name):\n dataset_as_lower = dataset_name.lower()\n if dataset_as_lower in _datasets_from_keras.keys():\n data_details = _datasets_from_keras[dataset_as_lower]\n (x_train, y_train), (x_test, y_test) = data_details['data'].load_data()\n else:\n raise IOError(\"Dataset {0} is NOT supported\".format(dataset_name))\n\n # Performing pre-processing specifically for images datasets.\n if data_details['data type'] == 'image':\n x_train = _pre_process_images(x_train, data_details)\n x_test = _pre_process_images(x_test, data_details)\n\n return x_train, y_train, x_test, y_test", "def get_dataloader(sets, root_dir, manifest_path, task, batch_size=1, return_pid = False):\n data_loaders = {}\n\n for set in ['train', 'valid', 'test', 'all_images']: # test doesn't apply to MRNet but will keep in\n if set in sets:\n if set == 'train':\n ds = Dataset(set='train', task = task, root_dir=root_dir, manifest_path = 
manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n #transforms.RandomHorizontalFlip(), # default is 50%\n #transforms.RandomAffine(25, # rotation\n # translate=(0.1, 0.1),\n # shear = (-15, 15)),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=True)\n elif set == 'valid':\n ds = Dataset(set='valid', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n elif set == 'all_images':\n ds = Dataset(set='all_images', task = task, root_dir=root_dir,manifest_path = manifest_path, return_pid = return_pid,\n transform=transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n ]))\n loader = DataLoader(ds, batch_size=batch_size, shuffle=False)\n data_loaders[set] = loader\n return (data_loaders)", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def get_dataset_transformers(tokenizer, dataset_name, **kwargs):\n loader = get_loader(dataset_name, **kwargs)\n return get_transformer_splits(loader, tokenizer)", "def _init_dataset(self):\n champions = set()\n\n for name in os.listdir(self.data_root):\n label = name.split(\".\")[0]\n champions.add(label)\n self.image_paths += [(os.path.join(self.data_root, name), [[label]])]\n\n # self.encoder = self.encoder.fit(np.array(list(champions)).reshape(-1, 1))\n self.encoder = self.encoder.fit(list(champions))", "def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, test_loader", "def __init__(self, datasets: Union[str, list], **kwargs) -> None:\n if isinstance(datasets, str):\n datasets = [datasets]\n\n self._data = None\n for dataset in datasets:\n assert dataset in DATASETS, f\"[!] Dataset not found: {dataset}\"\n\n if self._data is None:\n self._data = DATASETS[dataset](kwargs)\n else:\n self._data = ConcatDataset([self._data, DATASETS[dataset](kwargs)])", "def prepare_nfold_datasets(self): # i.e. 
split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')" ]
[ "0.6478788", "0.6423142", "0.6417792", "0.6409163", "0.64047015", "0.6366228", "0.62911695", "0.62740004", "0.6178644", "0.61630666", "0.6153745", "0.6104244", "0.6079098", "0.6072685", "0.6070125", "0.60272676", "0.60272676", "0.59892446", "0.5962235", "0.59133875", "0.59104997", "0.5907264", "0.5905157", "0.59005743", "0.58791703", "0.5863494", "0.5846586", "0.58426225", "0.58409995", "0.5839623", "0.5837607", "0.5837346", "0.58364487", "0.5832712", "0.5819176", "0.58167446", "0.5804673", "0.58033085", "0.57960576", "0.57797754", "0.57634735", "0.57627237", "0.57578486", "0.5755688", "0.5743158", "0.5730998", "0.57308084", "0.5722777", "0.572222", "0.57182384", "0.57172215", "0.5715376", "0.5714393", "0.5710558", "0.57044715", "0.5698649", "0.56976485", "0.5697033", "0.5689463", "0.5683333", "0.56827825", "0.56591606", "0.5658959", "0.5658413", "0.56512177", "0.56417406", "0.5639561", "0.5637942", "0.56379384", "0.56360465", "0.5632288", "0.562706", "0.5614255", "0.56064075", "0.5603378", "0.56020856", "0.55968", "0.55932826", "0.55930144", "0.55901605", "0.5590002", "0.55898297", "0.55846053", "0.5583211", "0.558255", "0.55818856", "0.55802155", "0.5576762", "0.5573703", "0.55726624", "0.5568228", "0.55681306", "0.55657655", "0.5562542", "0.5561795", "0.55617577", "0.5558703", "0.55577683", "0.5556547", "0.5556142", "0.55513334" ]
0.0
-1
Building of scikit_df based on the output of the plot_ribo_density_dict.py script. The C/reverse/complementary strand is taken into account and the profile values ("codon_density_profile", "codon_triplet", "codon_AA") are reversed. This is
import numpy as np


def build_mat_scikit_strandOriented(sralist, scikit_data):
    """Build per-gene ribosome-density matrices from plot_ribo_density_dict.py output.

    For each gene, rows are the datasets in `sralist` and columns are codon
    positions (the first and last 8 codons are trimmed). Genes on the reverse
    (complementary) strand have their density profile, codon sequence and
    amino-acid sequence reversed. Relies on a module-level
    `strand_by_geneID_dict` mapping geneID -> "+" / "-".

    Returns (scikit_mat, seq_codons, seq_aa).
    """
    scikit_mat = {}
    seq_codons = {}
    seq_aa = {}

    for geneID in scikit_data[sralist[0]][0].keys():
        for ix, dataset in enumerate(sralist):
            if geneID in scikit_data[dataset][0].keys():
                # profile = (codon_density_profile, codon_triplet, codon_AA)
                current_profile = scikit_data[dataset][0].get(geneID, np.nan)
                current_ribo = current_profile[0]
                current_ribo = current_ribo[8:-8]  # trim 8 codons at each end

                N = len(sralist)
                M = len(current_ribo)
                print(geneID, M)

                if ix == 0:
                    # initialise the (datasets x positions) matrix for this gene
                    current_matrix = np.zeros((N, M)) * np.nan
                    current_seq_codons = current_profile[1]
                    current_seq_codons = current_seq_codons[8:-8]
                    current_seq_aa = current_profile[2]
                    current_seq_aa = current_seq_aa[8:-8]

                    # store the codon / amino-acid sequences once per gene,
                    # reversed for genes on the reverse/complementary strand
                    if strand_by_geneID_dict.get(geneID, "NA") == "+":
                        seq_codons[geneID] = current_seq_codons
                        seq_aa[geneID] = current_seq_aa
                    elif strand_by_geneID_dict.get(geneID, "NA") == "-":
                        seq_codons[geneID] = current_seq_codons[::-1]
                        seq_aa[geneID] = current_seq_aa[::-1]

                # fill this dataset's row, reversing reverse-strand profiles
                if strand_by_geneID_dict.get(geneID, "NA") == "+":
                    current_matrix[ix, :] = current_ribo
                elif strand_by_geneID_dict.get(geneID, "NA") == "-":
                    current_matrix[ix, :] = current_ribo[::-1]

        # keep the gene only if the summed matrix is positive
        # (genes with missing datasets give NaN sums and are dropped)
        if np.sum(current_matrix) > 0:
            scikit_mat[geneID] = current_matrix

#    scikit_df = pd.DataFrame(values_list, columns=columns_list)

    return scikit_mat, seq_codons, seq_aa
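A rough usage sketch for the function above — the run IDs, gene ID, and toy profile values are hypothetical, and `strand_by_geneID_dict` is assumed to be defined at module level next to the function, as the code expects:

import numpy as np

# Two hypothetical runs, one gene, and a toy 30-codon profile of
# (codon_density_profile, codon_triplet, codon_AA).
sralist = ["SRR000001", "SRR000002"]
profile = (list(range(30)),   # codon-level ribosome densities
           ["ATG"] * 30,      # codon triplets
           ["M"] * 30)        # amino acids
scikit_data = {run: ({"YAL001C": profile}, None) for run in sralist}

# Module-level strand mapping the function reads ("-" triggers reversal).
strand_by_geneID_dict = {"YAL001C": "-"}

mats, codons, aas = build_mat_scikit_strandOriented(sralist, scikit_data)
print(mats["YAL001C"].shape)   # (2, 14): 2 runs x (30 codons - 2*8 trimmed)
print(codons["YAL001C"][:3])   # reversed codon sequence for the "-" strand gene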
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n super().__init__()\n self.upperBoundUsed = False # True if the distribution is right truncated\n self.lowerBoundUsed = False # True if the distribution is left truncated\n self.hasInfiniteBound = False # True if the untruncated distribution has bounds of +- system max\n self.upperBound = None # Right bound\n self.lowerBound = None # Left bound\n self.__adjustmentType = '' # this describe how the re-normalization to preserve the probability should be done for truncated distributions\n self.dimensionality = None # Dimensionality of the distribution (1D or ND)\n self.distType = None # Distribution type (continuous or discrete)\n self.memory = False # This variable flags if the distribution has history dependence in the sampling process (True) or not (False)\n self.printTag = 'DISTRIBUTIONS'\n self.preferredPolynomials = None # best polynomial for probability-weighted norm of error\n self.preferredQuadrature = None # best quadrature for probability-weighted norm of error\n self.compatibleQuadrature = [] #list of compatible quadratures\n self.convertToDistrDict = {} #dict of methods keyed on quadrature types to convert points from quadrature measure and domain to distribution measure and domain\n self.convertToQuadDict = {} #dict of methods keyed on quadrature types to convert points from distribution measure and domain to quadrature measure and domain\n self.measureNormDict = {} #dict of methods keyed on quadrature types to provide scalar adjustment for measure transformation (from quad to distr)\n self.convertToDistrDict['CDFLegendre'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFLegendre'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFLegendre'] = self.CDFMeasureNorm\n self.convertToDistrDict['CDFClenshawCurtis'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFClenshawCurtis'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFClenshawCurtis'] = self.CDFMeasureNorm", "def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()", "def _2d_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.bounded_2d_kde import Bounded_2d_kde\n\n # get bounds\n lows = []\n highs = []\n methods = []\n for param in 
self.parameters[0:2]:\n if param in DEFAULT_BOUNDS:\n lows.append(\n DEFAULT_BOUNDS[param][\"low\"]\n if \"low\" in DEFAULT_BOUNDS[param]\n else None\n )\n highs.append(\n DEFAULT_BOUNDS[param][\"high\"]\n if \"high\" in DEFAULT_BOUNDS[param]\n else None\n )\n methods.append(\n DEFAULT_BOUNDS[param][\"method\"]\n if \"method\" in DEFAULT_BOUNDS[param]\n else \"Reflection\"\n )\n\n if self.plottype == \"triangle\":\n from pesummary.core.plots.publication import triangle_plot as plotfunc\n elif self.plottype == \"reverse_triangle\":\n from pesummary.core.plots.publication import (\n reverse_triangle_plot as plotfunc,\n )\n else:\n # contour plot\n from pesummary.core.plots.publication import (\n comparison_twod_contour_plot as plotfunc,\n )\n\n # set KDE information\n kwargs.update(\n {\n \"kde\": Bounded_2d_kde,\n \"kde_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n }\n )\n\n # default to not showing data points\n if \"plot_datapoints\" not in kwargs:\n kwargs[\"plot_datapoints\"] = False\n\n if \"triangle\" in self.plottype:\n from pesummary.core.plots.bounded_1d_kde import bounded_1d_kde\n\n # set KDE informaiton\n kwargs.update(\n {\n \"kde_2d\": Bounded_2d_kde,\n \"kde_2d_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n \"kde\": bounded_1d_kde,\n }\n )\n\n kwargs[\"kde_kwargs\"] = {\n \"x_axis\": {\"xlow\": lows[0], \"xhigh\": highs[0], \"method\": methods[0]},\n \"y_axis\": {\"xlow\": lows[1], \"xhigh\": highs[1], \"method\": methods[1]},\n }\n\n args = [\n [samps[self.parameters[0]].values for samps in self._samples.values()],\n [samps[self.parameters[1]].values for samps in self._samples.values()],\n ]\n\n if \"xlabel\" not in kwargs:\n kwargs[\"xlabel\"] = self.latex_labels[self.parameters[0]]\n if \"ylabel\" not in kwargs:\n kwargs[\"ylabel\"] = self.latex_labels[self.parameters[1]]\n\n if \"labels\" not in kwargs and len(self.results) > 1:\n kwargs[\"labels\"] = list(self._samples.keys())\n\n # set injection parameter values\n if self.injection_parameters is not None:\n if (\n self.injection_parameters[self.parameters[0]] is not None\n and self.injection_parameters[self.parameters[1]] is not None\n ):\n kwargname = \"truths\" if self.plottype == \"corner\" else \"truth\"\n kwargs[kwargname] = [\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]],\n self.injection_parameters[self.parameters[1]]\n - self.parameter_offsets[self.parameters[1]],\n ]\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **kwargs)\n\n return fig", "def test_by_csa(df):\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 3))\n f, axes = plt.subplots(4, 1, figsize=(4, 9))#, sharex=True)\n sns.despine(top=True, bottom=True)\n f.suptitle(\"Diagnostic Test\\nGrouped by %CSA\")\n\n OSA_pure_df = df.loc[df['BaseDx'] == \"Mainly OSA\"]\n OSA_predom_df = df.loc[df['BaseDx'] == \"Combined OSA/CSA\"]\n CSA_predom_df = df.loc[df['BaseDx'] == \"Predominantly CSA\"]\n CSA_pure_df = df.loc[df['BaseDx'] == \"Pure CSA\"]\n\n OSA_pure_hist = OSA_pure_df['StudyType'].value_counts()\n OSA_predom_hist = OSA_predom_df['StudyType'].value_counts()\n CSA_predom_hist = CSA_predom_df['StudyType'].value_counts()\n CSA_pure_hist = CSA_pure_df['StudyType'].value_counts()\n\n # Pure OSA\n axes[0].set(xlabel=\"\", ylabel=\"<10% CSA\")\n osa_pure_wedges, _, _ = axes[0].pie(OSA_pure_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n 
textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[0].legend(osa_pure_wedges, OSA_pure_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom OSA\n axes[1].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n osa_predom_wedges, _, _ = axes[1].pie(OSA_predom_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[1].legend(osa_predom_wedges, OSA_predom_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom CSA\n axes[2].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n csa_predom_wedges, _, _ = axes[2].pie(CSA_predom_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[2].legend(csa_predom_wedges, CSA_predom_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Pure CSA\n axes[3].set(xlabel=\"Patients With Each Etiology Contributing to CSA\", ylabel=\">90% CSA\")\n\n csa_pure_wedges, _, _ = axes[3].pie(CSA_pure_hist, autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[3].legend(csa_pure_wedges, CSA_pure_hist.keys(), loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n axes[3].set(xlabel=\"\\nProportion using each type \\nof diagnostic test\")\n\n f.tight_layout(rect=[0, 0, 1, 0.95]) # .95 to leave space for title\n f.savefig('Diag Test by percentage CSA.png', dpi=100)\n # plt.show()", "def plot_evaluation(parameters_dict, log_df, settings, evaluation_set_kde, plotname):\n\n\n plots = []\n\n\n ### setup the colors for each component\n if int(settings['nr_components']) < 3:\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)']\n elif int(settings['nr_components']) < 13:\n colors = np.array(cl.scales[str(settings['nr_components'])]['qual']['Paired'])\n else:\n colors = cl.interp(cl.scales['10']['qual']['Paired'], 20)\n\n\n ### set up ab list\n ab_list = evaluation_set_kde['contact'].keys()\n\n\n\n\n ####################### plotting of settings\n print_to_table = {}\n for key in sorted(settings.keys()):\n if key not in ['fold_id_dir','plot_name', 'fixed_parameters', 'threads_proteins', 'qijab_dir',\n 'debug_mode', 'parameter_file', 'settings_file', 'optimization_log_file', 'braw_dir', 'pdb_dir', 'paramdir',\n 'mask_sse', 'lambda_w_fix', 'lfactor', 'plotdir', 'psicov_dir', 'contact', 'hessian_pseudocount']:\n print_to_table[key] = settings[key]\n\n print(\"Generate settings table...\")\n table_settings_1 = plot_settings_table(print_to_table, 1)\n table_settings_2 = plot_settings_table(print_to_table, 2)\n table_settings_3 = plot_settings_table(print_to_table, 3)\n plots.append(table_settings_1)\n plots.append(table_settings_2)\n plots.append(table_settings_3)\n\n\n ####################### negLL and realted plots\n if 'step' in log_df.columns and 'pass' in log_df.columns:\n\n if 'negLL' in log_df.columns:\n plot_negll = plot_convergence_trace_plotly(log_df,\n name=['negLL', 'negLL_crossval'],\n plot_title='neg LL trace for training and cross-val set')\n plots.append(plot_negll)\n\n plot_expfit_negll = plot_exponentialFit_negLL(log_df, plot_title='exponential Fit neg LL')\n plots.append(plot_expfit_negll)\n\n if 'timestamp' in log_df.columns:\n plot_timestamps = plot_convergence_trace_plotly(log_df,\n 
name=['timestamp'],\n plot_title='time (s) per iteration')\n plots.append(plot_timestamps)\n\n\n if 'gradient_norm_weights' in log_df.columns:\n plot_grad_norm_weights = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_weights'],\n plot_title='norm of weight gradients')\n plots.append(plot_grad_norm_weights)\n\n if 'gradient_norm_means' in log_df.columns:\n plot_grad_norm_means = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_means'],\n plot_title='norm of mean gradients')\n plots.append(plot_grad_norm_means)\n\n if 'gradient_norm_prec' in log_df.columns:\n plot_grad_norm_prec = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_prec'],\n plot_title='norm of precMat gradients')\n plots.append(plot_grad_norm_prec)\n\n\n ####################### plotting of parameters\n print(\"Generate distribution of parameters...\")\n\n #weights\n weights_dict = {}\n for component in range(settings['nr_components']):\n weights_dict['component ' + str(component)] = {\n 'weights (contact)': parameters_dict[\"weight_contact_\" + str(component)][0],\n 'weights (bg)': parameters_dict[\"weight_bg_\" + str(component)][0]\n }\n plot_weights = plot_barplot(\n weights_dict,\n 'Distribution of weights',\n 'component weights',\n type='group',\n colors=colors\n #,plot_out=\"/home/vorberg/weights.html\"\n )\n\n #mu\n mu_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'mu' in k))\n plot_means = plot_boxplot(\n mu_df,\n 'Distribution of Means',\n \"values of mean parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/mus.html\"\n )\n\n #std deviation\n prec_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'prec' in k))\n try:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/p))\n if settings['prec_wrt_L']:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/(p*142))) #in case precision is specified depending on L=142\n except ZeroDivisionError as e:\n print(e)\n std_dev=prec_df\n\n std_dev.columns = [column_name.replace(\"prec\", \"std\") for column_name in std_dev.columns]\n plot_stddev = plot_boxplot(\n std_dev,\n 'Distribution of std deviations',\n \"values of std deviation parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/std.html\"\n )\n\n\n plots.append(plot_weights)\n plots.append(plot_means)\n plots.append(plot_stddev)\n\n ####################### Scatterplot mu vs std dev\n print(\"Generate scatter plot mu vs std...\")\n scatter_dict = {}\n for component in range(settings['nr_components']):\n scatter_dict['mu_'+str(component)] = [\n mu_df['mu_'+str(component)].tolist(),\n std_dev['std_'+str(component)].tolist(),\n AB.values()\n ]\n plot_mu_vs_stddev = plot_scatter(scatter_dict,\n 'Mean vs std deviation',\n 'mean',\n \"std deviation\",\n False,\n colors\n #,plot_out=\"/home/vorberg/mu_vs_std.html\"\n )\n\n plots.append(plot_mu_vs_stddev)\n\n\n ############################################## plotting of gradient norms\n print(\"Generate gradient norms plot...\")\n\n #gradients for mu\n mu_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'mu_'+str(component)\n mu_grad_dict[key] = log_df[key].tolist()[-1]\n annotations_dict[key] = AB\n\n\n plot_gradient_mu_stats = jitter_plot(mu_grad_dict,\n 'Distribution of gradients for mean in last iteration',\n annotations_dict,\n colors,\n None)\n plots.append(plot_gradient_mu_stats)\n\n\n #gradients for precMat\n precMat_grad_dict = {}\n annotations_dict = {}\n for component in 
range(settings['nr_components']):\n key = 'prec_'+str(component)\n precMat_grad_dict['diagPrecMat_'+str(component)] = log_df[key].tolist()[-1]\n annotations_dict['diagPrecMat_'+str(component)] = AB\n\n\n plot_gradient_precMat_stats = jitter_plot(\n precMat_grad_dict,\n 'Distribution of gradients for precMat in last iteration',\n annotations_dict,\n colors,\n None\n )\n plots.append(plot_gradient_precMat_stats)\n\n ##################################### plotting of gradient trace of a specific ab pair for all components\n print(\"Generate gradient trace plot...\")\n\n gradient_df = log_df.filter(regex=(\"mu_[0-9]*\"))\n plot_gradient_mu_ab_trace = plot_gradient_ab_trace(gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_mu_ab_trace)\n\n gradient_df = log_df.filter(regex=(\"prec_[0-9]*\"))\n plot_gradient_prec_ab_trace = plot_gradient_ab_trace(\n gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_prec_ab_trace)\n\n\n ##################################### plotting of univariate mixtures\n if len(evaluation_set_kde['contact']) == 0 or len(evaluation_set_kde['bg']) == 0:\n print \"Evaluation set is empty. Cannot plot Mixture Visualization.\"\n else:\n print(\"Generate parameter visualization 1d plots...\")\n plots.append(plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L']))\n # plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L'], plot_out=\"/home/vorberg/1d_vis.html\")\n\n # ------------------------------------------------------------------------------\n ### define merged plot\n # ------------------------------------------------------------------------------\n cols = 3.0\n rows = int(np.ceil((len(plots)-1) / cols)) + 2\n subplot_titles = []\n\n # set up titles\n for plot in range(len(plots)-1):\n subplot_titles.append(plots[plot]['layout']['title'])\n if len(subplot_titles) < (cols * (rows-2)):\n for i in range(int((cols * (rows-2))) - len(subplot_titles) ):\n subplot_titles.append(\" \")\n subplot_titles.append(plots[-1]['layout']['title'])\n\n\n # plot all plots as subplots\n fig = tools.make_subplots(rows=rows,\n cols=3,\n specs = [ [{} for col in range(int(cols))] for row in range(rows-2)] + \\\n [[{'rowspan':2, 'colspan': 3}, None, None], [None, None, None]],\n subplot_titles=tuple(subplot_titles),\n print_grid=False)\n\n\n\n\n for i, plot in enumerate(plots[:-1]):\n col = i % int(cols)\n row = (i - col) / int(cols)\n\n #add traces to subplot\n for trace in plot['data']:\n trace['showlegend']=False\n fig.append_trace(trace, row + 1, col + 1)\n\n # adjust x and y axis for table plotting\n if 'annotations' in plot['layout'].keys():\n for cell in plot['layout']['annotations']:\n cell['yref'] = 'y' + str(i + 1)\n cell['xref'] = 'x' + str(i + 1)\n fig['layout']['annotations'] += plot['layout']['annotations']\n\n # adjust axis for all plots\n fig['layout']['xaxis' + str(i + 1)].update(plot['layout']['xaxis1'])\n fig['layout']['yaxis' + str(i + 1)].update(plot['layout']['yaxis1'])\n\n ## add mixture visualisation plot - spans 3 columns\n for trace in plots[-1]['data']:\n fig.append_trace(trace, int(rows)-1, 1)\n fig['layout']['xaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['xaxis1'])\n fig['layout']['yaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['yaxis1'])\n\n #check which plots are visible/invisible according to menu selection\n trace_visibility_ab = {}\n for ab in range(len(ab_list)):\n trace_visibility_ab[ab] = 
[]\n for i, plot in enumerate(plots):\n if 'updatemenus' not in plot['layout'].keys():\n trace_visibility_ab[ab].extend([True] * len(plot['data']))\n else:\n trace_visibility_ab[ab].extend(plot['layout']['updatemenus'][0]['buttons'][ab]['args'][1])\n\n\n #use menu of last plot (=vis of mixture) as template for multiplot menu\n fig['layout']['updatemenus'] = plots[-1]['layout']['updatemenus']\n for ab in range(len(ab_list)):\n fig['layout']['updatemenus'][0]['buttons'][ab]['args'][1] = trace_visibility_ab[ab]\n\n\n fig['layout']['legend']['yanchor'] = 'bottom'\n fig['layout']['legend']['y'] = 0\n fig['layout']['height'] = rows * 250\n fig['layout']['font'] = {'size': 18} # set global font size\n\n plotly_plot(fig, filename=plotname, auto_open=False)", "def get_contacts_plots(itype, ligandonly): \n\n # Creating set_itypes and loading data\n if itype == \"all\":\n set_itypes = set((\"sb\", \"pc\", \"ps\", \"ts\", \"vdw\", \"hp\", \"hb\", \"hbbb\", \"hbsb\", \"hbss\", \"wb\", \"wb2\", \"hbls\", \"hblb\", \"all\"))\n df_raw = None\n for itype_df in set_itypes:\n df_raw_itype = pd.read_csv(str(basepath + \"contact_tables/compare_\" + itype_df + \".tsv\"), sep=\"\\s+\")\n df_raw = pd.concat([df_raw, df_raw_itype])\n else: \n set_itypes = { itype }\n df_raw = pd.read_csv(str(basepath + \"contact_tables/compare_\" + itype + \".tsv\"), sep=\"\\s+\")\n\n print(\"Computing contmaps inputs for %s-%s\" % (itype, ligandonly))\n\n #Loading files\n compl_data = json_dict(str(basepath + \"compl_info.json\"))\n flare_template = json_dict(basepath + \"template.json\")\n\n # Adapting to Mariona's format\n df_original = adapt_to_marionas(df_raw)\n\n # If is working with total frequency and all interaction partners, create a new flareplot template file\n if (itype=='all') and (ligandonly=='prt_lg'):\n flareplot_template(df_original, basepath)\n \n # Filtering out non-ligand interactions if option ligandonly is True\n if ligandonly == \"lg\":\n ligandfilter = df_original['Position'].str.contains('Ligand')\n df_original = df_original[ligandfilter]\n elif ligandonly == \"prt\":\n ligandfilter = ~df_original['Position'].str.contains('Ligand')\n df_original = df_original[ligandfilter]\n\n df_original = filter_same_helix(df_original)\n\n #Removing low-frequency contacts\n df_original = filter_lowfreq(df_original, itype)\n\n #Add \\n between GPCR nomenclatures, to show it multiline in the heatmap axis \n df_original = set_new_axis(df_original)\n\n # Excluding non-standard (and by standard I'm saying \"made by us\", in the simulation rounds) simulations\n (df_standard) = split_by_standard(df_original, compl_data)\n \n #Repeat everything for standartd and non-standard dataframes (our simulations and the simulations from everone in GPCRmd)\n for (stnd,df) in ((\"cmpl\", df_original), (\"stnd\", df_standard)):\n \n #If doesn't exists yet, create base input folder\n options_path = \"%scontmaps_inputs/%s/%s/%s/\" %(basepath, itype, stnd, ligandonly)\n os.makedirs(options_path, exist_ok=True)\n\n # If there are no interactions with this ligandonly-itype combination\n if df.empty:\n print(\"No interactions avalible for this molecular partners and interaction type: %s and %s\" % (ligandonly, itype) )\n return\n\n # Setting columns 'Position', 'leter+Position1' and 'leter+Position2' in df for jsons files \n df_columned = new_columns(df, itype)\n df_columned.to_pickle(options_path+\"dataframe_customflareplot.pkl\")\n\n #Dropping away Position columns, once they are not needed\n df_drop = df.drop(['Position1','Position2'], 1)\n \n 
# Stack matrix (one row for each interaction pair and dynamic. Colnames are position, dynid and itypes)\n df_ts = stack_matrix(df_drop, set_itypes)\n \n #Dropping away non selected-type interaction rows.\n df_drop = df_drop[df_drop['itype'] == itype]\n df_drop.drop('itype',axis=1, inplace=True)\n\n # Set position as row index of the dataframe\n df_drop = df_drop.set_index('Position') \n\n # Labels for dendogram\n dendlabels_dyns = list(df_drop.columns)\n \n # Making one-simulation flareplots. Only done in cmpl to avoid repeating same Simulations\n if stnd == \"cmpl\":\n sim_jsons_path = '%scontmaps_inputs/%s/simulation_jsons/%s/' % (basepath, itype, ligandonly)\n dyn_flareplots(df_columned, sim_jsons_path, dendlabels_dyns, itype, flare_template)\n\n #Computing frequency matrix\n dend_matrix = frequencies(df_drop)\n (recept_info,recept_info_order,df_ts,dyn_gpcr_pdb,index_dict)=improve_receptor_names(df_ts,compl_data)\n \n # Apending column with PDB ids\n pdb_id = recept_info_order['pdb_id']\n df_ts['pdb_id'] = df_ts['Id'].apply(lambda x: recept_info[x][pdb_id])\n \n #Storing dataframe with results in a CSV file, downloadable from web\n create_csvfile(options_path, recept_info,df_drop)\n\n # Add residue types to dataframe\n df_ts = add_restypes(df_ts, compl_data, recept_info, recept_info_order)\n\n #Preparing dendrogram folders and parameters\n dendfolder = options_path + \"dendrograms/\" \n os.makedirs(dendfolder, exist_ok = True)\n dend_height = int( int(df.shape[1]) * 18.5)\n dend_width = 450\n\n # Computing several dendrograms and corresponding json files\n #for cluster in [2]:# DEBUG\n for cluster in list(range(2,21)):\n print(' computing dendrogram with '+str(cluster)+' clusters')\n dendfile = (\"%s%iclusters_dendrogram.html\" % (dendfolder, cluster))\n (dyn_dend_order, clustdict) = dendrogram_clustering(dend_matrix, dendlabels_dyns, dend_height, dend_width, dendfile, cluster, recept_info, recept_info_order)\n # Write dynamicID-cluster dictionary on a json\n clustdir = \"%sflarejsons/%sclusters/\" % (options_path, cluster)\n os.makedirs(clustdir, exist_ok= True)\n with open(clustdir + \"clustdict.json\", 'w') as clusdictfile:\n dump(clustdict, clusdictfile, ensure_ascii=False, indent = 4)\n\n #Jsons for the flareplots of this combinations of clusters\n flareplot_json(df_columned, clustdict, clustdir, flare_template)\n \n #Store Simulation names and dyn on file\n create_dyntoname_file(dyn_dend_order, recept_info, recept_info_order, options_path)\n \n for rev in [\"norev\",\"rev\"]:\n #for rev in [\"norev\"]:# DEBUG\n # If rev option is setted to rev, duplicate all lines with the reversed-position version \n #(4x32-2x54 duplicates to 2x54-4x32)\n if rev == \"rev\":\n df_ts_rev = reverse_positions(df_ts)\n else:\n df_ts_rev = df_ts\n \n df_ts_rev = sort_simulations(df_ts_rev, dyn_dend_order)\n\n #Taking some variables for dataframe slicing\n max_columns = 45\n pairs_number = df_drop.shape[0]\n inter_number = df_ts_rev.shape[0]\n inter_per_pair = (inter_number/pairs_number)/2 if rev == \"rev\" else inter_number/pairs_number \n number_heatmaps = ceil((inter_number/inter_per_pair)/max_columns)\n \n #Create heatmap folder if not yet exists\n heatmap_path_jupyter = settings.MEDIA_ROOT + \"Precomputed/get_contacts_files/contmaps_inputs/%s/%s/%s/heatmaps/%s/\" % (itype,stnd,ligandonly,rev)\n heatmap_path = \"%sheatmaps/%s/\" % (options_path,rev)\n os.makedirs(heatmap_path, exist_ok=True)\n\n #Saving dataframe for future uses in customized heatmaps\n 
df_ts_rev.to_pickle(heatmap_path+\"dataframe_for_customized.pkl\")\n \n #Make heatmaps each 50 interacting pairs\n div_list = []\n heatmap_filename_list = []\n number_heatmap_list = []\n prev_slicepoint = 0\n for i in range(1,number_heatmaps+1):\n number_heatmap_list.append(str(i))\n\n #Slice dataframe. Also definig heigth and width of the heatmap\n slicepoint = int(i*inter_per_pair*max_columns)\n if i == number_heatmaps:\n df_slided = df_ts_rev[prev_slicepoint:]\n else:\n df_slided = df_ts_rev[prev_slicepoint:slicepoint]\n w = int(df_slided.shape[0]/inter_per_pair*20+40)\n prev_slicepoint = slicepoint\n h=dend_height\n\n # Define bokeh figure and hovertool\n \n hover = create_hovertool(itype, itypes_order, hb_itypes, typelist)\n mysource,p = define_figure(w, h, df_slided, hover, itype)\n\n # Creating javascript for side-window\n mysource = select_tool_callback(recept_info, recept_info_order, dyn_gpcr_pdb, itype, typelist, mysource)\n\n # Extract bokeh plot components and store them in lists\n script, div = components(p)\n div_list.append(div.lstrip())\n heatmap_filename = \"%s%iheatmap.html\" % (heatmap_path_jupyter,i)\n heatmap_filename_list.append(heatmap_filename)\n\n # Write heatmap on file\n heatmap_filename = \"%s%iheatmap.html\" % (heatmap_path,i)\n with open(heatmap_filename, 'w') as heatmap:\n heatmap.write(script)\n\n # Write lists as python variables in a python file\n variables_file = \"%svariables.py\" % (heatmap_path)\n with open(variables_file, 'w') as varfile:\n varfile.write(\"div_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(div_list))\n varfile.write(\"heatmap_filename_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(heatmap_filename_list))\n varfile.write(\"number_heatmaps_list = [\\'%s\\']\\n\" % \"\\',\\'\".join(number_heatmap_list))", "def make_joint_pdf(self, benchmark) :\n \n #distortion case 1 -- taxes/subsidy uncorrelated with firm size or benchmark case where no tax/subsidy at all\n if self.distortion_case == 1 or benchmark == 1 : \n self.joint_pdf = self.prod_pdf_matrix * self.policy_pdf \n \n #distortion case 2 -- tax/subsidy negatively correlated with firm size, subsidize only fraction self.subsidy_frac of lowest prod plants\n if self.distortion_case == 2:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= self.subsidy_frac)\n self.joint_pdf[I,0]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf for s for the interval sub and sub+nosub. \n I=np.where((prod_cdf > self.subsidy_frac) & (prod_cdf <= self.subsidy_frac + self.excempt_frac))\n self.joint_pdf[I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > self.excempt_frac + self.subsidy_frac)\n self.joint_pdf[J,2]=self.prod_pdf[J]\n \n \n #distortion case 3 -- tax/subsidy positively correlated with firm size, subsidize only fraction self.subsidy_frac of highest prod plants\n elif self.distortion_case == 3:\n \n self.joint_pdf = np.zeros((self.Ns,self.ntau))\n prod_cdf = np.cumsum(self.prod_pdf) # cdf over the idiosyncratic draws of s\n I=np.where(prod_cdf <= 1-self.subsidy_frac - self.excempt_frac)\n self.joint_pdf[I,2]=self.prod_pdf[I] #take the lower part of the pdf over idiosyncratic draws of s to tax\n \n #if there is excempt firms\n if self.excempt_frac>0:\n #take the indices of pdf for s for the interval sub and sub+nosub. 
\n I = np.where((prod_cdf > 1 - self.subsidy_frac - self.excempt_frac) & (prod_cdf <= 1 - self.subsidy_frac))\n self.joint_pdf [I,1] = self.prod_pdf[I]\n \n J=np.where(prod_cdf > 1 - self.subsidy_frac)\n self.joint_pdf[J,0] = self.prod_pdf[J]", "def make_N4255_plots(data_obj, aspect_corr=1.0, title_pages=False):\n\n print(\"Generating plots...\")\n\n #Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n #Call the function to create the title page of the pdf document\n plot_front_title(data_obj)\n\n #-----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. #\n #-----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n #-----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n #-----------------------------------------------------------------------------------#\n\n #Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 1, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the first test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the third test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 3, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] #Make sure the local yi is updated\n\n #Plot the overall results text of the fourth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 4, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the fifth test, NEQ Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 5, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) #Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n #Plot the overall results text of the sixth test, Flatness of field\n yi = yi - dy[0]\n plot_ff_text(data_obj, 6, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the seventh test, Image Extent\n yi = yi - dy[0]\n plot_extent_text(data_obj, 7, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the eighth test, Image Area\n yi = yi - dy[0]\n plot_area_text(data_obj, 8, yi, xpos, ha, va, fs, dfs)\n\n #Plot the overall results text of the ninth test, Aspect Ratio\n yi = yi - dy[0]\n plot_a_ratio_text(data_obj, 9, yi, xpos, ha, va, fs, dfs)\n\n #--------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n #--------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs)\n\n\n #-----------------#\n # Plot the images #\n #-----------------#\n plot_images(data_obj, fs) #Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) #Add in the footnotes to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Penetration\")\n\n #Call the function to plot the Steel Penetration results to the pdf\n plot_steel_pen_N4255(data_obj, 1)\n\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 2, cmap)\n\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 3)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 4)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 5)\n\n #-------------------#\n # Flatness of field #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Flatness of Field\")\n\n # Call the function to plot the Flatness of Field results to the pdf\n plot_field_flatness(data_obj, 6)\n\n #--------------#\n # Image extent #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 7: Image Extent\")\n\n # Call the function to plot the Image Extent results to the pdf\n plot_image_extent(data_obj, 7)\n\n\n #------------#\n # Image Area #\n #------------#\n if title_pages:\n fig = new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.text(0.5, 0.5, 'Test 8: Image Area', ha='center', va='center', fontsize=20)\n str1 = str(data_obj.image_area[0]) + ' by ' + str(data_obj.image_area[1]) + ' pixels'\n plt.text(0.5, 0.4, str1, ha='center', va='center', fontsize=12)\n\n #--------------#\n # Aspect Ratio #\n #--------------#\n if title_pages:\n new_title_page(data_obj, \"Test 9: Aspect Ratio\")\n\n #Call the function to plot the Aspect Ratio results to the pdf\n plot_aspect_ratio(data_obj, 9, cmap, aspect_corr)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def make_F792_plots(data_obj, title_pages=False):\n\n print(\"Generating plots...\")\n\n # Create color maps\n cmap = plt.get_cmap('jet')\n cmap = plt.get_cmap('gray')\n\n # Call the\n plot_front_title(data_obj)\n\n # -----------------------------------------------------------------------#\n # Initialize the position variables for the text and graphs on the pdf. 
#\n # -----------------------------------------------------------------------#\n y0 = 0.9\n dy = [0.03, 0.025]\n\n ha = 'left'\n va = 'center'\n fs = 10\n dfs = 2\n\n # metric name value unc min\n xpos = [0.0, 0.4, 0.5, 0.75]\n yi = y0 - 0.1 # The position of the text on the y access, which is constantly updated as more text is added\n\n # -----------------------------------------------------------------------------------#\n # Plot the 'summary' page listing all the tests and the overall results - TEXT ONLY #\n # -----------------------------------------------------------------------------------#\n\n # Create the title of the page\n plot_overall_text(data_obj, yi, xpos, ha, va, fs)\n\n #Plot the overall results text of the first test, Steel Differentiation\n\n\n # Plot the overall results text of the second test, Penetration\n yi = yi - dy[0]\n plot_pen_text(data_obj, 2, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the third test, Organic Material Detection\n yi = yi - dy[0]\n plot_BSNR_text(data_obj, 3, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the fourth test, Spatial Resolution\n yi = yi - dy[0]\n plot_spatial_text(data_obj, 4, yi, yi - dy[1], xpos, ha, va, fs, dfs)\n yi = yi - dy[1] # Make sure the local yi is updated\n\n # Plot the overall results text of the fifth test, Dynamic Range\n yi = yi - dy[0]\n plot_dyn_text(data_obj, 5, yi, xpos, ha, va, fs, dfs)\n\n # Plot the overall results text of the sixth test, Noise\n yi = yi - dy[0]\n plot_noise_text(data_obj, 6, yi, dy, xpos, ha, va, fs, dfs)\n yi = yi - (dy[1] * 2) # Make sure to update yi, as it was only locally changed in 'plot_noise_text()'\n\n # --------------------------------------------------#\n # Plot the footnotes for the overall results page. 
#\n # --------------------------------------------------#\n plot_overall_footnotes(xpos, ha, va, fs, dfs, standard=\"ASTM F792\")\n\n\n #---------------------------------------------------------#\n # Plot the cropped and rotated images from the processing #\n #---------------------------------------------------------#\n plot_images(data_obj, fs) # Plot the images to the pdf\n\n plot_image_footnotes(data_obj, xpos, ha, va, fs, dfs) # Add in the footnotes to the pdf\n\n # NOTE: Above image plotting the same, with the same footnotes, for F792???\n\n #-----------------------------#\n # Steel differentiation plots #\n #-----------------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 1: Steel Differentiation\")\n\n #Call the function to plot the Steel Differentiation results to the pdf\n\n\n #-------------------#\n # Penetration plots #\n #-------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 2: Penetration\")\n\n # Call the function to plot the Steel Penetration results to the pdf\n #plot_steel_pen(data_obj, 2)\n\n #------------#\n # BSNR plots #\n #------------#\n if title_pages:\n new_title_page(data_obj, \"Test 3: Organic Material Detection\")\n\n # Call the function to plot the Organic Material Detection results to the pdf\n plot_BSNR(data_obj, 3, cmap)\n\n #--------------------#\n # Spatial Resolution #\n #--------------------#\n if title_pages:\n new_title_page(data_obj, \"Test 4: Spatial Resolution\")\n\n # Call the function to plot the Spatial Resolution results to the pdf\n plot_spatial_res(data_obj, 4)\n\n #---------------#\n # Dynamic Range #\n #---------------#\n if title_pages:\n new_title_page(data_obj, \"Test 5: Dynamic Range\")\n\n # Call the function to plot the Dynamic Range results to the pdf\n plot_dynamic_range(data_obj, 5)\n\n #-------#\n # Noise #\n #-------#\n if title_pages:\n new_title_page(data_obj, \"Test 6: Noise (NEQ)\")\n\n # Call the function to plot the Noise (NEQ) results to the pdf\n plot_noise(data_obj, 6)\n\n fig = new_pdf_page(data_obj.pdf_obj, open_fig=False)", "def plot_kde_per_sample(\n per_base_coverage_by_sample, sample_labels, sample_color_dict, region_dict\n):\n\n fig = ff.create_distplot(\n per_base_coverage_by_sample,\n sample_labels,\n # bin_size = .1,\n # curve_type = \"normal\",\n colors=[sample_color_dict[x] for x in sample_labels],\n show_rug=False,\n show_hist=False,\n )\n\n fig.update_layout(\n title_text=\"Density Coverage Distribution for {}:{}-{}\".format(\n region_dict[\"chrom\"], region_dict[\"start\"], region_dict[\"end\"]\n )\n )\n fig.update_layout(legend_title_text=\"Samples\")\n fig.update_layout(xaxis_title=\"Coverage\")\n fig.update_layout(yaxis_title=\"Density\")\n return fig.to_html(full_html=False, include_plotlyjs=\"cdn\")", "def construct_plot( snps_per_chr_in, indels_per_chr_in, result_file, result_table, resolution ):\n\t\n\t# --- conversion of data into lists of lists --- #\n\tchr_lengths = []\n\tchr_names = []\n\tsnps_per_chr = []\n\tindels_per_chr = []\n\tfor key in sorted( snps_per_chr_in.keys() ):\n\t\tsnps = snps_per_chr_in[ key ]\n\t\tindels = indels_per_chr_in[ key ]\n\t\tsnps_per_chr.append( snps )\n\t\tindels_per_chr.append( indels )\n\t\tchr_lengths.append( max( snps+indels ) )\n\t\tchr_names.append( key )\n\t\n\tmax_x_value = max( [ x for chro in snps_per_chr for x in chro ] + [ x for chro in indels_per_chr for x in chro ] )\n\t\n\t# --- generation of figure --- #\n\tfig, ax = plt.subplots()\n\t\n\tsnp_scale = 1\n\tindel_scale = 1\n\tsnp_data = []\n\tindel_data = []\n\tfor 
idx, chr_length in enumerate( chr_lengths ):\n\t\tmax_snp, max_indel, snp_temp, indel_temp = generate_binned_values( 0, resolution+0, chr_length, snps_per_chr[ idx ], indels_per_chr[ idx ], resolution )\n\t\tsnp_data.append( snp_temp )\n\t\tindel_data.append( indel_temp )\n\t\tsnp_scale = max( [ snp_scale, max_snp ] )\n\t\tindel_scale = max( [ indel_scale, max_indel ] )\n\t\n\tsnp_scale = float( snp_scale )\n\tindel_scale = float( indel_scale )\n\ty_max = len( chr_lengths )\n\t\n\tax2 = ax.twinx()\n\t\n\twith open( result_table, \"w\" ) as out:\n\t\tfor idx, chr_length in enumerate( chr_lengths ):\n\t\t\ty = y_max-( idx*1.2 )\n\t\t\tx = resolution / 1000000.0\n\t\t\t\n\t\t\tax.text( ( chr_length/ 1000000.0 ), y+0.3, chr_names[ idx ], ha=\"right\" )\n\t\t\t\n\t\t\t# --- plotting SNP and InDel distribution --- #\n\t\t\tfor i, snps in enumerate( snp_data[ idx ] ):\n\t\t\t\tindels = indel_data[ idx ][ i ]\n\t\t\t\t\n\t\t\t\tax.plot( [ x*i+0.5*x, x*i+0.5*x ], [ y, y+ ( snps / snp_scale ) ], \"-\", color=\"lime\" )\n\t\t\t\tax2.plot( [ x*i+0.5*x, x*i+0.5*x ], [ y, y+ ( indels / indel_scale ) ], \"-\", color=\"magenta\" )\n\t\t\t\n\t\t\tax.plot( [ 0, 0 ], [ y, y+1 ], color=\"black\" )\n\t\t\tax.text( 0, y+1, str( int( snp_scale ) ), ha=\"right\", fontsize=5 )\n\t\t\tax.text( 0, y+0.5, str( int( snp_scale / 2 ) ), ha=\"right\", fontsize=5 )\n\t\t\tax.text( 0, y, \"0\", ha=\"right\", fontsize=5 )\n\t\t\t\n\t\t\tax.plot( [ max_x_value, max_x_value ], [ y, y+1 ], color=\"black\" )\n\t\t\tax.text( max_x_value, y+1, str( int( indel_scale ) ), ha=\"right\", fontsize=5 )\n\t\t\tax.text( max_x_value, y+0.5, str( int( indel_scale / 2 ) ), ha=\"right\", fontsize=5 )\n\t\t\tax.text( max_x_value, y, \"0\", ha=\"right\", fontsize=5 )\n\t\t\t\n\t\t\t# --- writing data into output table --- #\n\t\t\tout.write( 'Chr' + str( idx+1 ) + \"SNVs:\\t\" + '\\t'.join( map( str, snp_data ) ) + '\\n' )\n\t\t\tout.write( 'Chr' + str( idx+1 ) + \"InDels:\\t\" + '\\t'.join( map( str, indel_data ) ) + '\\n' )\n\t\n\tax.set_xlabel( \"genomic position [ Mbp ]\" )\n\tax.set_ylabel( \"number of SNVs per interval\" )\n\tax2.set_ylabel( \"number of InDels per interval\" )\n\t\n\tax.set_xlim( 0, max( chr_lengths ) / 1000000.0 )\t\n\t\n\tax.legend( handles=[ mpatches.Patch(color='magenta', label='InDels'), mpatches.Patch(color='lime', label='SNVs') ], prop={'size':10} )\n\t\n\tax.spines['top'].set_visible(False)\n\tax.spines['right'].set_visible(False)\n\tax.spines['left'].set_visible(False)\n\t\n\tax.get_yaxis().set_ticks([])\n\tax2.get_yaxis().set_ticks([])\n\tax.yaxis.labelpad = 15\n\tax2.yaxis.labelpad = 15\n\t\n\t\n\tplt.subplots_adjust( left=0.1, right=0.9, top=0.99, bottom=0.1 )\n\tfig.savefig( result_file, dpi=600 )\n\t\n\tplt.close('all')", "def startrek_starships_specs():\n pdf = pd.DataFrame({\n 'uid': [\n 'NCC-1701',\n 'NCC-74656',\n 'NCC-1031',\n 'NCC-1764'\n ],\n 'warp': [\n 9.2,\n 9.975,\n 9.9,\n 9.2\n ]\n })\n return pdf", "def cmd_makecldf(self, args):\n wl = lingpy.Wordlist(self.raw_dir.joinpath(\"GEM-CNL.csv\").as_posix())\n concepts = args.writer.add_concepts(\n id_factory=lambda x: x.id.split(\"-\")[-1] + \"_\" + slug(x.english), lookup_factory=\"Name\"\n )\n for concept in self.conceptlists[0].concepts.values():\n for cis in concept.attributes[\"lexibank_gloss\"]:\n if cis not in concepts:\n concepts[cis] = concepts[concept.english]\n\n languages = args.writer.add_languages(lookup_factory=\"STEDT_Name\")\n args.writer.add_sources()\n\n for idx, language, concept, value, pos in wl.iter_rows(\n \"doculect\", 
\"concept\", \"reflex\", \"gfn\"\n ):\n # Fix for 251479\n if concept == \"top (i.e. highest point\":\n concept = \"top (i.e. highest point)\"\n\n if concept not in concepts:\n args.log.warning(concept)\n else:\n args.writer.add_forms_from_value(\n Language_ID=languages[language],\n Parameter_ID=concepts[concept],\n Value=value,\n Source=[\"Marrison1967\"],\n )", "def pcf2_iso_histo(data_location='../../fake_DATA/DATOS/data_500.dat',rand_location='../../fake_DATA/DATOS/rand0_500.dat', d_max=180.0, bins_number=30):\n \n data = np.loadtxt(fname=data_location, delimiter=\" \", usecols=(0,1,2))\n rand0 = np.loadtxt(fname=rand_location, delimiter=\" \", usecols=(0,1,2))\n \n if not data.shape == rand0.shape:\n raise Exception(\"The data file and rand file do not have the same size\")\n #351 s\n\n #Pure histograms\n start = time.perf_counter()\n print('start DDD distances')\n triangle_points = np.array(list(combinations(data,3)))\n r_12 = triangle_points[:,0,:]-triangle_points[:,1,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = triangle_points[:,1,:]-triangle_points[:,2,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = triangle_points[:,2,:]-triangle_points[:,0,:]\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n\n DDD, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n \n end = time.perf_counter()\n print(f'Finished creating the DDD histo in {end-start} s')\n\n start = time.perf_counter()\n print('start RRR distances')\n triangle_points = np.array(list(combinations(rand0,3)))\n r_12 = triangle_points[:,0,:]-triangle_points[:,1,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = triangle_points[:,1,:]-triangle_points[:,2,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = triangle_points[:,2,:]-triangle_points[:,0,:]\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n\n RRR, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n \n end = time.perf_counter()\n\n\n print(f'Finished creating the RRR histo in {end-start} s')\n\n #Mixed histogram\n start = time.perf_counter()\n print(\"Started gathering the data points pairs\")\n DD_side_points = np.array(list(combinations(data,2)))\n print(\"Finished data points pairs\")\n\n print(\"Started gathering the rand0 points pairs\")\n RR_side_points = np.array(list(combinations(rand0,2)))\n print(\"Finished rand0 points pairs\")\n\n print(\"Started loop for DDR and RRD histograms\")\n \n DDR = np.zeros((bins_number,bins_number,bins_number))\n RRD = np.zeros((bins_number,bins_number,bins_number))\n\n for data_point, rand_point in zip(data, rand0):\n ##DDR\n r_12 = rand_point-DD_side_points[:,0,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = DD_side_points[:,0,:]-DD_side_points[:,1,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = DD_side_points[:,1,:]-rand_point\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n H_DDR, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n DDR += H_DDR\n\n #RRD\n r_12 = data_point-RR_side_points[:,0,:]\n r_12=r_12**2\n 
r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = RR_side_points[:,0,:]-RR_side_points[:,1,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = RR_side_points[:,1,:]-data_point\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n H_RRD, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n RRD += H_RRD\n \n DDR = DDR/3\n RRD = RRD/3\n end = time.perf_counter()\n print(f'Finished the mixed histograms DDR an RRD in {end-start} s')\n\n return RRR, DDD, DDR, RRD, edges", "def plot2D_all(df, sample, sgn, pdf_key):\n\n for xvar in df.columns:\n for yvar in df.columns:\n if xvar!=yvar:\n fig, axs = plt.subplots(figsize=(15, 10))\n cax = plt.hist2d(df[xvar],df[yvar],range=[[df[xvar].min(), df[xvar].max()], [df[yvar].min(), df[yvar].max()]], bins=100,\n norm=mpl.colors.LogNorm(), cmap=plt.cm.viridis)\n\n\n if sgn==1:\n plt.title('Signal candidates ' + sample, fontsize = 25)\n\n if sgn==0:\n plt.title('Background candidates ' + sample, fontsize = 25)\n\n\n plt.xlabel(xvar, fontsize=25)\n plt.ylabel(yvar, fontsize=25)\n\n\n mpl.pyplot.colorbar()\n\n plt.legend(shadow=True,title =str(len(df))+ \" samples\")\n\n fig.tight_layout()\n plt.savefig(pdf_key,format='pdf')\n pdf_key.close()", "def estimate_object_PDFs(fluxratiodictionarylist,generatePDFplots=False,basename='NEOGALobject',AGNcol='blue',SFcol='red',verbose=True):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Loading NEOGAL models ')\n SF_models = nm.load_model('combined',filepath='/Users/kschmidt/work/catalogs/NEOGALlines/nebular_emission/')\n\n AGN_models = nm.load_model('combined',filepath='/Users/kschmidt/work/catalogs/NEOGALlines/AGN_NLR_nebular_feltre16/')\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Define all possible line ratios from the lines:\\n '\n 'NV1240, CIV1550, CIII1908, HeII1640, OIII1663, and SiIII1888')\n fluxratiodic = {} # [[SF range], [AGN range]]\n fluxratiodic['NV1240/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['NV1240/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['CIV1550/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIV1550/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['CIII1908/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['CIII1908/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['HeII1640/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/OIII1663'] = [[0,1e10],[0,1e10]]\n fluxratiodic['HeII1640/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['OIII1663/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/CIII1908'] = [[0,1e10],[0,1e10]]\n 
fluxratiodic['OIII1663/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['OIII1663/SiIII1888'] = [[0,1e10],[0,1e10]]\n\n fluxratiodic['SiIII1888/NV1240'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/CIV1550'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/CIII1908'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/HeII1640'] = [[0,1e10],[0,1e10]]\n fluxratiodic['SiIII1888/OIII1663'] = [[0,1e10],[0,1e10]]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Set up mode line flux vectors')\n fluxdic = {} # [[SF flxu], [AGN flux]]\n fluxdic['NV1240'] = [SF_models['NV1240'], AGN_models['NV1240']]\n fluxdic['CIV1550'] = [SF_models['CIV1548']+SF_models['CIV1551'], AGN_models['CIV1548']+AGN_models['CIV1551']]\n fluxdic['CIII1908'] = [SF_models['CIII1908'], AGN_models['CIII1907']+AGN_models['CIII1910']]\n fluxdic['HeII1640'] = [SF_models['HeII1640'], AGN_models['HeII1640']]\n fluxdic['OIII1663'] = [SF_models['OIII1661']+SF_models['OIII1666'], AGN_models['OIII1661']+AGN_models['OIII1666']]\n fluxdic['SiIII1888'] = [SF_models['SiIII1888'], AGN_models['SiIII1888']]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Get ranges of model flux ratios')\n # for FR in fluxratiodic.keys():\n # numerator = FR.split('/')[0]\n # denominator = FR.split('/')[1]\n # fluxratiodic[FR][0] = [np.min(fluxdic[numerator][0]/fluxdic[denominator][0]),\n # np.max(fluxdic[numerator][0]/fluxdic[denominator][0])]\n # fluxratiodic[FR][1] = [np.min(fluxdic[numerator][1]/fluxdic[denominator][1]),\n # np.max(fluxdic[numerator][1]/fluxdic[denominator][1])]\n\n\n Nobj = len(fluxratiodictionarylist)\n if verbose: print(' - Get model selection given flux ratio ranges according to '+\n str(Nobj)+\" object's data provided \")\n if verbose: print(' Selection based on the total number of NEOGAL models: SF='+str(len(SF_models))+' and AGN='+str(len(AGN_models )))\n\n parametercollection_SF = [{'id':0, 'Zgas':[],'logUs':[],'xid':[],'nh':[],'COCOsol':[],'mup':[]}]*Nobj\n parametercollection_AGN = [{'id':0, 'Zgas':[],'logUs':[],'xid':[],'nh':[],'alpha':[]}]*Nobj\n\n for oo, FRdic_input in enumerate(fluxratiodictionarylist):\n objid = FRdic_input['id']\n\n # ------ resetting flux ratio dictionary for object ------\n fluxratiodic_obj = {}\n for key in fluxratiodic.keys():\n fluxratiodic_obj[key] = fluxratiodic[key]\n # --------------------------------------------------------\n\n for FR in FRdic_input.keys():\n if FR in fluxratiodic.keys():\n fluxratiodic_obj[FR] = [FRdic_input[FR],FRdic_input[FR]]\n # print(str(objid)+':'+FR+' -->'+str(fluxratiodic_obj[FR]))\n elif FR == 'id':\n pass\n else:\n print(' WARNING nm.estimate_object_PDFs(): The flux ratio entry '+FR+' is not availble in the \\n'\n ' dictionary from the NEOGAL models. 
Define that flux \\n'\n ' ratio or correct input data.')\n\n goodent_SF = np.arange(len(SF_models))\n goodent_AGN = np.arange(len(AGN_models))\n for FR in fluxratiodic.keys():\n numerator = FR.split('/')[0]\n denominator = FR.split('/')[1]\n\n goodent_FR_SF = np.where( (fluxdic[numerator][0]/fluxdic[denominator][0] >= fluxratiodic_obj[FR][0][0]) &\n (fluxdic[numerator][0]/fluxdic[denominator][0] <= fluxratiodic_obj[FR][0][1]))[0]\n goodent_SF = np.intersect1d(goodent_SF,goodent_FR_SF)\n\n goodent_FR_AGN = np.where( (fluxdic[numerator][1]/fluxdic[denominator][1] >= fluxratiodic_obj[FR][1][0]) &\n (fluxdic[numerator][1]/fluxdic[denominator][1] <= fluxratiodic_obj[FR][1][1]))[0]\n goodent_AGN = np.intersect1d(goodent_AGN,goodent_FR_AGN)\n\n\n parametercollection_SF[oo] = {'id' : FRdic_input['id'],\n 'Zgas' : SF_models['Zgas'][goodent_SF],\n 'logUs' : SF_models['logUs'][goodent_SF],\n 'xid' : SF_models['xid'][goodent_SF],\n 'nh' : SF_models['nh'][goodent_SF],\n 'COCOsol': SF_models['COCOsol'][goodent_SF],\n 'mup' : SF_models['mup'][goodent_SF]}\n\n parametercollection_AGN[oo] = {'id' : FRdic_input['id'],\n 'Zgas' : AGN_models['Zgas'][goodent_AGN],\n 'logUs' : AGN_models['logUs'][goodent_AGN],\n 'xid' : AGN_models['xid'][goodent_AGN],\n 'nh' : AGN_models['nh'][goodent_AGN],\n 'alpha' : AGN_models['alpha'][goodent_AGN]}\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if verbose: print(' - Getting distribution ranges (percentiles) for parameter collections ')\n stat_SF = []\n stat_AGN = []\n\n for oo in np.arange(Nobj):\n stat_SF.append({'id':parametercollection_SF[oo]['id'], 'Zgas':[],'logUs':[],'xid':[],'nh':[],'COCOsol':[],'mup':[]})\n for key in stat_SF[oo].keys():\n if key == 'id': continue\n\n if len(parametercollection_SF[oo][key]) > 0:\n\n meanval_SF = np.mean(parametercollection_SF[oo][key])\n std_SF = np.std(parametercollection_SF[oo][key])\n medianval_SF = np.median(parametercollection_SF[oo][key])\n perc2p5_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.025)]\n perc16_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.16)]\n perc25_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.25)]\n perc50_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.50)]\n perc75_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.75)]\n perc84_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.84)]\n perc97p5_SF = np.sort(parametercollection_SF[oo][key])[int(len(parametercollection_SF[oo][key])*0.975)]\n\n stat_SF[oo][key] = [meanval_SF,std_SF,medianval_SF,perc2p5_SF,perc16_SF,perc25_SF,\n perc50_SF,perc75_SF,perc84_SF,perc97p5_SF]\n else:\n stat_SF[oo][key] = [np.nan]*10\n\n stat_AGN.append({'id':parametercollection_AGN[oo]['id'], 'Zgas':[],'logUs':[],'xid':[],'nh':[],'alpha':[]})\n for key in stat_AGN[oo].keys():\n if key == 'id': continue\n\n if len(parametercollection_AGN[oo][key]) > 0:\n meanval_AGN = np.mean(parametercollection_AGN[oo][key])\n std_AGN = np.std(parametercollection_AGN[oo][key])\n medianval_AGN = np.median(parametercollection_AGN[oo][key])\n perc2p5_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.025)]\n perc16_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.16)]\n perc25_AGN = 
np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.25)]\n perc50_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.50)]\n perc75_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.75)]\n perc84_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.84)]\n perc97p5_AGN = np.sort(parametercollection_AGN[oo][key])[int(len(parametercollection_AGN[oo][key])*0.975)]\n\n stat_AGN[oo][key] = [meanval_AGN,std_AGN,medianval_AGN,perc2p5_AGN,perc16_AGN,perc25_AGN,\n perc50_AGN,perc75_AGN,perc84_AGN,perc97p5_AGN]\n else:\n stat_AGN[oo][key] = [np.nan]*10\n\n stat_idlist = [stat_AGN[oo]['id'] for oo in np.arange(len(stat_AGN))]\n parametercollection_idlist = [parametercollection_AGN[oo]['id'] for oo in np.arange(len(parametercollection_AGN))]\n\n if stat_idlist != parametercollection_idlist:\n sys.exit(' NEOGALmodels.estimate_object_PDFs(): Wait a minute... the ID lists are not identical between \\n'\n ' the parameter collection ('+str(parametercollection_idlist)+') and \\n'\n ' the stats ('+str(stat_idlist)+')')\n\n plotname = basename+'_Stats.pdf'\n nm.plot_stat(plotname,stat_SF,stat_AGN,SFcol=SFcol,AGNcol=AGNcol,verbose=verbose)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if generatePDFplots:\n if verbose: print(' - Plotting the extracted model parameter collections')\n plotname = basename+'_PDFs.pdf'\n nm.plot_modelparametercollections(plotname, parametercollection_SF, parametercollection_AGN,\n stat_SF, stat_AGN, AGNcol=AGNcol,SFcol=SFcol,\n fluxratiodictionarylist=fluxratiodictionarylist,\n verbose=verbose)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n return parametercollection_SF, parametercollection_AGN, stat_SF, stat_AGN", "def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = 
ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. 
of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def coeff_display(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=2.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,13))\n for i in range(7):\n pl.subplot(7,1,i+1)\n pl.errorbar(ind,betaAll[i],yerr = betaErrAll[i],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,21)\n if i ==0:\n pl.ylim(-10,65)\n elif i ==1:\n pl.ylim(-5,6)\n elif i ==2:\n pl.ylim(-5,6)\n elif i == 3:\n pl.ylim(-0.1,0.1)\n elif i == 4:\n pl.ylim(-0.1,0.1)\n elif i ==5:\n pl.ylim(-100,100)\n elif i == 6:\n pl.ylim(-100,100)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20'))\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def df_2_dict(df,band_list,lens_model_list,source_model_list,lens_light_model_list):\n \n import re\n from lenstronomy.Util.param_util import 
ellipticity2phi_q\n from lenstronomy.Util.param_util import shear_cartesian2polar\n \n import pandas as pd\n \n\n model_kwarg_names = get_kwarg_names(lens_model_list,source_model_list,\n lens_light_model_list,None)\n \n IDs = df.loc[:,'ID']\n chi_sq = df.loc[:,'reduced chi^2']\n \n lens_dict = {}\n \n for i,prof in enumerate(lens_model_list):\n lens_dict[prof] = {}\n for param in model_kwarg_names['kwargs_lens'][i]:\n col = df.loc[:,'{}_lens.{}'.format(prof,param)]\n col_array = col.values\n lens_dict[prof][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['q'] = np.array([])\n lens_dict[prof]['phi'] = np.array([]) \n for j in range(len(lens_dict[prof]['e1'])):\n phi,q = ellipticity2phi_q(lens_dict[prof]['e1'][j],lens_dict[prof]['e2'][j])\n lens_dict[prof]['q'] = np.append(lens_dict[prof]['q'],q)\n lens_dict[prof]['phi'] = np.append(lens_dict[prof]['phi'],phi)\n elif 'gamma1' in model_kwarg_names['kwargs_lens'][i]:\n lens_dict[prof]['gamma'] = np.array([])\n lens_dict[prof]['theta'] = np.array([])\n for j in range(len(lens_dict[prof]['gamma1'])):\n theta,gamma = shear_cartesian2polar(lens_dict[prof]['gamma1'][j],lens_dict[prof]['gamma2'][j])\n lens_dict[prof]['gamma'] = np.append(lens_dict[prof]['gamma'],gamma)\n lens_dict[prof]['theta'] = np.append(lens_dict[prof]['theta'],theta)\n \n \n source_dict = {}\n lens_light_dict = {}\n \n for i,band in enumerate(band_list):\n for j,prof in enumerate(source_model_list):\n key = '{} Band: {}'.format(band,prof)\n source_dict[key] = {}\n for param in model_kwarg_names['kwargs_source'][j]:\n col = df.loc[:,'{} Band: {}_source.{}'.format(band,prof,param)]\n col_array = col.values\n source_dict[key][param] = col_array\n \n if 'e1' in model_kwarg_names['kwargs_source'][j]:\n source_dict[key]['q'] = np.array([])\n source_dict[key]['phi'] = np.array([]) \n for k in range(len(source_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(source_dict[key]['e1'][k],source_dict[key]['e2'][k])\n source_dict[key]['q'] = np.append(source_dict[key]['q'],q)\n source_dict[key]['phi'] = np.append(source_dict[key]['phi'],phi)\n \n for j,prof in enumerate(lens_light_model_list):\n key = '{} Band: {}'.format(band,prof)\n lens_light_dict[key] = {}\n for param in model_kwarg_names['kwargs_lens_light'][j]:\n col = df.loc[:,'{} Band: {}_lens_light.{}'.format(band,prof,param)]\n col_array = col.values\n lens_light_dict[key][param] = col_array\n\n if 'e1' in model_kwarg_names['kwargs_lens_light'][j]:\n lens_light_dict[key]['q'] = np.array([])\n lens_light_dict[key]['phi'] = np.array([]) \n for k in range(len(lens_light_dict[key]['e1'])):\n phi,q = ellipticity2phi_q(lens_light_dict[key]['e1'][k],lens_light_dict[key]['e2'][k])\n lens_light_dict[key]['q'] = np.append(lens_light_dict[key]['q'],q)\n lens_light_dict[key]['phi'] = np.append(lens_light_dict[key]['phi'],phi)\n \n params_dict = {'Object IDs': IDs.values,'Reduced Chi^2': chi_sq.values,\n 'lens': lens_dict, 'source': source_dict, 'lens_light': lens_light_dict}\n \n return params_dict", "def plot_data_histogram_details(\n info_dict: Dict, main_df: pd.DataFrame = main_df\n) -> Tuple:\n\n def _domain_name_gen(short_df: pd.DataFrame, dom1: str):\n for row in short_df[[\"template\", \"query\"]].itertuples():\n if row[1] != dom1:\n yield row[1]\n else:\n yield row[2]\n\n # domain1 in group 1\n dom1_inside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain1\"])\n | (main_df[\"query\"] == info_dict[\"domain1\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"X\"]))\n & 
(main_df[\"x_query\"] == int(info_dict[\"X\"]))\n )\n ]\n # domain 2 in group 2\n dom2_inside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain2\"])\n | (main_df[\"query\"] == info_dict[\"domain2\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"Y\"]))\n & (main_df[\"x_query\"] == int(info_dict[\"Y\"]))\n )\n ]\n if info_dict[\"swapFlag\"]:\n # domain 1 in group 2 Swap\n dom1_outside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain1\"])\n | (main_df[\"query\"] == info_dict[\"domain1\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"Y\"]))\n & (main_df[\"x_query\"] == int(info_dict[\"X\"]))\n )\n ]\n # domain 2 in group 1 Swap\n dom2_outside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain2\"])\n | (main_df[\"query\"] == info_dict[\"domain2\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"Y\"]))\n & (main_df[\"x_query\"] == int(info_dict[\"X\"]))\n )\n ]\n else:\n dom1_outside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain1\"])\n | (main_df[\"query\"] == info_dict[\"domain1\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"X\"]))\n & (main_df[\"x_query\"] == int(info_dict[\"Y\"]))\n )\n ]\n dom2_outside_df = main_df[\n (\n (main_df[\"template\"] == info_dict[\"domain2\"])\n | (main_df[\"query\"] == info_dict[\"domain2\"])\n )\n & (\n (main_df[\"x_template\"] == int(info_dict[\"X\"]))\n & (main_df[\"x_query\"] == int(info_dict[\"Y\"]))\n )\n ]\n # plot data frames\n df_plot1 = pd.DataFrame(\n {\n \"prob\": dom1_inside_df[\"prob\"].values,\n \"type\": [f\"In Group: ({info_dict['X']}) {x_group_label(info_dict['X'])}\"]\n * len(dom1_inside_df),\n \"domain\": list(_domain_name_gen(dom1_inside_df, info_dict[\"domain1\"])),\n \"in_flag\": [True] * len(dom1_inside_df),\n }\n )\n df_plot1 = df_plot1.append(\n pd.DataFrame(\n {\n \"prob\": dom1_outside_df[\"prob\"].values,\n \"type\": [\n f\"Out Group: ({info_dict['Y']}) {x_group_label(info_dict['Y'])}\"\n ]\n * len(dom1_outside_df),\n \"domain\": _domain_name_gen(dom1_outside_df, info_dict[\"domain1\"]),\n \"in_flag\": [False] * len(dom1_outside_df),\n }\n )\n ).reset_index(drop=True)\n # plot data frames 2\n df_plot2 = pd.DataFrame(\n {\n \"prob\": dom2_inside_df[\"prob\"].values,\n \"type\": [f\"In Group: ({info_dict['Y']}) {x_group_label(info_dict['Y'])}\"]\n * len(dom2_inside_df),\n \"domain\": list(_domain_name_gen(dom2_inside_df, info_dict[\"domain2\"])),\n \"in_flag\": [True] * len(dom2_inside_df),\n }\n )\n df_plot2 = df_plot2.append(\n pd.DataFrame(\n {\n \"prob\": dom2_outside_df[\"prob\"].values,\n \"type\": [\n f\"Out Group: ({info_dict['X']}) {x_group_label(info_dict['X'])}\"\n ]\n * len(dom2_outside_df),\n \"domain\": _domain_name_gen(dom2_outside_df, info_dict[\"domain2\"]),\n \"in_flag\": [False] * len(dom2_outside_df),\n }\n )\n ).reset_index(drop=True)\n return df_plot1, df_plot2", "def make_jpdf_plot(x_data,\n y_data,\n x_label,\n y_label, \n axis=\"\", \n title=\"\"):\n \n xmin = 0.\n ymax = 0.\n ymin = 0.\n ymax = 0.\n if axis == \"\":\n xmin = x_data.min()\n xmax = x_data.max()\n ymin = y_data.min()\n ymax = y_data.max()\n axis = [xmin,xmax,ymin,ymax]\n else:\n xmin = axis[0]\n xmax = axis[1]\n ymin = axis[2]\n ymax = axis[3]\n\n # prepare data for jpdf plot\n X, Y = np.mgrid[xmin:xmax:100j,ymin:ymax:100j]\n positions = np.vstack([X.ravel(), Y.ravel()])\n values = np.vstack([x_data,y_data])\n kernel = scipy.stats.gaussian_kde(values)\n Z = np.reshape(kernel(positions).T, X.shape)\n \n \n plt.figure()\n plt.pcolor(X,Y,Z)\n 
plt.plot(x_data, y_data, 'k.', markersize=3)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.axis([xmin, xmax,ymin,ymax])\n if not title == \"\":\n plt.title(title)\n #plt.set_ylim([ymin, ymax])\n cb = plt.colorbar()\n cb.set_label(\"probability density\")\n plt.show()", "def main():\n \n \"\"\" Download and load data\"\"\"\n dfs = get_data()\n \n \"\"\" Preprocess data, combine rows for country provinces\"\"\"\n combine_list = [\"Australia\", \"US\", \"Canada\", \"Mainland China\", \"China\"]\n for key in dfs.keys():\n dfs[key] = preprocess(df=dfs[key], combine_list=combine_list)\n \n \"\"\" Compute additional variables\"\"\"\n dfs = compute_deaths_over_closed(dfs)\n dfs = compute_active_cases(dfs)\n dfs = compute_death_rate(dfs)\n dfs = compute_df_reindexed(dfs, \"active_cases\")\n dfs = compute_df_reindexed(dfs, \"death_rate\")\n \n \"\"\"Remove 0 and 1 from rate variables\"\"\"\n for keys in [\"death_rate\", \"death_rate_reindexed\", \"deaths_over_closed\"]:\n dfs[keys] = remove_corner_values(dfs[keys])\n \n \"\"\" Set parameters for plotting\"\"\"\n titles = {\"active_cases\": \"COVID-19 Active Cases\", \"active_cases_reindexed\": \"COVID-19 Active Cases (Days from the Start of the Outbreak)\", \"deaths_over_closed\": \"COVID-19 Deaths over (Deaths + Recovered)\", \"death_rate\": \"COVID-19 Death Rate\", \"death_rate_reindexed\": \"COVID-19 Death Rate (Days from the Start of the Outbreak)\"}\n filenames = {\"active_cases\": \"covid19_active.png\", \"active_cases_reindexed\": \"covid19_active_ri.png\", \"deaths_over_closed\": \"covid19_death_over_closed.png\", \"death_rate\": \"covid19_death_rate.png\", \"death_rate_reindexed\": \"covid19_death_rate_ri.png\"}\n row_inclusion_index_threasholds = {\"active_cases\": 770, \"active_cases_reindexed\": 500, \"deaths_over_closed\": 770, \"death_rate\": 770, \"death_rate_reindexed\": 500}\n row_inclusion_indices = {}\n #row_inclusion_indices.get(x) is None:\n # row_inclusion_indices = dfs[\"cases\"].iloc[:,-1] > x\n\n \"\"\" Plot\"\"\"\n for key in row_inclusion_index_threasholds.keys():\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-1] > row_inclusion_index_threasholds[key]\n if key in [\"active_cases_reindexed\", \"death_rate_reindexed\"]:\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-5] > row_inclusion_index_threasholds[key]\n plot(dfs[key], row_inclusion_indices.get(key), titles[key], filenames[key])", "def _cont_calcs(srs: Srs, cfg: Config) -> Dict[str, List[Any]]:\n\n data: Dict[str, List[Any]] = {}\n\n # drop infinite values\n mask = srs.apply(\"isin\", {np.inf, -np.inf})\n srs = Srs(srs.getmask(mask, inverse=True), agg=True)\n min_max = srs.apply(\n \"map_partitions\", lambda x: pd.Series([x.max(), x.min()]), meta=pd.Series([], dtype=float)\n ).data\n min_max_comp = []\n if cfg.diff.density:\n for min_max_value in dask.compute(min_max)[0]:\n min_max_comp.append(math.isclose(min_max_value.min(), min_max_value.max()))\n min_max = dd.concat(min_max).repartition(npartitions=1)\n\n # histogram\n data[\"hist\"] = srs.self_map(\n da.histogram, bins=cfg.hist.bins, range=(min_max.min(), min_max.max())\n )\n\n # compute the density histogram\n if cfg.diff.density:\n data[\"dens\"] = srs.self_map(\n da.histogram,\n condition=min_max_comp,\n bins=cfg.kde.bins,\n range=(min_max.min(), min_max.max()),\n density=True,\n )\n # gaussian kernel density estimate\n data[\"kde\"] = []\n sample_data = dask.compute(\n srs.apply(\n \"map_partitions\",\n lambda x: x.sample(min(1000, x.shape[0])),\n meta=pd.Series([], dtype=float),\n ).data\n 
)\n for ind in range(len(sample_data[0])):\n data[\"kde\"].append(gaussian_kde(sample_data[0][ind]))\n\n return data", "def cdf(data, args):\n return Plot._dist(data, args)", "def cloudy_table_scatter(x_index='lognHs',y_index='lognSFRs',**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n lookup_table = cloudy_library._restore_lookup_table()\n lookup_table['lognSFRs'] = np.round(lookup_table['lognSFRs']*10.)/10.\n\n fig,ax = plt.subplots(figsize=(8,5))\n\n key_const1 = list(p.keep_const.keys())[0]\n value_const1 = list(p.keep_const.values())[0]\n print('%s table values:' % key_const1)\n print(np.unique(lookup_table[key_const1]))\n print('kept fixed at %f' % value_const1)\n lookup_table_cut = lookup_table[(lookup_table[key_const1] == value_const1)]\n\n try: \n key_const2 = list(p.keep_const.keys())[1]\n value_const2 = list(p.keep_const.values())[1]\n print('%s table values:' % key_const2)\n print(np.unique(lookup_table[key_const2]))\n print('kept fixed at %f' % value_const2)\n lookup_table_cut = lookup_table_cut[(lookup_table_cut[key_const2] == value_const2)]\n print('2 fixed parameters')\n except:\n pass\n try: \n key_const3 = list(p.keep_const.keys())[2]\n value_const3 = list(p.keep_const.values())[2]\n print('%s table values:' % key_const3)\n print(np.unique(lookup_table[key_const3]))\n print('kept fixed at %f' % value_const3)\n lookup_table_cut = lookup_table_cut[(lookup_table_cut[key_const3] == value_const3)]\n print('3 fixed parameters')\n except:\n pass\n try: \n key_const4 = list(p.keep_const.keys())[3]\n value_const4 = list(p.keep_const.values())[3]\n print('%s table values:' % key_const4)\n print(np.unique(lookup_table[key_const4]))\n print('kept fixed at %f' % value_const4)\n lookup_table_cut = lookup_table_cut[(lookup_table_cut[key_const4] == value_const4)]\n print('4 fixed parameters')\n except:\n pass\n\n x, y = lookup_table_cut[x_index].values, lookup_table_cut[y_index].values\n print(x.min(),x.max())\n\n if p.line == '[CII]158_CO(1-0)':\n line_lum = 10.**lookup_table_cut['[CII]158'].values / 10.**lookup_table_cut['CO(1-0)'].values\n line_lum = np.log10(line_lum)\n if p.line == 'alpha_CO':\n line_lum = 1e4 / aux.Lsun_to_K_km_s_pc2(10.**lookup_table_cut['CO(1-0)'].values,'CO(1-0)') \n try:\n line_lum = lookup_table_cut[p.line].values\n except:\n pass\n\n lum = line_lum\n vmin = np.min(lum)\n vmax = np.max(lum)\n if p.ylim:\n vmin = p.ylim[0]\n vmax = p.ylim[1]\n #lum[lum < vmin] = vmin\n #lum[lum > vmax] = vmax\n if p.log: \n lum = np.log10(lum)\n vmin,vmax = np.log10(vmin),np.log10(vmax)\n\n print('Highest and lowest value to be mapped:', np.min(lum), np.max(lum))\n print(vmin,vmax)\n print(p.zlim)\n\n sc = ax.scatter(x, lum, marker='o', c=y, cmap=\"jet\", alpha=0.8)\n\n translate_labels = {'lognHs':'lnH','logNHs':'lNH','logFUVs':'lG0','logZs':'lZ','lognSFRs':'lSFR_density'}\n plt.colorbar(sc,label=getlabel(translate_labels[y_index]))\n ax.set_xlabel(getlabel(translate_labels[x_index]))\n ax.set_ylabel('\\n\\n' + p.line)\n plt.tight_layout()\n \n if p.ylim: ax.set_ylim(p.ylim)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_table%s_%s_scatter.png' % (p.grid_ext,p.line), format='png', dpi=300)", "def scatterplot_coverage(budgets=['180', '300', '600', 'default'], tools=['evosuite', 'randoop']):\n for budget in budgets:\n for tool in tools:\n # reading predictions\n frame_path = 
'data/{}/{}-predictions-{}.csv'.format(budget, tool, budget)\n frame = pd.read_csv(frame_path)\n label = 'BranchCoverage' if 'BranchCoverage' in frame.columns else 'branch_coverage'\n frame = frame.sort_values(by=[label], ascending=True)\n predictions = list(frame['prediction'])\n oracles = list(frame[label])\n\n trace0 = go.Scatter(\n y=predictions,\n x=[i for i in range(0, len(predictions))],\n name='Predicted',\n mode='markers',\n marker=dict(size=5)\n )\n\n trace1 = go.Scatter(\n y=oracles,\n x=[i for i in range(0, len(oracles))],\n name='Oracle',\n mode='markers',\n marker=dict(size=5)\n )\n\n data = [trace0, trace1]\n\n layout = dict(yaxis=dict(zeroline=False,\n title='coverage',\n size=16,\n color='black'),\n xaxis=dict(zeroline=False,\n size=16,\n title='classes',\n color='black'),\n autosize=True,\n margin=go.Margin(\n l=50,\n r=50,\n b=40,\n t=20,\n pad=10\n )\n )\n\n fig = dict(data=data, layout=layout)\n py.image.save_as(fig, filename='plots/scatter-{}-{}-.pdf'.format(tool, budget))", "def parse_dataframes(genome_gtf, sralist):\n\n def gather_strand_by_geneID_dict(genome_gtf):\n \"\"\"\n Returns dictionary with strand orientation as values and geneIDs as Keys/\n e.g.: {'YAL012W': '+',\n 'YAL069W': '+',\n 'YAL068W-A': '+',\n \"\"\"\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict\n\n\n def import_scikit_data(sralist):\n \"\"\"\n Import results from scikit pipeline for all datasets contained in datsets_names.\n \"\"\"\n scikit_data_dict = {}\n for dataset in sralist:\n with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:\n scikit_data_dict[dataset] = [json.load(scikit_data)]\n return scikit_data_dict\n\n\n def build_mat_scikit_strandOriented(sralist, scikit_data):\n \"\"\"\n Building of scikit_df based on the output of plot_ribo_density_dict.py script.\n\n C/-/reverse/complementary strand are taken into account and the profile values\n (\"codon_density_profile\", \"codon_triplet\", \"codon_AA\") are reversed. This is\n performed by adding [::-1] to C strands profile ends.\n\n Same profile values are also have their extremities trimmed out of 8 codons.\n (This is because the scikit-ribo pipeline considers 8 extra codons on each end,\n but here we are only interested in the coding sequence). 
This is performed by\n adding [8:-8] to profile lists ends.\n \"\"\"\n\n scikit_mat = {}\n seq_codons = {}\n seq_aa = {}\n\n for geneID in scikit_data[sralist[0]][0].keys():\n for ix, dataset in enumerate(sralist):\n\n if geneID in scikit_data[dataset][0].keys():\n current_profile = scikit_data[dataset][0].get(geneID, np.nan)\n current_ribo = current_profile[0]\n current_ribo = current_ribo[8:-8]\n N = len(sralist)\n M = len(current_ribo)\n print(geneID, M)\n\n if ix == 0:\n current_matrix = np.zeros((N,M)) * np.nan\n\n current_seq_codons = current_profile[1]\n current_seq_codons = current_seq_codons[8:-8]\n\n current_seq_aa = current_profile[2]\n current_seq_aa = current_seq_aa[8:-8]\n\n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n seq_codons[geneID] = current_seq_codons\n seq_aa[geneID] = current_seq_aa\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n seq_codons[geneID] = current_seq_codons[::-1]\n seq_aa[geneID] = current_seq_aa[::-1]\n \n \n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n current_matrix[ix,:] = current_ribo\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n current_matrix[ix,:] = current_ribo[::-1]\n \n if np.sum(current_matrix) > 0: \n scikit_mat[geneID] = current_matrix\n\n# scikit_df = pd.DataFrame(values_list, columns=columns_list)\n\n return scikit_mat, seq_codons, seq_aa\n\n\n def mean_norm(row):\n codon_dens_prof = row.codon_density_profile\n profile_average = np.average(codon_dens_prof)\n\n return [x/profile_average for x in codon_dens_prof]\n \n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df.apply(mean_norm, axis=1)\n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8])\n\n strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf)\n scikit_data_dict = import_scikit_data(sralist)\n scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, scikit_data_dict)\n\n with open('../data/processed/scikit_mat.pkl', 'wb') as f:\n \tpickle.dump(scikit_data_mat, f)\n\n with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq:\n pickle.dump(seq_codons_dict, f_seq)\n \n\n return scikit_data_mat", "def NPD_gos(df,bvals,c=1):\n j_dist = get_coop_coop_neighbour_dist(df) \n degree_dist = get_degree_distribution(df)\n f_jk = get_f_jkAB(j_dist,degree_dist)\n return pd.concat([gradient_of_selection(f_jk,NPD_benefit,b,c) for b in bvals],keys=bvals,names='b')", "def dichotomize_plot(args):\n # Read the files.\n df = _parse_data(args)\n df[\"group\"] = np.nan\n df[\"intercept\"] = 1\n\n df = df[[\"group\", \"intercept\", \"grs\", args.phenotype]]\n\n # Init the statistical test.\n test = model_map[args.test]()\n\n qs = []\n upper_ci = []\n lower_ci = []\n ns = []\n betas = []\n\n for q in np.linspace(0.05, 0.5, 200):\n low, high = df[[\"grs\"]].quantile([q, 1 - q]).values.T[0]\n\n df[\"group\"] = np.nan\n df.loc[df[\"grs\"] <= low, \"group\"] = 0\n df.loc[df[\"grs\"] >= high, \"group\"] = 1\n\n cur = df.dropna()\n\n stats = test.fit(\n cur[[args.phenotype]], cur[[\"group\", \"intercept\"]]\n )\n\n qs.append(q)\n betas.append(stats[\"group\"][\"coef\"])\n ns.append(df.dropna().shape[0])\n upper_ci.append(stats[\"group\"][\"upper_ci\"])\n lower_ci.append(stats[\"group\"][\"lower_ci\"])\n\n fig, ax1 = plt.subplots()\n\n beta_line, = ax1.plot(qs, betas)\n ci_line, = ax1.plot(qs, upper_ci, \"--\", color=\"gray\", linewidth=0.2)\n ax1.plot(qs, lower_ci, \"--\", color=\"gray\", linewidth=0.2)\n 
ax1.set_ylabel(r\"$\\beta$\")\n ax1.set_xlabel(\"Quantile used to form groups (0.5 is median)\")\n\n ax2 = ax1.twinx()\n ax2.grid(False, which=\"both\")\n n_line, = ax2.plot(qs, ns, \"-\", linewidth=0.2)\n ax2.set_ylabel(\"effective n\")\n\n plt.legend(\n (beta_line, ci_line, n_line),\n (r\"$\\beta$\", \"95% CI\", \"$n$\"),\n loc=\"upper center\"\n )\n\n if args.out:\n plt.savefig(args.out)\n else:\n plt.show()", "def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts finished!\")\n return path", "def CII_vs_CO(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[1])\n fig,ax1 = plt.subplots()\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n sc = 
ax1.scatter(np.log10(L_CO)-10, np.log10(L_CII)-10, marker='o', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n s=10, alpha=0.8)#, label='SIGAME 100Mpc_arepoPDF')\n # print('Min Zsfr in Simba sample: ',np.min(Zsfr))\n # print('indices with L_CO < 1e0:')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='forestgreen',levels=8,zorder=10)\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF')\n\n GR = glo.global_results(sim_run=p.sim_runs[0],nGal=p.nGals[0],grid_ext=p.grid_exts[1])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = np.append(lL_CO,np.array([6.1,5]))\n lL_CII = np.append(lL_CII,np.array([8.9,9.7]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME 25Mpc_arepoPDF')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='deepskyblue',linestyles='dotted',levels=6)\n CS.collections[0].set_label('SIGAME 25Mpc_arepoPDF')\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[0])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = np.append(lL_CO,np.array([-2.2,4.7]))\n lL_CII = np.append(lL_CII,np.array([8,9.3]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME v3 Simba-%s' % (p.sim_runs[0].replace('_','').replace('Mpc','')))\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='brown',levels=8,zorder=5,linestyles='dashed')\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF_no_ext')\n\n # Observations\n K16 = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n K16_LCII = K16['[CII]158_Lsun']\n K16_LCO = K16['CO(1-0)_Lsun']\n ax1.plot(np.log10(K16_LCO), np.log10(K16_LCII), '>', color='grey', ms=6, fillstyle='none',alpha=0.8, mew=1,zorder=0,\\\n label='Mixed type galaxies [Kamenetzky+16]')\n\n C15 = pd.read_pickle('data/observations/DGS_Cormier_2015')\n C15_LCII = C15['L_[CII]158']\n C15_LCO = C15['L_CO(1-0)']\n C15_Z = C15['Z']\n # L_ul = C15['L_[CII]158'][(C15['L_[CII]158'] < 0) & (C15['L_CO(1-0)'] > 0)]\n # if len(L_ul) > 0:\n # ax1.plot(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul),'s',ms=5,mew=0,color='grey',alpha=0.8)\n # ax1.errorbar(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul), elinewidth=1,\\\n # uplims=np.ones(len(L_ul)),yerr=np.ones(len(L_ul))*1,color='grey',alpha=0.8,lw=0)\n 
ax1.scatter(np.log10(C15_LCO), np.log10(C15_LCII), marker='+', c=np.log10(C15_Z), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=100, lw=3, alpha=0.8, label='Dwarf galaxies [Cormier+15]')\n\n A17 = pd.read_pickle('data/observations/xCOLD_GASS_Accurso_2017')\n A17 = A17.loc[np.argwhere(A17['L_CO(1-0)'].values > 0).flatten()]\n ax1.scatter(A17['L_CO(1-0)'],A17['L_[CII]158'], marker='d', c=np.log10(A17['Z']), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=50, lw=0, alpha=0.8, label='COLD GASS [Accurso+17]') #c=np.log10(A17['Z']), \n\n CII_obs = np.log10(np.append(K16_LCII.values,C15_LCII.values))\n CO_obs = np.log10(np.append(K16_LCO.values,C15_LCO.values))\n CII_obs = np.append(CII_obs,A17['L_[CII]158'].values)\n CO_obs = np.append(CO_obs,A17['L_CO(1-0)'].values)\n index = np.argwhere((CII_obs > 0) & (CO_obs > 0)).flatten()\n CII_obs = CII_obs[index]\n CO_obs = CO_obs[index]\n\n x = np.linspace(0, 7, 100)\n fit = LinearRegression().fit(CO_obs.reshape(-1, 1),\\\n CII_obs.reshape(-1, 1))\n L_fit = fit.predict(x.reshape(-1, 1))\n ax1.plot(x, L_fit, color='black', linestyle='--', label='Log-linear fit to observations')\n\n ax1.set_ylabel('log ' + getlabel('[CII]158'))\n ax1.set_xlabel('log ' + getlabel('CO(1-0)'))\n plt.colorbar(sc,label=r'log $\\langle$Z$\\rangle_{\\rm SFR}$ [Z$_{\\rm \\odot}$]')\n\n handles, labels = ax1.get_legend_handles_labels()\n print(labels) # labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n # handles = [handles[_] for _ in [2,4,3,5,0,6,7,1]]\n # labels = [labels[_] for _ in [2,4,3,5,0,6,7,1]]\n handles = [handles[_] for _ in [2,4,3,5,6,0,1]]\n labels = [labels[_] for _ in [2,4,3,5,6,0,1]]\n plt.legend(handles,labels,loc='lower left',fontsize=10.,frameon=True)\n\n ax1.set_xlim([-3,6.2])\n ax1.set_ylim([4,10])\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/CO_vs_CII%s%s.png' % (p.grid_ext,p.table_ext), dpi=300)", "def compute_features(input: str, output: str):\n co.nb.matplotlib_inline()\n\n df = pd.read_csv(input)\n\n # Show proportion of customers exited vs retained\n labels = 'Exited', 'Retained'\n sizes = [df.Exited[df['Exited'] == 1].count(), df.Exited[df['Exited'] == 0].count()]\n explode = (0, 0.1)\n fig1, ax1 = plt.subplots(figsize=(5, 4))\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal')\n plt.title(\"Proportion of customers churned vs retained\", size=10)\n plt.show()\n\n # Drop meaningless index columns, as well as surname which would likely be\n # profiling.\n df.drop([\"RowNumber\", \"CustomerId\", \"Surname\"], axis=1, inplace=True)\n\n # Normalize balance by salary, and tenure and credit score by age.\n df[\"BalanceSalaryRatio\"] = df.Balance / df.EstimatedSalary\n df[\"TenureByAge\"] = df.Tenure / df.Age\n df[\"CreditScoreGivenAge\"] = df.CreditScore / df.Age\n\n # Arrange columns by data type for easier manipulation\n continuous_vars = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary',\n 'BalanceSalaryRatio',\n 'TenureByAge', 'CreditScoreGivenAge']\n cat_vars = ['HasCrCard', 'IsActiveMember', 'Geography', 'Gender']\n df = df[['Exited'] + continuous_vars + cat_vars]\n\n # For the one hot variables, we change 0 to -1 so that the models can capture\n # a negative relation where the attribute is inapplicable instead of 0\n df.loc[df.HasCrCard == 0, 'HasCrCard'] = -1\n df.loc[df.IsActiveMember == 0, 
'IsActiveMember'] = -1\n\n # One hot encode the categorical variables\n lst = ['Geography', 'Gender']\n remove = list()\n for i in lst:\n if df[i].dtype == np.str or df[i].dtype == np.object:\n for j in df[i].unique():\n df[i + '_' + j] = np.where(df[i] == j, 1, -1)\n remove.append(i)\n df = df.drop(remove, axis=1)\n\n # Scale continuous variables to go from 0 to 1.\n min_vec = df[continuous_vars].min().copy()\n max_vec = df[continuous_vars].max().copy()\n df[continuous_vars] = (df[continuous_vars] - min_vec) / (max_vec - min_vec)\n\n # Print results\n _df_pretty(df.head().transpose().round(2))\n\n os.makedirs(os.path.dirname(output), exist_ok=True)\n df.to_csv(output)", "def plot_HDres_histos_vs_z(\n df,\n nameout,\n threshold_var=\"class0\",\n threshold_list=[0.5, 0.7, 0.9],\n threshold_sign=\">\",\n):\n\n P = df[df[\"class0\"] > 0.5]\n Ias = df[df[\"target\"] == 0]\n\n TP = P[P[\"target\"] == 0]\n FP = P[P[\"target\"] != 0]\n\n sel_TP_dic = {}\n sel_FP_dic = {}\n for t in threshold_list:\n if threshold_sign == \">\":\n sel_TP_dic[t] = TP[TP[threshold_var] > t]\n sel_FP_dic[t] = FP[FP[threshold_var] > t]\n else:\n sel_TP_dic[t] = TP[TP[threshold_var] < t]\n sel_FP_dic[t] = FP[FP[threshold_var] < t]\n\n plt.clf()\n cm = CMAP\n fig = plt.figure(figsize=(14, 14))\n # gs = gridspec.GridSpec(4, 2, width_ratios=[3, 1], height_ratios=[2, 2, 1, 1])\n # gs.update(wspace=0.1, hspace=0.3)\n\n # # gridspec init\n # ax00 = plt.subplot(gs[0, 0]) # Hres Ia\n # ax10 = plt.subplot(gs[1, 0], sharex=ax00) # Hres CC\n # ax20 = plt.subplot(gs[2:, 0], sharex=ax00) # efficiency\n # ax01 = plt.subplot(gs[0, 1], sharey=ax00) # histo Ia\n # ax11 = plt.subplot(gs[1, 1], sharey=ax10) # histo CC\n # ax21 = plt.subplot(gs[2, 1]) # histo x1\n # ax31 = plt.subplot(gs[3, 1]) # histo c\n gs = gridspec.GridSpec(3, 3, height_ratios=[2, 2, 1])\n # gs.update(wspace=0.2, hspace=0.1)\n\n # gridspec init\n ax00 = plt.subplot(gs[0, 0:2]) # Hres Ia\n ax10 = plt.subplot(gs[1, 0:2], sharex=ax00) # Hres CC\n ax20 = plt.subplot(gs[2, 0]) # redshift dist\n ax01 = plt.subplot(gs[0, 2], sharey=ax00) # histo Ia\n ax11 = plt.subplot(gs[1, 2], sharey=ax10) # histo CC\n ax21 = plt.subplot(gs[2, 1]) # histo x1\n ax31 = plt.subplot(gs[2, 2]) # histo c\n\n # lines\n ax00.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n ax10.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n\n mubins = np.arange(-2, 2 + 0.1, 0.1)\n\n # Hres w. 
histogram\n def HRwhisto(\n df, sel_dic, ax_left, ax_right, threshold_sign, ylabel=\"TP\", visible=False\n ):\n if ylabel == \"TP\":\n sntyp = \"Ia\"\n else:\n sntyp = \"CC\"\n ax_left.scatter(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n c=df[\"class0\"],\n cmap=CMAP,\n vmin=0.5,\n vmax=1,\n s=8,\n )\n ax_left.errorbar(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n yerr=df[\"delmu_err\"],\n color=\"gray\",\n zorder=0,\n fmt=\"none\",\n marker=\"none\",\n )\n\n ax_left.set_ylim(-2, 2)\n ax_left.set_xlim(0, 1.2)\n ax_left.set_ylabel(f\"{ylabel} residual\", fontsize=18)\n ax_left.tick_params(labelsize=14)\n plt.setp(ax_left.get_xticklabels(), visible=visible)\n if visible is True:\n ax_left.set_xlabel(\"simulated redshift\", fontsize=18)\n for t in threshold_list:\n sel = sel_dic[t]\n n_SNe = len(sel)\n ax_right.hist(\n sel[\"delmu\"],\n orientation=\"horizontal\",\n histtype=\"step\",\n color=cm(t),\n bins=mubins,\n density=True,\n label=f\"{n_SNe} {sntyp} {threshold_sign} {t}\",\n lw=2,\n )\n ax_right.legend(loc=\"lower center\", prop={\"size\": 13})\n plt.setp(ax_right.get_yticklabels(), visible=False)\n plt.setp(ax_right.get_xticklabels(), visible=False)\n ax_right.plot(\n [ax_right.get_xlim()[0], ax_right.get_xlim()[1]],\n np.zeros(len([ax_right.get_xlim()[0], ax_right.get_xlim()[1]])),\n \"k:\",\n )\n\n HRwhisto(TP, sel_TP_dic, ax00, ax01, threshold_sign, ylabel=\"TP\", visible=False)\n HRwhisto(FP, sel_FP_dic, ax10, ax11, threshold_sign, ylabel=\"FP\", visible=True)\n\n # z histos\n n, bins_to_use, tmp = ax20.hist(\n Ias[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=\"black\", bins=15, lw=3\n )\n\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n sel_FP = sel_FP_dic[t]\n ax20.hist(\n sel_TP[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=cm(t), bins=bins_to_use\n )\n ax20.hist(\n sel_FP[\"SIM_REDSHIFT_CMB\"],\n histtype=\"step\",\n color=cm(t),\n linestyle=\"--\",\n bins=bins_to_use,\n )\n ax20.set_xlim(0, 1.2)\n ax20.tick_params(labelsize=14)\n ax20.set_xlabel(\"simulated redshift\", fontsize=18)\n\n # hist stretch\n n, bins_to_use, tmp = ax21.hist(Ias[\"x1\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax21.hist(\n sel_TP[\"x1\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax21.set_xlabel(\"x1\", fontsize=18)\n ax21.yaxis.set_label_position(\"right\")\n ax21.set_xlim(-3, 3)\n ax21.tick_params(labelsize=14)\n # color histo\n n, bins_to_use, tmp = ax31.hist(Ias[\"c\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax31.hist(\n sel_TP[\"c\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax31.set_xlabel(\"c\", fontsize=18)\n ax31.set_xlim(-1, 1)\n ax31.tick_params(labelsize=14)\n ax31.yaxis.set_label_position(\"right\")\n\n gs.tight_layout(fig)\n plt.savefig(nameout)\n plt.close()\n del fig", "def plot_pdf(pop_name, pop_val, pop_file, full_pop_file, outdir='.'):\n try:\n plt.style.use(\n \"https://gist.githubusercontent.com/avivajpeyi/4d9839b1ceb7d3651cbb469bc6b0d69b/raw/4ee4a870126653d542572372ff3eee4e89abcab0/publication.mplstyle\")\n except Exception:\n pass\n\n plt.close('all')\n all = pd.read_csv(full_pop_file, sep=\" \")\n all['cos_theta_1'] = all['cos_tilt_1']\n all = process_samples(all)\n sub = pd.read_csv(pop_file, sep=\" \")\n sub = process_samples(sub)\n sub['cos_theta_1'] = sub['cos_tilt_1']\n\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for ax, l in 
zip(axes, [\"cos_theta_1\", \"cos_theta_12\"]):\n ax.hist(all[l], density=True, histtype='step', color=\"tab:blue\", label=\"ALL\", lw=2, alpha=0.8)\n ax.scatter(all[l], [0 for _ in all[l]], color=\"tab:blue\",marker=\"+\")\n ax.hist(sub[l], density=True, histtype='step', color=\"tab:purple\", label=\"HIGH SNR\", lw=2, alpha=0.6)\n ax.scatter(sub[l], [0 for _ in sub[l]], color=\"tab:purple\", marker=\"+\")\n\n x = np.linspace(-1, 1, 100)\n y1 = TruncatedNormal(mu=1, sigma=pop_val[0], minimum=-1, maximum=1).prob(x)\n y2 = TruncatedNormal(mu=1, sigma=pop_val[1], minimum=-1, maximum=1).prob(x)\n axes[1].plot(x, y2, color='tab:gray', zorder=-10, lw=3, label=\"TRUE\")\n axes[0].plot(x, y1, color='tab:gray', zorder=-10, lw=3)\n\n for i in range(len(axes)):\n if (i == 0):\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_1$\")\n axes[i].set_ylabel(\"PDF\")\n else:\n axes[i].set_xlabel(r\"$\\cos\\ \\theta_{12}$\")\n axes[i].set_yticklabels([])\n axes[i].legend()\n axes[i].grid(False)\n axes[i].set_xlim(-1, 1)\n\n plt.suptitle(f\"POP {pop_name}\")\n plt.tight_layout()\n plt.savefig(f\"{outdir}/pop_trues_{pop_name}.png\")", "def plot_scan_density(dataframe, **kwargs):\n\n # Determine number of cases\n nr = len(set(dataframe[dataframe._column_names['case']].values))\n\n # Reshape in (N,Sa*Sb) matrix\n data = dataframe['error'].values.reshape(dataframe.Sa * dataframe.Sb, nr)\n if kwargs.get('absolute', False):\n data = numpy.absolute(dGdiff)\n\n # Set upper and lower tolerance limit if not set. Use cutoff of 5% between\n # minimum and maximum value in dataet\n ltol = kwargs.get('ltol', numpy.min(data) * 0.05)\n utol = kwargs.get('utol', numpy.max(data) * 0.05)\n\n # Make new matrix of size (alpha range x beta range) and add reshaped\n # identity matrixes for each case. 
Identity matrix is determined as 1 for\n # all cases with gamma residual in range ltol <= x <= utol, and 0 outside.\n identmatr = ((data >= ltol) & (data <= utol)).astype(int)\n ident_matrix_cases = identmatr * numpy.arange(1, nr + 1).reshape(1, nr)\n scanmatrix = numpy.zeros((dataframe.Sa, dataframe.Sb))\n for i in range(1, nr + 1):\n scanmatrix = scanmatrix + numpy.sum((ident_matrix_cases == i).astype(int), axis=1).reshape(dataframe.Sa,\n dataframe.Sb)\n\n # Transform counts into percentages\n scanmatrix = (scanmatrix / nr) * 100\n\n # Only display matrix elements with percentage above cutoff\n ptol = kwargs.get('ptol', None)\n if ptol:\n scanmatrix[scanmatrix < ptol] = 0\n\n # Define plotting variables\n plotvars = {\n 'title': r'LIE $\\alpha$ and $\\beta$ parameter scan for {0} cases'.format(nr),\n 'subtitle': 'Density as number of ligands with dG RMSe within range: {0:.2f} to {1:.2f} kJ/mol'.format(ltol,\n utol),\n 'cblabel': 'Percentage within tolerance range',\n 'ylabel': r'$\\alpha$',\n 'xlabel': r'$\\beta$'\n }\n plotvars.update(kwargs)\n\n logging.info(\"Create density matrix plot for Alpha/Beta parameter scan results\")\n logging.info(\n \"Create matrix plot using lower tolerance (ltol) of {0:.3f} and upper rolerance (utol) of {1:.3f}\".format(ltol,\n utol))\n\n # Prepare the plot\n return plot_matrix(scanmatrix,\n xaxis=dataframe.beta_scan_range,\n yaxis=dataframe.alpha_scan_range,\n **plotvars)", "def read_drainage_efficiency(self):#, PLOT, FIGURE, DISTRIBUTION):\n\n print ' Reading drainage efficiency'\n\n self.drainage_efficiency = {}\n\n drainage = np.zeros(self.ATTM_nrows * self.ATTM_ncols)\n\n for i in range(0, self.ATTM_nrows * self.ATTM_ncols):\n if self.ATTM_Total_Fractional_Area[i] > 0.0 :\n if self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'random':\n chance = random.random()\n if chance > self.Terrestrial['Drainage_Efficiency_Random_Value']:\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n else:\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2. 
# redundant, but explicit\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'above':\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'below':\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2.\n else: \n self.drainage_efficiency[i] = 'none'\n drainage[i] =0.\n\n print ' done.'\n print ' '\n\n # ==================================================\n # Create desired output files, figures, and plots\n # ==================================================\n if self.Terrestrial['Drainage_Efficiency_Figure'].lower() == 'yes':\n # -------------------------\n # Move to output directory\n # -------------------------\n if self.Simulation_area.lower() == 'barrow':\n os.chdir(self.control['Run_dir']+self.Output_directory+'/Barrow')\n\n # -----------------------\n # Create desired output\n # -----------------------\n drainage = np.reshape(drainage, [self.ATTM_nrows, self.ATTM_ncols])\n\n fig = pl.figure()\n pl.imshow(drainage, interpolation='nearest', cmap='bone')\n pl.colorbar( extend = 'max', shrink = 0.92)\n pl.title('Drainage efficiency')\n pl.savefig('./Initialization/Drainage_efficiency.png', format = 'png')\n drainage.tofile('./Initialization/Drainage_efficiency.bin')\n pl.close()\n\n os.chdir(self.control['Run_dir'])", "def P_AI_Rocky(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(14,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n import cmocean\n cmap = cmocean.cm.balance\n # cmap = 'RdYlBu_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate divergence and vorticity\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n dive = ((np.diff(u, axis=1)/G['DX'][:, 1:-1])[1:-1, :]\n + (np.diff(v, axis = 0)/G['DY'][1:-1, :])[:, 1:-1])\n #dive[G['mask_rho'][1:-1,1:-1]==False] = np.nan\n \n vort = np.diff(v, axis=1)/G['DX'][1:,1:] - np.diff(u, axis=0)/G['DY'][1:,1:]\n #vort[G['mask_rho'][1:,1:]==False] = np.nan\n \n scl = 2e-3\n \n # panel 1\n ax = fig.add_subplot(121)\n # cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], dive/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_rho'][1:-1,1:-1], G['lat_rho'][1:-1,1:-1], dive/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Divergence (%0.1e $s^{-1}$)' % (scl))\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n #\n # panel 2\n ax = fig.add_subplot(122)\n # cs = plt.pcolormesh(G['lon_rho'], G['lat_rho'], vort/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], vort/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Vorticity (%0.1e $s^{-1}$)' % (scl))\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([])\n #fig.colorbar(cs)\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"40%\", loc='lower left')\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n 
pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr) \n \n #fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()", "def show_dprime(sim_attr_generator):\n#TODO description\n dprime_fnc_list = [\n (sim_attr.id_name,sim_attr.dprime_fnc) for sim_attr in sim_attr_generator\n ]\n\n if Args.mat_file_out != None:\n save_dict = dict()\n else:\n x_axis = int(math.ceil(math.sqrt(len(dprime_fnc_list))))\n y_axis = int(math.ceil(float(len(dprime_fnc_list)) / x_axis))\n fig, axes = plt.subplots(nrows=y_axis,ncols=x_axis)\n\n#? Code duplication\n if len(dprime_fnc_list) == 1:\n id_name, dprime_fnc = dprime_fnc_list[0]\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n im = show_plot_imshow_from_mesh(\n axes, mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n fig.colorbar(im,shrink=0.8)\n plt.show()\n# End code duplication\n return\n\n for i, (id_name, dprime_fnc) in enumerate(dprime_fnc_list):\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n if Args.mat_file_out != None:\n dprime_fnc[id_name] = {'X':mesh_X, 'Y':mesh_Y, 'Z':mesh_Z}\n else:\n im = show_plot_imshow_from_mesh(\n axes.flat[i], mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n if Args.mat_file_out != None:\n scipy.io.savemat(Args.mat_file_out, save_dict)\n else:\n fig.colorbar(im,ax=axes.ravel().tolist(),shrink=0.8)\n plt.show()", "def density(self, features_table):\n\n # Get features\n if type(features_table) != np.ndarray:\n X = features_to_array(features_table, self.scaler)\n else:\n X = features_table\n\n # Predict\n df_density = pd.DataFrame(self.predictor.predict_proba(X), columns=self.predictor.classes_)\n df_density = df_density.loc[:, GT_LABELS]\n\n return df_density", "def make_distplot(data, output_f, title, xlabel, prefix):\n\n plt.figure(figsize=(8, 8), dpi=1200)\n displot = sns.distplot(data, hist=False, rug=True, color=\"b\")\n out_name = prefix + \"_\" + title + \"_\" + output_f.split(\".\")[0] + \".pdf\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel('Density')\n displot.figure.savefig(out_name)\n plt.close()", "def generate_csd_1D(csd_profile, csd_seed, start_x=0., end_x=1., res_x=50):\n chrg_pos_x = np.linspace(start_x, end_x, res_x)\n f = csd_profile(chrg_pos_x, seed=csd_seed)\n return chrg_pos_x, f", "def main():\n parser = argparse.ArgumentParser(description=\n ('Make usable profile data for FWHM fitting. Process output of '\n 'ds9projplotter.py: convert to arcsec, compute errors. '\n 'Outputs .npz and .dat files; downstream scripts use .npz, but '\n 'plaintext (.dat) is nice for archiving.'))\n parser.add_argument('ds9physreg', help='DS9 region file, physical coords')\n parser.add_argument('inroot', help='Intensity profiles, file root')\n parser.add_argument('ctroot', help='Uncorrected count profiles, file root')\n parser.add_argument('outroot', help='Output profile root')\n\n parser.add_argument('-b', '--binsize', default=1, type=float,\n help=('Bin size of images used to generate profiles; '\n 'set in CIAO\\'s merge_obs, reproject_obs plists'))\n parser.add_argument('-l', '--labels', nargs='+',\n help='Energy band labels for profiles; must be '\n 'sorted & consistent for subsequent analysis')\n\n parser.add_argument('-w', '--window', default='hanning',\n help=('Window type for smoothing to set fit domain. 
'\n 'Options: flat, hanning, hamming, bartlett, blackman '\n '(see fsmooth.std_smooth)'))\n parser.add_argument('-n', '--window-n', default=21, type=int,\n help='Smoothing window length (pixels/points)')\n\n parser.add_argument('-v', '--verbose', action='store_true',\n help='Verbose mode')\n\n args = parser.parse_args()\n ds9physreg = args.ds9physreg\n inroot, ctroot, outroot = args.inroot, args.ctroot, args.outroot\n binsize, labels = args.binsize, args.labels\n wtype, wlen = args.window, args.window_n\n verbose = args.verbose\n\n if not binsize:\n print 'No binsize supplied, assuming binsize 1 (0.492 arcsec/pixel)'\n print 'DEBUG: {}'.format(binsize)\n binsize = 1\n\n dims = get_proj_dims(ds9physreg)\n px2arcsec = binsize * ACIS_PX2ARCSEC\n\n fit_cuts = []\n\n for i in xrange(len(dims)):\n length, thickness = dims[i] # Units: physical pixels\n thck = np.floor(thickness) # Floor to be consistent w/ DS9,\n # for number of pixels integrated\n if verbose:\n print ('Region {:02d}: length={:0.2f}px, '\n 'thickness={:0.2f}px'.format(i+1, length, thck))\n\n reg_cuts = [i+1] # First entry = region number\n\n for lab in labels:\n # Intensity data; x data is just 1, 2, 3, ...\n infile = '{0}_{1:02d}_band_{2}.dat'.format(inroot, i+1, lab)\n data = np.loadtxt(infile)\n x = (data[:,0] - 1) * px2arcsec # Convert to arcsec, with x[0] = 0\n y = data[:,1]\n\n # Count data; counts already averaged over integration length\n ctfile = '{0}_{1:02d}_band_{2}.dat'.format(ctroot, i+1, lab)\n data_cts = np.loadtxt(ctfile)\n cts = data_cts[:,1]\n cts_err = np.sqrt(cts * thck) / thck # Poisson: n/L +/- sqrt(n)/L\n cts_err[cts_err==0] = np.sqrt(1) / thck # if n=0, err = sqrt(1)/L\n\n # Use count data to compute y errors\n np_errdict = np.seterr(divide='ignore', invalid='ignore')\n y_err = y * (cts_err / cts)\n # Edge cases if cts=0, or y=0 (cts_err addressed above)\n # if cts==0, use y +/- y (1 +/- 1 for Poisson error)\n # if y_err==0 (y==0), use minimum non-zero error\n y_err[cts==0] = y[cts==0]\n y_err[y_err==0] = np.amin(y_err[np.nonzero(y_err)])\n np.seterr(**np_errdict) # Restore error warnings\n\n # Determine cuts in fit domain\n cut = fsmooth.ind_first_min(y,window_len=wlen,window=wtype)\n reg_cuts.append(cut)\n if verbose:\n tot_cts = np.sum(cts[cut:]) * thck\n avg_cts = np.mean(cts[cut:]) * thck\n max_cts = np.amax(cts[cut:]) * thck\n #print (' Band {}: avg integrated cts {:0.3f}, total cts {} '\n # 'in fit domain'.format(lab, avg_cts, tot_cts))\n print ' Band {}: peak cts {} in fit domain'.format(lab, max_cts)\n\n # Save data to output files\n outfile_dat = '{0}_{1:02d}_band_{2}.dat'.format(outroot, i+1, lab)\n outfile_npz = '{0}_{1:02d}_band_{2}.npz'.format(outroot, i+1, lab)\n np.savetxt(outfile_dat, np.array((x, y, y_err)).T)\n np.savez(outfile_npz, x=x, y=y, y_err=y_err)\n\n fit_cuts.append(reg_cuts)\n\n fit_cuts = np.array(fit_cuts)\n\n # Save fit domain cuts to output files -- also note profile information\n cutfile_dat = '{0}_fit_cuts.dat'.format(outroot)\n cutfile_npz = '{0}_fit_cuts.npz'.format(outroot)\n hdr = 'Profile fit domain downstream cuts (array indices)\\n'\n hdr += 'binsize={}, resolution={} arcsec\\n'.format(binsize, px2arcsec)\n hdr += 'smoothing window: {}, length={}\\n'.format(wtype, wlen)\n hdr += 'region number, ' + ', '.join(labels)\n np.savetxt(cutfile_dat, fit_cuts, fmt='%d', header=hdr)\n np.savez(cutfile_npz, cuts=fit_cuts)\n\n if verbose:\n print 'Wrote fit domain cuts to {}, {}'.format(cutfile_dat,cutfile_npz)\n print 'Done!'", "def report_pw_over_rt(dfdict, 
suptitle='PeakWidth over RT', min=5, cols=1):\n if not any(dfdict):\n return ''\n else:\n\n keys = dfdict.keys()\n N = len(dfdict)\n keyindex = zip(range(N), keys)\n\n rows = int(math.ceil(N / cols))\n gs = gridspec.GridSpec(rows, cols)\n fig = plt.figure(figsize=(10, 4 * rows))\n\n\n for n, key in keyindex:\n if key.lower().endswith('osw'):\n subdict = dfdict[key]\n df = subdict['feature']\n if key.lower().endswith('tsv'):\n df = dfdict[key]\n # set ax\n temp_df = df.copy()\n ax = fig.add_subplot(gs[n])\n pw_over_rt_subplot(ax, temp_df, key)\n set_uni_ylimits(fig)\n plt.suptitle(suptitle)\n fig.tight_layout()\n fig.subplots_adjust(top=(1-0.14/rows))\n return img_to_html(fig)", "def get_plotting_data(each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict):\n result = {}\n phases_zr = [\"P\", \"pP\", \"sP\", \"PP\", \"S\", \"sS\", \"SS\"]\n phases_t = [\"ScS\", \"S\", \"sS\", \"SS\"]\n conditions = {\n \"P\": {\n \"exclude_p\": False,\n \"exclude_s\": True\n },\n \"pP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"sP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"PP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"S\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"sS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"SS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"ScS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"surface_z\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_r\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_t\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n }\n # we can exrtact the information from the misfit_windows in the order of the pdf output.\n # order will be z,r,t[,surface_z,surface_r,surface_t]\n rep_net_sta = sorted(event_depth_dict.keys())[0]\n event_depth_this_event = event_depth_dict[rep_net_sta]\n if (event_depth_this_event > SURFACE_THRESHOLD):\n category_list = [\"z\", \"r\", \"t\"]\n category_phases = [phases_zr, phases_zr, phases_t]\n else:\n category_list = [\"z\", \"r\", \"t\", \"surface_z\", \"surface_r\", \"surface_t\"]\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\"], [\"surface_r\"], [\"surface_t\"]]\n for each_iteration in iterations_list:\n result[each_iteration] = {}\n for each_category, each_category_phases in zip(category_list, category_phases):\n result[each_iteration][each_category] = []\n for each_category_phase in each_category_phases:\n phase_condition = conditions[each_category_phase]\n cc = get_windows_cc(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n cc = cc[cc >= 0]\n deltat = get_windows_deltat(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n deltat = deltat[np.abs(deltat) <= 10]\n similarity = get_windows_similarity(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n similarity = similarity[similarity >= 0]\n result[each_iteration][each_category].append(\n {\"net_sta\": get_windows_net_sta(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase),\n \"cc\": cc,\n \"deltat\": deltat,\n 
\"similarity\": similarity,\n }\n )\n # result:dict->each_iteration:dict->each_category:list as the dict showed before, we should return the category_phases\n # we should combine the surface wave phases to one page\n if (len(category_phases) == 6):\n for each_iteration in iterations_list:\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\", \"surface_r\", \"surface_t\"]]\n category_list = [\"z\", \"r\", \"t\", \"surface\"]\n result[each_iteration][\"surface\"] = []\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_z\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_r\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_t\"][0])\n del result[each_iteration][\"surface_z\"]\n del result[each_iteration][\"surface_r\"]\n del result[each_iteration][\"surface_t\"]\n\n return result, category_phases, category_list", "def d(self, df):\n # Get variable names\n var = [key for key, _ in self.marginals.items()]\n df_u = self.sample2pr(df)[var]\n # Evaluate copula density\n l_copula = self.copula.d(df_u.values)\n # Evaluate marginal densities\n L_marginals = zeros((df.shape[0], len(var)))\n for i, v in enumerate(var):\n L_marginals[:, i] = self.marginals[v].d(df[v])\n l_marginals = prod(L_marginals, axis=1)\n\n return l_copula * l_marginals", "def rate_density(self, value):\n\n # TODO: analyse for certain that log units cancel out\n # with the change in occr\n\n if value.ndim == 2:\n value = value.T\n\n R_i = np.digitize(value[0], self._R_boundaries) - 1\n P_i = np.digitize(value[1], self._P_boundaries) - 1\n\n # Remove the ones out of bounds (oob_mask = out of bounds mask)\n oob_mask = np.zeros_like(R_i, dtype=bool)\n oob_mask = oob_mask | ((R_i < 0) | (R_i >= np.shape(self.occr)[0]))\n oob_mask = oob_mask | ((P_i < 0) | (P_i >= len(self._P_boundaries)-1))\n\n R_i = R_i[~oob_mask]\n P_i = P_i[~oob_mask]\n\n return self.occr[R_i] * self._cpf_grid[R_i, P_i]", "def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F150W', grism='GR150R'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRISS'\n h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'GR150R':\n h['GRISM'] = 'GR150R', 'Spectral trace along X'\n else:\n h['GRISM'] = 'GR150C', 'Spectral trace along Y'\n \n wcs = pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def contingency(self, scale, distrib=True, dataname=''):\n print 'Generating the plot ...'\n\n cont = np.zeros((scale, scale))\n minLat, maxLat, minLon, maxLon = self.city[1]\n normLat = scale / (maxLat - minLat)\n normLon = scale / (maxLon - minLon)\n\n # syn = (index, rel index, class)\n for i in range(self.dataset.shape[0]):\n posy = 
int(((self.dataset[i][0] - minLat) * normLat))\n posx = int(((self.dataset[i][1] - minLon) * normLon))\n # print posx,posy,data[i][0],data[i][1], normLat, normLon\n try:\n if distrib:\n cont[scale - posy - 1, posx - 1] += 1\n else:\n cont[scale - posy - 1, posx - 1] = 1\n except IndexError:\n print self.dataset[i][0], self.dataset[i][1]\n if distrib:\n cont = cont / np.max(cont)\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111)\n plt.title('Density ')\n\n plt.imshow(cont, interpolation='bicubic', cmap=cm.gist_yarg)\n vmax = np.max(cont)\n # vmin=np.min(cont)\n\n if distrib:\n plt.colorbar(ticks=np.round(np.linspace(0, 1, 10), 2),\n orientation='vertical')\n nfile = self.application + '-' + dataname\n\n fig.savefig(homepath + 'Results/' + self.city[2] + '-' + nfile + '.pdf', orientation='landscape', format='pdf')\n\n #plt.show()", "def distribution_horizontale(args):\n number_files = [2,5,10,20];\n nbreFileNotDisplay = 0;\n comment = \"\";\n num_bins = args[\"num_bins\"];\n rep = args[\"path_save\"]+args[\"correction\"]+\\\n \"/data_p_\"+str(args[\"p_value\"])+\"/distribution/\";\n w = 4; h = 1; # width = largueur, height = longueur\n fig = plt.figure( figsize=(w,h) ); \n cpt_ax1 = 0;\n for num in number_files:\n print(\"num = \", num)\n num = int(num)\n cpt_ax1 += 1;#cpt = num; # cpt += 1\n \n # ax1\n ax1 = fig.add_subplot(2,len(number_files),cpt_ax1);\n df = pd.read_csv(rep+args[\"fichier_prefix\"] +str(num)+args[\"ext\"], \\\n names=[\"cpt\",\"moy_dc\",\"moy_dh\", \"nbre_aretes_matE\", \"correl_dh_dl\"], \\\n sep=';')\n N_graphs = df[\"moy_dc\"].count()\n \n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dc\"])\n num_bins = df[\"moy_dc\"].max()+1\n bins = range(0,int(num_bins)); bins = range(0, 100)\n print(\"---> bins = \", bins, \" min = \",df[\"moy_dc\"].min(), \\\n \" max = \",df[\"moy_dc\"].max())\n \n max_count_dl, max_count_dh = count_max_df(df)\n \n sns.distplot(df[\"moy_dc\"], ax = ax1, bins = bins, kde = False)\n ax1.set(xlabel= \"moy_distance_correction\", ylabel= \"nombre_graphe\", \\\n title = \"distance de correction pour \\n \"+ str(num)+\\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma)+ \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean))\n ax1.plot([num+1,num+1], (0,max_count_dl), 'r--' )\n ax1.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax1.get_yticks()])\n \n # ax2\n cpt_ax2 = cpt_ax1 +len(number_files); #cpt = num+len(number_files); # cpt +=1 ;\n ax2 = fig.add_subplot(2,len(number_files),cpt_ax2);\n N_graphs = df[\"moy_dh\"].count()\n # best fit of data\n (mu, sigma) = norm.fit(df[\"moy_dh\"])\n \n num_bins = df[\"moy_dh\"].max()+1\n bins = range(0 ,int(num_bins)); bins = range(0, 100)\n\n sns.distplot(df[\"moy_dh\"], ax = ax2, bins = bins, kde = False, color = 'red')\n ax2.set(xlabel= \"moy_distance_hamming\", ylabel= \"nombre_graphe\", \\\n title = \"distance de Hamming pour \\n \"+ str(num)+ \\\n \" cases modifiees \\n $\\mu=%.3f,\\ \\sigma=%.3f$, \" %(mu, sigma) + \\\n \" \\n $aretes = %.3f$\" %(df[\"nbre_aretes_matE\"].mean()))\n# ax2.set_xticklabels(bins, rotation=90)\n ax2.plot([num+1,num+1], (0,max_count_dh), 'r--' )\n ax2.set_yticklabels(['{:3.2f}%'.format(x*100/N_graphs) \\\n for x in ax2.get_yticks()])\n \n for ax in [ax1,ax2]:\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(8)\n \n# plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.grid(True)\n comment += \"_horizontale\";\n 
plt.savefig(args[\"path_save\"]+args[\"correction\"]+\"/courbes/\"+\\\n \"distributionHorizontale_k_0_\"+str(number_files[len(number_files)-1])+\\\n \"_\"+comment+\".jpeg\", \\\n dpi= 190)\n pass", "def contrast_pdf(contdc, contdc_sigma, dc_tru, dc_sigma, contrast_axis, npts=8000, display=False):\n\n dc_axis = np.linspace(dc_tru - 8 * dc_sigma, dc_tru + 8 * dc_sigma, npts)\n dc_mesh, contrast_mesh = np.meshgrid(dc_axis, contrast_axis)\n contdc_mesh = dc_mesh * contrast_mesh\n\n pdf_contdc = scipy.stats.rice.pdf(contdc_mesh, contdc / contdc_sigma, scale=contdc_sigma, loc=0.)\n pdf_dc, _ = norm_pdf(dc_tru, dc_sigma, x=dc_mesh)\n joint_pdf = pdf_contdc * pdf_dc\n\n # normalise joint PDF\n area = np.trapz(np.trapz(joint_pdf, contdc_mesh, axis=0), dc_axis)\n joint_pdf /= area\n\n # calculate the ratio pdf\n integrand = abs(dc_mesh) * joint_pdf\n contrast_pdf = np.trapz(integrand, dc_mesh, axis=1)\n\n if display:\n plt.figure()\n plt.imshow(pdf_contdc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(pdf_dc)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(joint_pdf)\n plt.colorbar()\n\n plt.figure()\n plt.imshow(integrand)\n plt.colorbar()\n\n plt.figure()\n plt.plot(contrast_axis, contrast_pdf)\n\n plt.show()\n\n return contrast_pdf", "def _core_calc_design(self,prof) :\n\t\tlp_list,ld_list = [],[]\n\t\tcp_list,cd_list = [],[]\n\t\t\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\t\n\t\t\tLp = np.ones(prof.n_sample)\n\t\t\tLd = np.ones(prof.n_sample)\n\t\t\n\t\t\t# Get regressor values\n\t\t\tX_p = [np.log(prof.var[:,reg-1]) for reg in reg_p ]\n\t\t\tX_d = [np.log(prof.var[:,reg-1]) for reg in reg_d ]\n\t\t\t\n\t\t\tLp = np.vstack((Lp,np.array(X_p))).T\n\t\t\tLd = np.vstack((Ld,np.array(X_d))).T\t\t\t\n\n\t\t\t# Calculate Cp\n\t\t\tCp = np.dot(LA.inv(np.dot(Lp.T,Lp)),Lp.T)\n\t\t\tCd = np.dot(LA.inv(np.dot(Ld.T,Ld)),Ld.T)\n\t\t\t# Append Lp,Ld,Cp and Cd to relevant lists\n\t\t\tlp_list.append(Lp)\n\t\t\tld_list.append(Ld)\n\t\t\tcp_list.append(Cp)\n\t\t\tcd_list.append(Cd)\t\t\t\n\t\treturn (lp_list,ld_list,cp_list,cd_list)", "def etio_by_csa(df):\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 2, figsize=(9, 9))#, sharex=True)\n sns.despine(top=True, bottom=True)\n f.suptitle(\"Etiology of Central Events, Grouped by %CSA\")\n\n OSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Mainly OSA\"], return_df=True).sort_values(\"Dx\")\n OSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Combined OSA/CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_predom_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Predominantly CSA\"], return_df=True).sort_values(\"Dx\")\n CSA_pure_hist = histo_dx_includes(df.loc[df['BaseDx'] == \"Pure CSA\"], return_df=True).sort_values(\"Dx\")\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n # Pure OSA\n sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_pure_hist, ax=axes[0,0])\n axes[0, 0].set(xlabel=\"\", ylabel=\"<10% CSA\")\n osa_pure_wedges, _, _ = axes[0, 1].pie(OSA_pure_hist['Count'], autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[0, 1].legend(osa_pure_wedges, OSA_pure_hist['Dx'], loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom OSA\n sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_predom_hist, ax=axes[1,0])\n 
axes[1, 0].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n osa_predom_wedges, _, _ = axes[1, 1].pie(OSA_predom_hist['Count'], autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[1, 1].legend(osa_predom_wedges, OSA_predom_hist['Dx'], loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Predom CSA\n sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_predom_hist, ax=axes[2, 0])\n axes[2, 0].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n csa_predom_wedges, _, _ = axes[2, 1].pie(CSA_predom_hist['Count'], autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[2, 1].legend(csa_predom_wedges, CSA_predom_hist['Dx'], loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n\n # Pure CSA\n sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_pure_hist, ax=axes[3,0])\n axes[3, 0].set(xlabel=\"Patients With Each Etiology Contributing to Central Events\", ylabel=\">90% CSA\")\n\n csa_pure_wedges, _, _ = axes[3, 1].pie(CSA_pure_hist['Count'], autopct=\"%1.1f%%\", startangle=0, pctdistance=1.25,\n textprops={'size': 'x-small'}, colors=sns.color_palette(\"cubehelix\", 6),\n wedgeprops={'edgecolor': 'black'})\n axes[3, 1].legend(csa_pure_wedges, CSA_pure_hist['Dx'], loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1))\n axes[3, 1].set(xlabel=\"\\nProportion with each etiology\\nContributing to Central Events\")\n\n # Combined X axis for L side\n axes[3, 0].get_shared_x_axes().join(axes[3,0], axes[2,0], axes[1,0], axes[0,0])\n axes[0, 0].set_xticklabels(\"\")\n axes[1, 0].set_xticklabels(\"\")\n axes[2, 0].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0, 0].autoscale()\n axes[1, 0].autoscale()\n axes[2, 0].autoscale()\n axes[3, 0].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 0.95]) # .95 to leave space for title\n f.savefig('Etio by percentage CSA.png', dpi=100)\n # plt.show()", "def make_csd(shape, scale, npart, show_plot=False):\r\n if shape == 0:\r\n rads = [scale + 0 * x for x in range(npart)]\r\n else:\r\n rads = lognorm.rvs(shape, scale=scale, size=npart)\r\n with open('diameters.txt', 'w') as fout:\r\n for rad in rads:\r\n fout.write('{0}\\n'.format(rad))\r\n if shape == 0:\r\n xpos = linspace(scale / 2, scale * 2, 100)\r\n else:\r\n xpos = linspace(lognorm.ppf(0.01, shape, scale=scale),\r\n lognorm.ppf(0.99, shape, scale=scale), 100)\r\n plt.plot(xpos, lognorm.pdf(xpos, shape, scale=scale))\r\n plt.hist(rads, normed=True)\r\n plt.savefig('packing_histogram.png')\r\n plt.savefig('packing_histogram.pdf')\r\n if show_plot:\r\n plt.show()", "def cont_comps(srs: dd.Series, bins: int) -> Dict[str, Any]:\n\n data: Dict[str, Any] = {}\n\n ## if cfg.stats_enable or cfg.hist_enable or\n # calculate the total number of rows then drop the missing values\n data[\"nrows\"] = srs.shape[0]\n srs = srs.dropna()\n ## if cfg.stats_enable\n # number of not null (present) values\n data[\"npres\"] = srs.shape[0]\n # remove infinite values\n srs = srs[~srs.isin({np.inf, -np.inf})]\n\n # shared computations\n ## if cfg.stats_enable or cfg.hist_enable or cfg.qqplot_enable and cfg.insights_enable:\n data[\"min\"], data[\"max\"] = srs.min(), srs.max()\n ## if cfg.hist_enable or cfg.qqplot_enable and cfg.ingsights_enable:\n data[\"hist\"] = da.histogram(srs, bins=bins, range=[data[\"min\"], data[\"max\"]])\n ## if cfg.insights_enable and (cfg.qqplot_enable or 
cfg.hist_enable):\n data[\"norm\"] = normaltest(data[\"hist\"][0])\n ## if cfg.qqplot_enable\n data[\"qntls\"] = srs.quantile(np.linspace(0.01, 0.99, 99))\n ## elif cfg.stats_enable\n ## data[\"qntls\"] = srs.quantile([0.05, 0.25, 0.5, 0.75, 0.95])\n ## elif cfg.boxplot_enable\n ## data[\"qntls\"] = srs.quantile([0.25, 0.5, 0.75])\n ## if cfg.stats_enable or cfg.hist_enable and cfg.insights_enable:\n data[\"skew\"] = skew(srs)\n\n # if cfg.stats_enable\n data[\"nuniq\"] = srs.nunique()\n data[\"nreals\"] = srs.shape[0]\n data[\"nzero\"] = (srs == 0).sum()\n data[\"nneg\"] = (srs < 0).sum()\n data[\"mean\"] = srs.mean()\n data[\"std\"] = srs.std()\n data[\"kurt\"] = kurtosis(srs)\n data[\"mem_use\"] = srs.memory_usage(deep=True)\n\n ## if cfg.hist_enable and cfg.insight_enable\n data[\"chisq\"] = chisquare(data[\"hist\"][0])\n\n # compute the density histogram\n data[\"dens\"] = da.histogram(srs, bins=bins, range=[data[\"min\"], data[\"max\"]], density=True)\n # gaussian kernel density estimate\n data[\"kde\"] = gaussian_kde(\n srs.map_partitions(lambda x: x.sample(min(1000, x.shape[0])), meta=srs)\n )\n\n ## if cfg.box_enable\n data.update(calc_box(srs, data[\"qntls\"]))\n\n return data", "def cdf_discretize(self,variables=[]):\n #the errors in the code are due to the deleted files that require packages to be installed on the computer\n for i in variables:\n x=unique(self.data[:,i])\n m=max(x)-min(x)\n f=lambda x0,y0: array([m*(x0+y0)/(1+m**2), (x0*m+y0)/(1+m**2)])\n cdf=array([np.sum(self.data[:,i]<=t) for t in x])\n d=array([norm(array([x0,cdf[k]])-f(x0,cdf[k])) for k,x0 in\\\n enumerate(x)])", "def getUnscaledPDFs(makePlots=False):\n from ROOT import TFile, TH1D, gROOT\n\n pLo, pHi, ppb = 0, 30, 0.03 # requires ppb=0.03, the fit parameters are optimized for it\n nB = int((pHi-pLo)/ppb)\n\n # output file\n rOut = \"%s/data/specPDFs-sf7.root\" % dsi.latSWDir\n tf = TFile(rOut,\"RECREATE\")\n td = gROOT.CurrentDirectory()\n\n # print(\"Generating unscaled PDFs, eLo %.1f eHi %.1f epb %.2f: %s\" % (eLo, eHi, epb, rOut))\n\n # === 1. axion flux\n\n # axion flux scale.\n # NOTE: to do the fit and set a new limit, we set g_ae=1.\n # To plot an expected flux, we would use a real value.\n # Redondo's note: I calculated the flux using gae = 0.511*10^-10\n # for other values of gae use: FLUX = Table*[gae/(0.511*10^-10)]^2\n gae = 1\n gRat = (gae / 5.11e-11)\n redondoScale = 1e19 * gRat**2 # convert table to [flux / (keV cm^2 d)]\n\n axData = []\n with open(\"%s/data/redondoFlux.txt\" % dsi.latSWDir) as f1: # 23577 entries\n lines = f1.readlines()[11:]\n for line in lines:\n data = line.split()\n axData.append([float(data[0]),float(data[1])])\n axData = np.array(axData)\n\n # === 2. ge photoelectric xs\n phoData = []\n with open(\"%s/data/ge76peXS.txt\" % dsi.latSWDir) as f2: # 2499 entries, 0.01 kev intervals\n lines = f2.readlines()\n for line in lines:\n data = line.split()\n phoData.append([float(data[0]),float(data[1])])\n phoData = np.array(phoData)\n\n # === 3. 
tritium\n tritData = []\n with open(\"%s/data/TritiumSpectrum.txt\" % dsi.latSWDir) as f3: # 20000 entries\n lines = f3.readlines()[1:]\n for line in lines:\n data = line.split()\n conv = float(data[2]) # raw spectrum convolved w/ ge cross section\n if conv < 0: conv = 0.\n tritData.append([float(data[1]),conv])\n tritData = np.array(tritData)\n\n # NOTE: check sandbox/th1.py for examples of manually filling TH1D's and verifying wl.GetHisto and wl.npTH1D.\n\n # ROOT output\n h1 = TH1D(\"h1\",\"photoelectric\",nB,pLo,pHi) # [cm^2 / kg]\n h2 = TH1D(\"h2\",\"axioelectric\",nB,pLo,pHi) # [cm^2 / kg]\n h3 = TH1D(\"h3\",\"axion flux, gae=1\",nB,pLo,pHi) # [cts / (keV cm^2 d)]\n h4 = TH1D(\"h4\",\"convolved flux\",nB,pLo,pHi) # [cts / (keV d kg)]\n h5 = TH1D(\"h5\",\"tritium\",nB,pLo,pHi) # [cts] (normalized to 1)\n\n # manually fill ROOT histos (don't normalize yet)\n for iB in range(nB+1):\n ctr = (iB + 0.5)*ppb + pLo\n bLo, bHi = ctr - ppb/2, ctr + ppb/2\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\",category=RuntimeWarning)\n\n # if ma>0, we ignore entries with E <= m.\n ma=0 # this used to be a parameter but it's deprecated.\n\n # photoelectric x-section [cm^2 / kg]\n idx = np.where((phoData[:,0] >= bLo) & (phoData[:,0] < bHi))\n pho = np.mean(phoData[idx][:,1]) * 1000\n if np.isnan(pho) or len(phoData[idx][:,1]) == 0: pho = 0.\n if phoData[idx][:,1].any() <= ma: pho = 0.\n h1.SetBinContent(iB+1,pho)\n\n # axioelectric x-section [cm^2 / kg]\n if ctr > ma: axio = pho * wl.sig_ae(ctr, ma)\n else: axio=0.\n h2.SetBinContent(iB+1,axio)\n\n # axion flux [flux / (cm^2 d keV)]\n idx = np.where((axData[:,0] >= bLo) & (axData[:,0] < bHi))\n flux = np.mean(axData[idx][:,1]) * redondoScale\n if np.isnan(flux): flux = 0.\n h3.SetBinContent(iB+1, flux)\n # YES, adding 1 here. keeps the 6.6 keV line in the proper place for all binnings.\n # it must have to do w/ the way i'm reading in the data from the text files ...\n\n # axion flux PDF [flux / (keV d kg)]\n axConv = axio * flux\n h4.SetBinContent(iB+1, axConv)\n\n # tritium\n idx = np.where((tritData[:,0] >= bLo) & (tritData[:,0] <= bHi))\n trit = np.mean(tritData[idx][:,1])\n if np.isnan(trit): trit = 0.\n h5.SetBinContent(iB+1, trit)\n\n # Pb210 (from separate file)\n tf2 = TFile(\"%s/data/Pb210PDFs.root\" % dsi.latSWDir)\n h6 = tf2.Get(\"hPb210TDL\") # with TDL\n h7 = tf2.Get(\"hPb210\") # without TDL\n h6.SetName(\"h6\")\n h7.SetName(\"h7\")\n\n if makePlots:\n\n # === 1. verify the numpy histogram and ROOT histogram give the same output. OK\n\n x, h210, xpb = wl.npTH1D(h7)\n iE = np.where((x > 45) & (x < 48))\n plt.plot(x[iE], h210[iE], ls='steps', lw=3, c='b')\n plt.xlabel(\"Energy (keV)\", ha='right', x=1)\n plt.tight_layout()\n plt.savefig(\"%s/plots/sf-pk210.pdf\" % dsi.latSWDir)\n\n from ROOT import TCanvas\n c = TCanvas()\n h7.GetXaxis().SetTitle(\"Energy (keV)\")\n h7.GetXaxis().SetRangeUser(45, 48)\n h7.Draw('hist')\n c.Print('%s/plots/sf-pb210th1d.pdf' % dsi.latSWDir)\n\n # === 2. 
print ROOT histos to match w/ numpy histos\n\n c.Clear(); h1.Draw(\"hist\"); c.Print(\"%s/plots/root-sigGe.pdf\" % dsi.latSWDir)\n c.Clear(); h2.Draw(\"hist\"); c.Print(\"%s/plots/root-sigAe.pdf\" % dsi.latSWDir)\n c.Clear(); h3.Draw(\"hist\"); c.Print(\"%s/plots/root-axFlux.pdf\" % dsi.latSWDir)\n c.Clear(); h4.Draw(\"hist\"); c.Print(\"%s/plots/root-axPDF.pdf\" % dsi.latSWDir)\n c.Clear(); h5.Draw(\"hist\"); c.Print(\"%s/plots/root-trit.pdf\" % dsi.latSWDir)\n c.Clear(); h6.Draw(\"hist\"); c.Print(\"%s/plots/root-pb210TDL.pdf\" % dsi.latSWDir)\n c.Clear(); h7.Draw(\"hist\"); c.Print(\"%s/plots/root-pb210.pdf\" % dsi.latSWDir)\n\n gROOT.cd(td.GetPath())\n h1.Write()\n h2.Write()\n h3.Write()\n h4.Write()\n h5.Write()\n h6.Write()\n h7.Write()\n tf.Close()", "def plot_hist_snfit_sncosmo(self):\n \n self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_'+str(self.width)+'.txt')\n self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_'+str(self.width)+'.txt')\n\n# self.read_sncosmo(path='../sugar_analysis_data/results/res_salt2_SNF_GF.txt')\n# self.read_snfit_results(snfit_res_path='../sugar_analysis_data/results/results_snfit_GF.txt')\n\n self.diff_x0_sncosmo = []\n self.diff_x0_err_sncosmo = []\n self.diff_x1_sncosmo = []\n self.diff_x1_err_sncosmo = [] \n self.diff_c_sncosmo = []\n self.diff_c_err_sncosmo = [] \n self.diff_mb_sncosmo = []\n self.diff_mb_err_sncosmo = [] \n self.diff_cov_x0_x1_sncosmo = []\n self.diff_cov_x0_c_sncosmo = []\n self.diff_cov_x1_c_sncosmo = []\n self.diff_cov_mb_x1_sncosmo = []\n self.diff_cov_mb_c_sncosmo = []\n self.diff_chi2 = []\n for i in range (len(self.sn_name)):\n for j in range (len(self.sncosmo_sn_name)):\n if self.sn_name[i] == self.sncosmo_sn_name[j]:\n if np.abs(self.x1[i] - self.sncosmo_x1[j]) < 0.02:\n self.diff_x0_sncosmo.append(self.x0[i] - self.sncosmo_x0[j])\n self.diff_x0_err_sncosmo.append(self.x0_err[i] - self.sncosmo_x0_err[j])\n self.diff_x1_sncosmo.append(self.x1[i] - self.sncosmo_x1[j])\n self.diff_x1_err_sncosmo.append(self.x1_err[i] - self.sncosmo_x1_err[j]) \n self.diff_c_sncosmo.append(self.c[i] - self.sncosmo_c[j])\n self.diff_c_err_sncosmo.append(self.c_err[i] - self.sncosmo_c_err[j]) \n self.diff_mb_sncosmo.append(self.mb[i] - self.sncosmo_mb[j])\n self.diff_mb_err_sncosmo.append(self.mb_err[i] - self.sncosmo_mb_err[j])\n self.diff_chi2.append(self.snfit_chi2[i] - self.sncosmo_chi2[j])\n# self.diff_cov_x0_x1_sncosmo.append()\n# self.diff_cov_x0_c_sncosmo.append()\n# self.diff_cov_x1_c_sncosmo.append()\n# self.diff_cov_mb_x1_sncosmo.append()\n# self.diff_cov_mb_c_sncosmo.append()\n else:\n print self.x1[i] - self.sncosmo_x1[j], self.sn_name[i],self.sncosmo_sn_name[j], self.x1[i], self.sncosmo_x1[j]\n\n# rcParams['font.size'] = 16.\n# font = {'family': 'normal', 'size': 16}\n# rc('axes', linewidth=1.5)\n# rc(\"text\", usetex=True)\n# rc('font', family='serif')\n# rc('font', serif='Times')\n# rc('legend', fontsize=25)\n# rc('xtick.major', size=5, width=1.5)\n# rc('ytick.major', size=5, width=1.5)\n# rc('xtick.minor', size=3, width=1)\n# rc('ytick.minor', size=3, width=1)\n# fig = plt.figure(figsize=(8.,8.)) \n# \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0_sncosmo,50,label='$\\Delta$ x0_'+str(self.width))\n ax0_2.hist(self.diff_x0_err_sncosmo,50,label='$\\Delta$ x0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n 
ax0_2.set_ylabel('N')\n# ax0_1.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x0_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1_sncosmo,50,label='$\\Delta$ X1_'+str(self.width))\n ax0_2.hist(self.diff_x1_err_sncosmo,50,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/x1_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c_sncosmo,50,label='$\\Delta$ Color_'+str(self.width))\n ax0_2.hist(self.diff_c_err_sncosmo,50,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/color_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb_sncosmo,50,label='$\\Delta$ mb_'+str(self.width))\n ax0_2.hist(self.diff_mb_err_sncosmo,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n plt.ticklabel_format(axis='x', style='scientific', scilimits=(-1, 2))\n pdffile = '../sugar_analysis_data/results/mb_plot_'+str(self.width)+'.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n plt.hist(self.diff_chi2,50,label='$\\Delta$ chi2_'+str(self.width))\n pdffile = '../sugar_analysis_data/results/chi2_'+str(self.width)+'.pdf'\n plt.legend()\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()", "def startrek_starships():\n pdf = pd.DataFrame({\n 'uid': [\n 'NCC-1701',\n 'NCC-74656',\n 'NCC-1031',\n 'NCC-1764',\n 'NX-01'\n ],\n 'name': [\n 'USS Enterprise',\n 'USS Voyager',\n 'USS Discovery',\n 'USS Defiant',\n 'Enterprise'\n ]\n })\n return pdf", "def distributions(\n data,\n filename: str,\n continuous_kind: str = \"count\",\n nrows: int = 4,\n ncols: int = 3,\n quality: str = \"medium\",\n variables: Optional[List[str]] = None,\n sort: bool = True,\n):\n # Limit variables\n if variables is not None:\n data = data[variables]\n\n # Check filename, adding \".pdf\" if needed\n if type(filename) == str:\n filename = Path(filename)\n if filename.suffix != \"pdf\":\n filename = Path(str(filename) + \".pdf\")\n\n # Set DPI\n dpi_dict = {\"low\": 150, \"medium\": 300, \"high\": 1200}\n dpi = dpi_dict.get(quality, None)\n if dpi is None:\n raise ValueError(f\"quality was set to '{quality}' which is not a valid value\")\n\n # Make sure file is writeable\n try:\n with PdfPages(filename) as pdf:\n pass\n except OSError:\n raise OSError(f\"Unable to write to '{filename}'\")\n\n with PdfPages(filename) as pdf:\n # Determine the number of pages\n plots_per_page = nrows * ncols\n total_pages = (len(data.columns) + (plots_per_page - 1)) // plots_per_page\n click.echo(\n 
f\"Generating a {total_pages} page PDF for {len(data.columns):,} variables\"\n )\n # Starting plot space\n page_num = 1\n row_idx = 0\n col_idx = 0\n # Loop through all variables\n if sort:\n variables = sorted(list(data))\n else:\n variables = list(data)\n for variable in variables:\n if row_idx == 0 and col_idx == 0:\n # New Page\n _ = plt.subplots(squeeze=False, figsize=(8.5, 11), dpi=dpi)\n plt.suptitle(f\"Page {page_num}\")\n # Plot non-NA values and record the number of those separately (otherwise they can cause issues with generating a KDE)\n ax = plt.subplot2grid((nrows, ncols), (row_idx, col_idx))\n if str(data.dtypes[variable]) == \"category\": # binary and categorical\n sns.countplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n else:\n if continuous_kind == \"count\":\n sns.distplot(\n x=data.loc[~data[variable].isna(), variable],\n kde=False,\n norm_hist=False,\n hist_kws={\"alpha\": 1},\n ax=ax,\n )\n elif continuous_kind == \"box\":\n sns.boxplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n elif continuous_kind == \"violin\":\n sns.violinplot(x=data.loc[~data[variable].isna(), variable], ax=ax)\n elif continuous_kind == \"qq\":\n # QQ plots have to be sub-sampled otherwise there are too many points and the pdf is blank\n d = data.loc[~data[variable].isna(), variable]\n if len(d) > 400:\n d = d.sample(n=400, random_state=1)\n qqplot(d, line=\"s\", fit=True, ax=ax, color=\"steelblue\", alpha=0.7)\n else:\n raise ValueError(\n \"Unknown value for 'continuous_kind': must be one of {'count', 'box', 'violin', 'qq'}\"\n )\n # Update xlabel with NA information\n na_count = data[variable].isna().sum()\n ax.set_xlabel(\n f\"{variable}\\n{na_count:,} of {len(data[variable]):,} are NA ({na_count / len(data[variable]):.2%})\"\n )\n # Move to next plot space\n col_idx += 1\n if col_idx == ncols: # Wrap to next row\n col_idx = 0\n row_idx += 1\n if row_idx == nrows: # Wrap to next page\n row_idx = 0\n page_num += 1\n # Save the current page\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n pdf.savefig()\n plt.close()\n # Save final page, unless a full page was finished and the page_num is now more than total_pages\n if page_num == total_pages:\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n pdf.savefig()\n plt.close()\n # Add metadata\n d = pdf.infodict()\n d[\"Title\"] = \"Multipage PDF Example\"\n d[\"Author\"] = f\"CLARITE {clarite_version}\"\n d[\"Subject\"] = \"Distribution plots\"\n d[\"CreationDate\"] = datetime.datetime.today()\n d[\"ModDate\"] = datetime.datetime.today()", "def plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, prec_wrt_L=False, plot_out=None):\n\n nr_components = settings['nr_components']\n plot = {'data': [], 'layout': {}}\n\n #set up drop down menu\n plot['layout']['updatemenus']=[{'xanchor':'left',\n 'yanchor':'top',\n 'x':1.02,\n 'y':0.6,\n 'buttons':[],\n 'active': 0,\n }]\n\n # component weights\n weights_bg = []\n weights_contact = []\n for component in range(nr_components):\n weights_bg.append(parameters_dict['weight_bg_'+str(component)][0])\n weights_contact.append(parameters_dict['weight_contact_'+str(component)][0])\n\n\n min_coupling_xaxis = -1\n max_coupling_xaxis = +1\n\n\n ab_list = evaluation_set_kde['contact'].keys()\n for ab in ab_list:\n\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['bg'][ab],\n mode='none',\n fill='tozeroy',\n fillcolor='rgb(50,50,205)',\n opacity=0.2,\n name='training data bg',\n showlegend=True,\n hoverinfo=None,\n 
visible=False\n )\n )\n\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['contact'][ab],\n fill='tonexty',\n fillcolor='rgb(50,205,50)',\n opacity=0.2,\n mode='none',\n name='training data contact',\n showlegend=True,\n hoverinfo=None,\n visible=False\n )\n )\n\n\n means = []\n sd = []\n for component in range(nr_components):\n means.append(parameters_dict['mu_'+str(component)][ab])\n try:\n if prec_wrt_L:\n sd.append(np.sqrt(1.0/(parameters_dict['prec_'+str(component)][ab] * 142) )) #in case precision is spec depending on L=142\n else:\n sd.append(np.sqrt(1.0/parameters_dict['prec_'+str(component)][ab]))\n except ZeroDivisionError as e:\n print(e)\n sd.append(0) #in case prec is zero bc optimizer tries strange values\n\n ### add components\n for component in range(nr_components):\n gaussian_component_density = get_coordinates_for_1d_gaussian(\n min_coupling_xaxis,\n max_coupling_xaxis,\n means[component],\n sd[component]\n )\n\n plot['data'].append(go.Scatter(x=gaussian_component_density[0],\n y=gaussian_component_density[1],\n mode='lines',\n name='component ' + str(component) + ' for ' + AB[ab],\n line=dict(dash='dot',\n color=colors[component]),\n showlegend=False,\n visible=False\n )\n )\n\n ### add mixture if there are more than one component\n if (nr_components > 1):\n gaussian_mixture_x_contact, gaussian_mixture_y_contact = get_coordinates_for_1d_gaussian_mixture(\n min_coupling_xaxis, max_coupling_xaxis,\n weights_contact,\n means,\n sd\n )\n\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_contact,\n y=gaussian_mixture_y_contact,\n mode='lines',\n name='mixture (contact) for ' + AB[ab],\n line=dict(color='rgb(50,205,50)',\n width = 3),\n showlegend=False,\n visible=False\n )\n )\n\n if (nr_components > 1):\n gaussian_mixture_x_bg, gaussian_mixture_y_bg = get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_bg,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_bg,\n y=gaussian_mixture_y_bg,\n mode='lines',\n name='mixture (bg) for ' + AB[ab],\n line=dict(color='rgb(50,50,205 )',\n width = 3),\n showlegend=False,\n visible=False\n )\n )\n\n #set up drop down option\n nr_plots_per_ab = 2 + nr_components\n if (nr_components > 1):\n nr_plots_per_ab += 2\n\n plot['layout']['updatemenus'][0]['buttons'].append(\n {\n 'args':['visible', [False] * (nr_plots_per_ab) * ab_list.index(ab) +\n [True] * (nr_plots_per_ab) +\n [False] * (nr_plots_per_ab) * (len(ab_list) - ab_list.index(ab) - 1)+\n [True]] ,\n 'label': AB[ab],\n 'method':'restyle'\n })\n\n\n\n if \"regularizer\" in evaluation_set_kde.keys():\n plot['data'].append(\n go.Scatter(\n x=evaluation_set_kde['x_grid'],\n y=evaluation_set_kde['regularizer'],\n mode='lines',\n name='regularization prior',\n line=dict(color='black',\n width=3),\n showlegend=True,\n hoverinfo=None,\n visible=False\n )\n )\n\n\n plot['layout'].update({'title': 'Coupling prior as a gaussian mixture'})\n plot['layout'].update({'xaxis1': {'title': \"coupling values\"}})\n plot['layout'].update({'yaxis1': {'title': \"density\"}})\n plot['layout']['updatemenus'][0]['active']=0\n plot['layout']['yaxis1']['range']=[0,15]\n plot['layout']['font'] = {'size': 18}\n\n if plot_out is not None:\n plotly_plot(plot, filename=plot_out, auto_open=False)\n else:\n return plot", "def precipitation():\n\n return jsonify(prcp_df)", "def warp_factors():\n pdf = pd.DataFrame({\n 'uid': [\n 'NCC-1701',\n 'Narada',\n 'NCC-74656',\n 'NCC-1031',\n 'NCC-1764',\n 'NX-01',\n 'Borg cube',\n None,\n ],\n 'warp': [\n 
9.2,\n np.nan,\n 9.975,\n 9.9,\n 9.2,\n 4,\n None,\n np.nan\n ]\n })\n return pdf", "def CALSPECAbsLineIdentificationinPDF(spectra,pointing,all_titles,object_name,dir_top_images,all_filt,date,figname,tagname,NBIMGPERROW=2):\n \n \n NBSPEC=len(spectra)\n \n MAXIMGROW=max(2,int(m.ceil(float(NBSPEC)/float(NBIMGPERROW))))\n \n \n # fig file specif\n NBIMGROWPERPAGE=5 # number of rows per pages\n PageNum=0 # page counter\n \n figfilename=os.path.join(dir_top_images,figname)\n \n pp = PdfPages(figfilename) # create a pdf file\n \n \n titlepage='WL calibrated 1D Spectra 1D for obj : {} date :{}'.format(object_name,date)\n \n \n all_wl= [] # containers for wavelength\n \n \n for index in np.arange(0,NBSPEC):\n \n \n # new pdf page \n if index%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n f, axarr = plt.subplots(NBIMGROWPERPAGE,NBIMGPERROW,figsize=(25,30))\n f.suptitle(titlepage,size=20)\n \n # index of image in the pdf page \n indexcut=index-PageNum*(NBIMGROWPERPAGE*NBIMGPERROW) \n ix=indexcut%NBIMGPERROW\n iy=indexcut/NBIMGPERROW\n \n \n spec = spectra[index]\n \n # calibrate\n grating_name=get_disperser_filtname(all_filt[index])\n X_Size_Pixels=np.arange(spec.shape[0])\n lambdas = Pixel_To_Lambdas(grating_name,X_Size_Pixels,pointing[index],False)\n \n \n all_wl.append(lambdas)\n \n #plot\n axarr[iy,ix].plot(lambdas,spec,'r-',lw=2,label=tagname)\n \n thetitle=\"{} : {} : {} \".format(index,all_titles[index],all_filt[index])\n axarr[iy,ix].set_title(thetitle,color='blue',fontweight='bold',fontsize=16)\n \n \n #axarr[iy,ix].text(600.,spec.max()*1.1, all_filt[index],verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20)\n axarr[iy,ix].legend(loc='best',fontsize=16)\n axarr[iy,ix].set_xlabel('Wavelength [nm]', fontsize=16)\n axarr[iy,ix].grid(True)\n \n YMIN=0.\n YMAX=spec.max()*1.2\n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA or line == HDELTA or line ==O2B or line == O2Y or line == O2Z:\n axarr[iy,ix].plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='red',lw=0.5)\n axarr[iy,ix].text(line['lambda'],0.9*(YMAX-YMIN),line['label'],verticalalignment='bottom', horizontalalignment='center',color='red', fontweight='bold',fontsize=16)\n \n \n axarr[iy,ix].set_ylim(YMIN,YMAX)\n axarr[iy,ix].set_xlim(np.min(lambdas),np.max(lambdas))\n axarr[iy,ix].set_xlim(0,1200.)\n \n if (index+1)%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n PageNum+=1 # increase page Number\n f.savefig(pp, format='pdf')\n f.show()\n \n \n f.savefig(pp, format='pdf') \n f.show()\n pp.close() \n \n return all_wl", "def embedding_density(\n adata: AnnData,\n # there is no asterisk here for backward compat (previously, there was)\n basis: str = 'umap', # was positional before 1.4.5\n groupby: Optional[str] = None,\n key_added: Optional[str] = None,\n components: Union[str, Sequence[str]] = None,\n) -> None:\n # to ensure that newly created covariates are categorical\n # to test for category numbers\n sanitize_anndata(adata)\n\n logg.info(f'computing density on {basis!r}')\n\n # Test user inputs\n basis = basis.lower()\n\n if basis == 'fa':\n basis = 'draw_graph_fa'\n\n if f'X_{basis}' not in adata.obsm_keys():\n raise ValueError(\n \"Cannot find the embedded representation \"\n f\"`adata.obsm['X_{basis}']`. 
Compute the embedding first.\"\n )\n\n if components is None:\n components = '1,2'\n if isinstance(components, str):\n components = components.split(',')\n components = np.array(components).astype(int) - 1\n\n if len(components) != 2:\n raise ValueError('Please specify exactly 2 components, or `None`.')\n\n if basis == 'diffmap':\n components += 1\n\n if groupby is not None:\n if groupby not in adata.obs:\n raise ValueError(f'Could not find {groupby!r} `.obs` column.')\n\n if adata.obs[groupby].dtype.name != 'category':\n raise ValueError(f'{groupby!r} column does not contain categorical data')\n\n # Define new covariate name\n if key_added is not None:\n density_covariate = key_added\n elif groupby is not None:\n density_covariate = f'{basis}_density_{groupby}'\n else:\n density_covariate = f'{basis}_density'\n\n # Calculate the densities over each category in the groupby column\n if groupby is not None:\n categories = adata.obs[groupby].cat.categories\n\n density_values = np.zeros(adata.n_obs)\n\n for cat in categories:\n cat_mask = adata.obs[groupby] == cat\n embed_x = adata.obsm[f'X_{basis}'][cat_mask, components[0]]\n embed_y = adata.obsm[f'X_{basis}'][cat_mask, components[1]]\n\n dens_embed = _calc_density(embed_x, embed_y)\n density_values[cat_mask] = dens_embed\n\n adata.obs[density_covariate] = density_values\n else: # if groupby is None\n # Calculate the density over the whole embedding without subsetting\n embed_x = adata.obsm[f'X_{basis}'][:, components[0]]\n embed_y = adata.obsm[f'X_{basis}'][:, components[1]]\n\n adata.obs[density_covariate] = _calc_density(embed_x, embed_y)\n\n # Reduce diffmap components for labeling\n # Note: plot_scatter takes care of correcting diffmap components\n # for plotting automatically\n if basis != 'diffmap':\n components += 1\n\n adata.uns[f'{density_covariate}_params'] = dict(\n covariate=groupby, components=components.tolist()\n )\n\n logg.hint(\n f\"added\\n\"\n f\" '{density_covariate}', densities (adata.obs)\\n\"\n f\" '{density_covariate}_params', parameter (adata.uns)\"\n )", "def make_llr_plots(self):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n outdir = os.path.join(self.outdir, 'LLRDistributions')\n mkdir(outdir)\n\n for injkey in self.values.keys():\n\n data = self.values[injkey]\n metric_type = data['h0_fit_to_h0_fid']['metric_val']['type']\n metric_type_pretty = self.tex_axis_label(metric_type)\n h0_fid_metric = self.fid_values[injkey][\n 'h0_fit_to_%s'%self.labels.dict['data']\n ][\n 'metric_val'\n ]\n h1_fid_metric = self.fid_values[injkey][\n 'h1_fit_to_%s'%self.labels.dict['data']\n ][\n 'metric_val'\n ]\n\n h0_fit_to_h0_fid_metrics = np.array(\n data['h0_fit_to_h0_fid']['metric_val']['vals']\n )\n h1_fit_to_h0_fid_metrics = np.array(\n data['h1_fit_to_h0_fid']['metric_val']['vals']\n )\n h0_fit_to_h1_fid_metrics = np.array(\n data['h0_fit_to_h1_fid']['metric_val']['vals']\n )\n h1_fit_to_h1_fid_metrics = np.array(\n data['h1_fit_to_h1_fid']['metric_val']['vals']\n )\n\n # In the case of likelihood, the maximum metric is the better fit.\n # With chi2 metrics the opposite is true, and so we must multiply\n # everything by -1 in order to apply the same treatment.\n if 'chi2' in metric_type:\n logging.info('Converting chi2 metric to likelihood equivalent.')\n h0_fid_metric *= -1\n h1_fid_metric *= -1\n h0_fit_to_h0_fid_metrics *= -1\n h1_fit_to_h0_fid_metrics *= -1\n h0_fit_to_h1_fid_metrics *= -1\n h1_fit_to_h1_fid_metrics *= -1\n\n if h1_fid_metric < h0_fid_metric:\n bestfit = 'h0'\n altfit = 'h1'\n 
critical_value = h0_fid_metric-h1_fid_metric\n else:\n bestfit = 'h1'\n altfit = 'h0'\n critical_value = h1_fid_metric-h0_fid_metric\n\n if bestfit == 'h0':\n llrbest = h0_fit_to_h0_fid_metrics - h1_fit_to_h0_fid_metrics\n llralt = h0_fit_to_h1_fid_metrics - h1_fit_to_h1_fid_metrics\n else:\n llrbest = h1_fit_to_h1_fid_metrics - h0_fit_to_h1_fid_metrics\n llralt = h1_fit_to_h0_fid_metrics - h0_fit_to_h0_fid_metrics\n\n minllr = min(min(llrbest), min(llralt))\n maxllr = max(max(llrbest), max(llralt))\n rangellr = maxllr - minllr\n # Special case for low numbers of trials. Here, the plot\n # can't really be interpreted but the numbers printed on\n # it can still be useful, so we need to make something.\n if self.num_trials < 100:\n binning = np.linspace(minllr - 0.1*rangellr,\n maxllr + 0.1*rangellr,\n 10)\n elif self.num_trials < 2000:\n binning = np.linspace(minllr - 0.1*rangellr,\n maxllr + 0.1*rangellr,\n int(self.num_trials/40))\n else:\n binning = np.linspace(minllr - 0.1*rangellr,\n maxllr + 0.1*rangellr,\n 50)\n binwidth = binning[1]-binning[0]\n\n llrbesthist, llrbestbinedges = np.histogram(llrbest, bins=binning)\n llralthist, llraltbinedges = np.histogram(llralt, bins=binning)\n\n llrhistmax = max(max(llrbesthist), max(llralthist))\n\n best_median = np.median(llrbest)\n\n if self.labels.dict['data_name'] == '':\n inj_name = \"data\"\n else:\n inj_name = \"true %s\"%self.tex_axis_label(\n self.labels.dict['data_name']\n )\n best_name = self.labels.dict['%s_name'%bestfit]\n alt_name = self.labels.dict['%s_name'%altfit]\n\n # Calculate p values\n ## First for the preferred hypothesis based on the fiducial fit\n crit_p_value, unc_crit_p_value = self.calc_p_value(\n llrdist=llralt,\n critical_value=critical_value\n )\n ## Then for the alternate hypothesis based on the fiducial fit\n alt_crit_p_value, alt_unc_crit_p_value = self.calc_p_value(\n llrdist=llrbest,\n critical_value=critical_value\n )\n ## Combine these to give a cls value based on arXiv:1407.5052\n cls_value = (1 - alt_crit_p_value) / (1 - crit_p_value)\n unc_cls_value = cls_value * np.sqrt(\n np.power(alt_unc_crit_p_value/alt_crit_p_value, 2.0) + \\\n np.power(unc_crit_p_value/crit_p_value, 2.0)\n )\n ## Then for the preferred hypothesis based on the median. 
That\n ## is, the case of a median experiment from the distribution\n ## under the preferred hypothesis.\n med_p_value, unc_med_p_value, median_error = self.calc_p_value(\n llrdist=llralt,\n critical_value=best_median,\n median_p_value=True,\n llrbest=llrbest\n )\n\n if metric_type == 'llh':\n plot_title = (r\"\\begin{center}\"\\\n +\"%s %s Event Selection \"%(self.detector,\n self.selection)\\\n +r\"\\\\\"+\" llr Distributions for %s (%i trials)\"%(\n inj_name, self.num_trials)\\\n +r\"\\end{center}\")\n\n else:\n plot_title = (r\"\\begin{center}\"\\\n +\"%s %s Event Selection \"%(self.detector,\n self.selection)\\\n +r\"\\\\\"+\" %s \\\"llr\\\" Distributions for \"\n %(metric_type_pretty)\\\n +\"%s (%i trials)\"%(inj_name,\n self.num_trials)\\\n +r\"\\end{center}\")\n\n # Factor with which to make everything visible\n plot_scaling_factor = 1.55\n\n # In case of median plot, draw both best and alt histograms\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name),\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n llrarrays=[llrbest, llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r', 'b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=best_median,\n critical_label=r\"%s Median = $%.4f\\pm%.4f$\"%(\n self.tex_axis_label(best_name),\n best_median,\n median_error),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=llralthist\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n # Write the p-value on the plot\n plt.figtext(\n 0.15,\n 0.66,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.4f\\pm%.4f$\"%(\n best_name, med_p_value, unc_med_p_value),\n color='k',\n size='xx-large'\n )\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_median_%i_Trials'%(\n metric_type, self.num_trials)\n )\n # Add the extra points if they exist\n if self.extra_points is not None:\n plt.legend(loc='upper left', fontsize=11)\n curleg = plt.gca().get_legend()\n linelist = self.add_extra_points(\n ymax=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax)\n )\n handles, labels = plt.gca().get_legend_handles_labels()\n newhandles = []\n for l, h in zip(labels, handles):\n if l in linelist:\n newhandles.append(h)\n newleg = plt.legend(\n handles=newhandles,\n loc='upper right',\n fontsize=11\n )\n plt.gca().add_artist(newleg)\n plt.gca().add_artist(curleg)\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_median_w_extra_points_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n\n # Make some debugging plots\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name),\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n 
self.plot_llr_histograms(\n llrarrays=[llrbest, llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r', 'b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=best_median,\n critical_label=r\"%s Median = $%.4f\\pm%.4f$\"%(\n self.tex_axis_label(best_name),\n best_median,\n median_error),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=None\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_median_both_fit_dists_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\" + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name),\n ]\n self.plot_llr_histograms(\n llrarrays=[llrbest],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=None,\n critical_label=None,\n critical_height=None,\n llrhist=None\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_best_fit_dist_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name),\n ]\n self.plot_llr_histograms(\n llrarrays=[llrbest],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=best_median,\n critical_label=r\"%s Median = $%.4f\\pm%.4f$\"%(\n self.tex_axis_label(best_name),\n best_median,\n median_error),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=None\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_median_best_fit_dist_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n llrarrays=[llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=None,\n critical_label=None,\n critical_height=None,\n llrhist=None\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_alt_fit_dist_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\" + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n llrarrays=[llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=best_median,\n critical_label=r\"%s Median = 
$%.4f\\pm%.4f$\"%(\n self.tex_axis_label(best_name),\n best_median,\n median_error),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=llralthist\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n # Write the p-value on the plot\n plt.figtext(\n 0.15,\n 0.66,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.4f\\pm%.4f$\"%(\n best_name, med_p_value, unc_med_p_value),\n color='k',\n size='xx-large'\n )\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_median_alt_fit_dist_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n\n # In case of critical plot, draw just alt histograms\n ## Set up the label for the histogram\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n llrarrays=[llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=critical_value,\n critical_label=r\"Critical Value = %.4f\"%(critical_value),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=llralthist\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n # Write the p-value on the plot\n plt.figtext(\n 0.15,\n 0.70,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.4f\\pm%.4f$\"%(\n best_name, crit_p_value, unc_crit_p_value),\n color='k',\n size='xx-large'\n )\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_critical_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n\n # Make a second critical plot for the alt hypothesis, so we draw the\n # preferred hypothesis\n ## Set up the label for the histogram\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n llrarrays=[llrbest],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=critical_value,\n critical_label=r\"Critical Value = %.4f\"%(critical_value),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=llrbesthist\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n # Write the p-value on the plot\n plt.figtext(\n 0.15,\n 0.70,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.4f\\pm%.4f$\"%(\n alt_name, alt_crit_p_value, alt_unc_crit_p_value),\n color='k',\n size='xx-large'\n )\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_critical_alt_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()\n\n # Lastly, show both exclusion regions and then the joined cls value\n ## Set up the labels for the histograms\n llr_labels = [\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(best_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name),\n r\"%s Pseudo-Experiments - \"%(self.tex_axis_label(alt_name)) + \\\n r\"$\\log\\left[\\mathcal{L}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)/\\mathcal{L}\\left(\\mathcal{H}_{%s}\\right)\\right]$\"%(\n alt_name)\n ]\n self.plot_llr_histograms(\n 
llrarrays=[llrbest, llralt],\n llrhistmax=llrhistmax,\n binning=binning,\n colors=['r', 'b'],\n labels=llr_labels,\n best_name=best_name,\n alt_name=alt_name,\n critical_value=critical_value,\n critical_label=r\"Critical Value = %.4f\"%(critical_value),\n critical_height=float(max(llrbesthist))/float(\n plot_scaling_factor*llrhistmax),\n llrhist=[llrbesthist, llralthist],\n cls=True,\n )\n plt.legend(loc='upper left')\n plt.title(plot_title)\n # Write the p-values on the plot\n plt.figtext(\n 0.50,\n 0.66,\n r\"$\\mathrm{CL}_{s}\\left(\\mathcal{H}_{%s}\"%(best_name) + \\\n r\"\\right)= %.4f\\pm%.4f$\"%(cls_value, unc_cls_value),\n horizontalalignment='center',\n color='k',\n size='xx-large'\n )\n plt.figtext(\n 0.12,\n 0.55,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.2f\\pm%.2f$\"%(\n alt_name, alt_crit_p_value, alt_unc_crit_p_value),\n bbox=dict(facecolor='none', edgecolor='red', boxstyle='round'),\n horizontalalignment='left',\n color='k',\n size='x-large'\n )\n plt.figtext(\n 0.88,\n 0.55,\n r\"$\\mathrm{p}\\left(\\mathcal{H}_{%s}\\right) = %.2f\\pm%.2f$\"%(\n best_name, crit_p_value, unc_crit_p_value),\n horizontalalignment='right',\n bbox=dict(facecolor='none', edgecolor='blue', boxstyle='round'),\n color='k',\n size='x-large'\n )\n self.save_plot(\n outdir=outdir,\n end='%s_llrDistribution_CLs_%i_Trials'%(\n metric_type, self.num_trials)\n )\n plt.close()", "def gen_final_df(res_fname, dat_sel, dname):\n res_file = open(res_fname, \"r\")\n rdict = {}\n pdict = {}\n for p in res_file:\n i, c, g, rate = p.split(\",\")\n rdict[i] = rate\n pdict[i] = [c, g]\n res_file.close()\n\n opti_feat = min([key for key in rdict.keys() if rdict[key] == max(rdict.values())])\n opti_c, opti_g = pdict[opti_feat][0], pdict[opti_feat][1]\n opti_df = df_transformer(dat_sel, opti_feat)\n opti_df = opti_df.replace(to_replace=-1, value=2)\n opti_df.to_csv(dname+\".res\", sep=\" \", header=None, index=None)\n return dname+\".res\", opti_c, opti_g", "def npdict(self):\n\n d = {}\n\n # per profile\n d['cruise'] = self.cruise()\n d['day'] = self.day()\n d['latitude'] = self.latitude()\n d['latitude_unc'] = self.latitude_unc()\n d['longitude'] = self.longitude()\n d['longitude_unc'] = self.longitude_unc()\n d['month'] = self.month()\n d['n_levels'] = self.n_levels()\n d['primary_header_keys'] = self.primary_header_keys()\n d['probe_type'] = self.probe_type()\n d['time'] = self.time()\n d['uid'] = self.uid()\n d['year'] = self.year()\n d['PIs'] = self.PIs()\n d['originator_station'] = self.originator_station()\n d['originator_cruise'] = self.originator_cruise()\n d['originator_flag_type'] = self.originator_flag_type()\n d['t_metadata'] = self.t_metadata()\n d['s_metadata'] = self.s_metadata()\n # per level\n d['s'] = self.s()\n d['s_unc'] = self.s_unc()\n d['s_level_qc'] = self.s_level_qc()\n d['s_profile_qc'] = self.s_profile_qc()\n d['s_qc_mask'] = self.s_qc_mask()\n d['t'] = self.t()\n d['t_unc'] = self.t_unc()\n d['t_level_qc'] = self.t_level_qc()\n d['t_profile_qc'] = self.t_profile_qc()\n d['t_qc_mask'] = self.t_qc_mask()\n d['z'] = self.z()\n d['z_unc'] = self.z_unc()\n d['z_level_qc'] = self.z_level_qc()\n d['oxygen'] = self.oxygen()\n d['phosphate'] = self.phosphate()\n d['silicate'] = self.silicate()\n d['pH'] = self.pH()\n d['p'] = self.p()\n\n return d", "def _kde_example2(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot + Histogram\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n 
ch.style.color_palette.reset_palette_order()\n ch.plot.histogram(\n data_frame=data,\n values_column=\"unit_price\",\n color_column=\"fruit\",\n method=\"density\",\n )\n ch.show(_OUTPUT_FORMAT)", "def make_plots(fig_title, \n t_csd_x, t_csd_y, t_csd_z, true_csd, \n ele_x, ele_y, ele_z, pots,\n k_csd_x, k_csd_y, k_csd_z, est_csd):\n fig = plt.figure(figsize=(10,16))\n #True CSD\n z_steps = 5\n height_ratios = [1 for i in range(z_steps)]\n height_ratios.append(0.1)\n gs = gridspec.GridSpec(z_steps+1, 3, height_ratios=height_ratios)\n t_max = np.max(np.abs(true_csd))\n levels = np.linspace(-1*t_max, t_max, 16)\n ind_interest = np.mgrid[0:t_csd_z.shape[2]:np.complex(0,z_steps+2)]\n ind_interest = np.array(ind_interest, dtype=np.int)[1:-1]\n for ii, idx in enumerate(ind_interest):\n ax = plt.subplot(gs[ii, 0])\n im = plt.contourf(t_csd_x[:,:,idx], t_csd_y[:,:,idx], true_csd[:,:,idx], \n levels=levels, cmap=cm.bwr_r)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(t_csd_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n cax = plt.subplot(gs[z_steps,0])\n cbar = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar.set_ticks(levels[::2])\n cbar.set_ticklabels(np.around(levels[::2], decimals=2))\n #Potentials\n v_max = np.max(np.abs(pots))\n levels_pot = np.linspace(-1*v_max, v_max, 16)\n ele_res = int(np.ceil(len(pots)**(3**-1))) \n ele_x = ele_x.reshape(ele_res, ele_res, ele_res)\n ele_y = ele_y.reshape(ele_res, ele_res, ele_res)\n ele_z = ele_z.reshape(ele_res, ele_res, ele_res)\n pots = pots.reshape(ele_res, ele_res, ele_res)\n for idx in range(min(5,ele_res)):\n X,Y,Z = grid(ele_x[:,:,idx], ele_y[:,:,idx], pots[:,:,idx])\n ax = plt.subplot(gs[idx, 1])\n im = plt.contourf(X, Y, Z, levels=levels_pot, cmap=cm.PRGn)\n ax.hold(True)\n plt.scatter(ele_x[:,:,idx], ele_y[:,:,idx], 5)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(ele_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n ax.set_xlim([0.,1.])\n ax.set_ylim([0.,1.])\n cax = plt.subplot(gs[z_steps,1])\n cbar2 = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar2.set_ticks(levels_pot[::2])\n cbar2.set_ticklabels(np.around(levels_pot[::2], decimals=2))\n # #KCSD\n t_max = np.max(np.abs(est_csd[:,:,:,0]))\n levels_kcsd = np.linspace(-1*t_max, t_max, 16)\n ind_interest = np.mgrid[0:k_csd_z.shape[2]:np.complex(0,z_steps+2)]\n ind_interest = np.array(ind_interest, dtype=np.int)[1:-1]\n for ii, idx in enumerate(ind_interest):\n ax = plt.subplot(gs[ii, 2])\n im = plt.contourf(k_csd_x[:,:,idx], k_csd_y[:,:,idx], est_csd[:,:,idx,0], \n levels=levels_kcsd, cmap=cm.bwr_r)\n #im = plt.contourf(k_csd_x[:,:,idx], k_csd_y[:,:,idx], est_csd[:,:,idx,0], \n # levels=levels, cmap=cm.bwr_r)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(k_csd_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n cax = plt.subplot(gs[z_steps,2])\n cbar3 = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar3.set_ticks(levels_kcsd[::2])\n #cbar3.set_ticks(levels[::2])\n cbar3.set_ticklabels(np.around(levels_kcsd[::2], decimals=2))\n #cbar3.set_ticklabels(np.around(levels[::2], decimals=2))\n fig.suptitle(\"Lambda,R,CV_Error,RMS_Error,Time = \"+fig_title)\n gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95]) \n # #Showing\n #plt.tight_layout()\n plt.show()\n return", "def table_summary():\n \n 
t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def deriv_analysis(fname):\r\n dat = sio.loadmat(fname,struct_as_record=False,squeeze_me=True)\r\n output = dat['output']\r\n y = dat['y'].ravel()\r\n cbool = dat['cbool'].astype('bool').ravel()\r\n R = {}\r\n\r\n for model in output._fieldnames:\r\n print('\\t'+model)\r\n yhat = output.__getattribute__(model).yhat\r\n R[model],_,kernels = get_corr(y,yhat,cbool)\r\n\r\n R['kernels'] = kernels\r\n df = pd.DataFrame(R)\r\n df['id'] = os.path.basename(fname)[:10]\r\n return(df)", "def __init__(self, financial_params, start_year, end_year):\n super().__init__(financial_params, start_year, end_year)\n self.horizon_mode = financial_params['analysis_horizon_mode']\n self.location = financial_params['location']\n self.ownership = financial_params['ownership']\n self.state_tax_rate = financial_params['state_tax_rate']/100\n self.federal_tax_rate = financial_params['federal_tax_rate']/100\n self.property_tax_rate = financial_params['property_tax_rate']/100\n self.ecc_mode = financial_params['ecc_mode']\n self.ecc_df = pd.DataFrame()\n self.equipment_lifetime_report = pd.DataFrame()\n self.tax_calculations = None\n\n self.Scenario = financial_params['CBA']['Scenario']\n self.Finance = financial_params['CBA']['Finance']\n self.valuestream_values = financial_params['CBA']['valuestream_values']\n self.ders_values = financial_params['CBA']['ders_values']\n if 'Battery' in self.ders_values.keys():\n self.ders_values['Battery'] = self.ders_values.pop('Battery')\n if 'CAES' in self.ders_values.keys():\n self.ders_values['CAES'] = self.ders_values.pop('CAES')\n\n self.value_streams = {}\n self.ders = []\n\n self.macrs_depreciation = {\n 3: [33.33, 44.45, 14.81, 7.41],\n 5: [20, 32, 19.2, 11.52, 11.52, 5.76],\n 7: [14.29, 24.49, 17.49, 12.49, 8.93, 8.92, 8.93, 4.46],\n 10: [10, 18, 14.4, 11.52, 9.22, 7.37, 6.55, 6.55, 6.56, 6.55,\n 3.28],\n 15: [5, 9.5, 8.55, 7.7, 6.83, 6.23, 5.9, 5.9, 5.91, 5.9,\n 5.91, 5.9, 5.91, 5.9, 5.91, 2.95],\n 20: [3.75, 7.219, 6.677, 6.177, 5.713, 5.285, 4.888, 4.522, 4.462, 4.461,\n 4.462, 4.461, 4.462, 4.461, 4.462, 4.461, 
4.462, 4.461, 4.462, 4.461,\n 2.231]\n }", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n 
)\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def plot_parameter_visualisation_1d_a_b(parameters_dict, nr_components, ab, colors, prec_wrt_L=False, plot_out=None):\n\n\n plot = {'data': [], 'layout': {}}\n\n\n # component weights\n weights_bg = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_bg_' in k]\n weights_contact = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_contact_' in k]\n\n #component mu\n means = [v[ab] for k,v in sorted(parameters_dict.iteritems()) if 'mu_' in k]\n\n #component sd\n sd = []\n for component in range(nr_components):\n try:\n if prec_wrt_L:\n sd.append(np.sqrt(1.0/(parameters_dict['prec_'+str(component)][ab] * 142) )) #in case precision is spec depending on L=142\n else:\n sd.append(np.sqrt(1.0/parameters_dict['prec_'+str(component)][ab]))\n except ZeroDivisionError as e:\n print(e)\n sd.append(0) #in case prec is zero bc optimizer tries strange values\n\n\n ### add components\n for component in range(nr_components):\n gaussian_component_density = get_coordinates_for_1d_gaussian(-1, 1, means[component], sd[component])\n plot['data'].append(\n go.Scatter(x=gaussian_component_density[0],\n y=gaussian_component_density[1],\n mode='lines',\n name='component ' + str(component) + ' for ' + AB[ab],\n line=dict(dash='dot',\n color=colors[component]),\n showlegend=False\n )\n )\n\n ### add mixture if there are more than one component\n if (nr_components > 1):\n gaussian_mixture_x_contact, gaussian_mixture_y_contact =get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_contact,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_contact,\n y=gaussian_mixture_y_contact,\n mode='lines',\n name='mixture (contact) for ' + AB[ab],\n line=dict(color='rgb(50,205,50)',\n width = 3),\n showlegend=False\n )\n )\n\n if (nr_components > 1):\n gaussian_mixture_x_bg, gaussian_mixture_y_bg = get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_bg,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_bg,\n y=gaussian_mixture_y_bg,\n mode='lines',\n name='mixture (bg) for ' + AB[ab],\n line=dict(color='rgb(50,50,205 )',\n width = 3),\n showlegend=False\n )\n )\n\n\n plot['layout'].update({'title': 'Coupling prior as a gaussian mixture'})\n plot['layout'].update({'xaxis1': {'title': \"coupling values\"}})\n plot['layout'].update({'yaxis1': {'title': \"density\"}})\n plot['layout']['font'] = {'size': 18}\n\n if plot_out is not None:\n plotly_plot(plot, filename=plot_out, 
auto_open=False)\n else:\n return plot", "def AGN_SB_diagnostic(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results()\n\n fig,ax = plt.subplots()\n L_CII = getattr(GR,'L_[CII]158_sun')\n\n # x = getattr(GR,'L_[CII]158_sun')/getattr(GR,'L_[NII]122_sun')\n x = getattr(GR,'L_[OIV]25_sun')/getattr(GR,'L_[OIII]88_sun')\n y = getattr(GR,'L_[NeIII]15_sun')/getattr(GR,'L_[NeII]12_sun')\n sc = ax.scatter(x,y,marker='o',s=3,alpha=0.6,c=np.log10(getattr(GR,'SFR')))\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim([8e-5,200])\n ax.set_ylim([0.02,150])\n # ax.set_xlabel('[CII]$_{158}$/[NII]$_{122}$')\n plt.colorbar(sc,label='log(SFR)')\n ax.set_xlabel('[OIV]$_{25.9}$/[OIII]$_{88}$')\n ax.set_ylabel('[NeIII]$_{15.6}$/[NeII]$_{12.8}$')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/ratio_%s' % ratio_name,dpi=300)", "def _calc_ecdf(self):\n for numerator, vals in self.lift.items():\n for denominator, lift in vals.items():\n raw_data = np.array(lift)\n cdfx = np.sort(np.unique(lift))\n x_values = np.linspace(start=min(cdfx),\n stop=max(cdfx),\n num=len(cdfx))\n size_data = raw_data.size\n y_values = []\n for i in x_values:\n temp = raw_data[raw_data <= i]\n value = temp.size / size_data\n y_values.append(value)\n temp = {}\n temp['x'] = x_values\n temp['y'] = y_values\n if numerator not in self.ecdf.keys():\n self.ecdf[numerator] = {}\n self.ecdf[numerator][denominator] = temp\n else:\n self.ecdf[numerator][denominator] = temp", "def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,\r\n background_color, label_color, generate_scree):\r\n coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n mapping = data['map']\r\n out_table = ''\r\n # Iterate through prefs and generate html files for each colorby option\r\n # Sort by the column name first\r\n sample_location = {}\r\n\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n groups_and_colors = list(groups_and_colors)\r\n\r\n for i in range(len(groups_and_colors)):\r\n labelname = groups_and_colors[i][0]\r\n groups = groups_and_colors[i][1]\r\n colors = groups_and_colors[i][2]\r\n data_colors = groups_and_colors[i][3]\r\n data_color_order = groups_and_colors[i][4]\r\n\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n new_col_name = labelname\r\n img_data = {}\r\n plot_label = labelname\r\n\r\n if 'support_pcoas' in data:\r\n matrix_average, matrix_low, matrix_high, eigval_average, m_names = \\\r\n summarize_pcoas(data['coord'], data['support_pcoas'],\r\n method=data['ellipsoid_method'])\r\n data['coord'] = \\\r\n (m_names, matrix_average, data['coord'][2], data['coord'][3])\r\n for i in range(len(m_names)):\r\n sample_location[m_names[i]] = i\r\n else:\r\n matrix_average = None\r\n matrix_low = None\r\n matrix_high = None\r\n eigval_average = None\r\n m_names = None\r\n iterator = 0\r\n\r\n for coord_tup in coord_tups:\r\n if isarray(matrix_low) and isarray(matrix_high) and \\\r\n isarray(matrix_average):\r\n coord_1r = asarray(matrix_low)\r\n coord_2r = asarray(matrix_high)\r\n mat_ave = asarray(matrix_average)\r\n else:\r\n coord_1r = None\r\n coord_2r = None\r\n mat_ave = None\r\n sample_location = None\r\n\r\n coord_1, coord_2 = coord_tup\r\n img_data[coord_tup] = draw_pcoa_graph(\r\n plot_label, 
data_file_dir_path,\r\n data_file_link, coord_1, coord_2,\r\n coord_1r, coord_2r, mat_ave,\r\n sample_location,\r\n data, prefs, groups, colors,\r\n background_color, label_color,\r\n data_colors, data_color_order,\r\n generate_eps=True)\r\n\r\n out_table += TABLE_HTML % (labelname,\r\n \"<br>\".join(img_data[(\"1\", \"2\")]),\r\n \"<br>\".join(img_data[(\"3\", \"2\")]),\r\n \"<br>\".join(img_data[(\"1\", \"3\")]))\r\n\r\n if generate_scree:\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join(\r\n '.',\r\n os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n img_src, download_link = draw_scree_graph(\r\n data_file_dir_path, data_file_link, background_color,\r\n label_color, generate_eps=True, data=data)\r\n\r\n out_table += SCREE_TABLE_HTML % (\"<br>\".join((img_src, download_link)))\r\n\r\n outfile = create_html_filename(filename, '.html')\r\n outfile = os.path.join(html_dir_path, outfile)\r\n\r\n write_html_file(out_table, outfile)", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def plot_pareto_design_space(self, dvar1=0, dvar2=1):\n\n # Check inputs\n if not isinstance(dvar1, int) and not isinstance(dvar1, str):\n raise TypeError(\"Expecting int or str for dvar1, received\", type(dvar1))\n if not isinstance(dvar2, int) and not isinstance(dvar2, str):\n raise TypeError(\"Expecting int or str for dvar2, received\", type(dvar2))\n\n # Get both objective function index and name\n if isinstance(dvar1, int):\n idx_dvar1 = dvar1\n dvar1 = self.design_var_names[idx_dvar1]\n else:\n idx_dvar1 = self.design_var_names.index(dvar1)\n if isinstance(dvar2, int):\n idx_dvar2 = dvar2\n dvar2 = self.design_var_names[idx_dvar2]\n else:\n idx_dvar2 = self.design_var_names.index(dvar2)\n\n # Get fitness and ngen\n is_valid = np.array(self.is_valid)\n fitness = np.array(self.fitness)\n ngen = np.array(self.ngen)\n design_var = np.array(self.design_var)\n\n # Keep only valid values\n indx = np.where(is_valid)[0]\n fitness = fitness[indx]\n ngen = ngen[indx]\n design_var = 
design_var[indx]\n\n # Get pareto front\n pareto = fitness\n\n # Get dominated values\n idx_non_dom = list(range(len(pareto)))\n N = len(pareto)\n for i in range(N):\n for j in idx_non_dom:\n if all(pareto[j] <= pareto[i]) and any(pareto[j] < pareto[i]):\n idx_non_dom.remove(i)\n break\n\n pareto = np.array(pareto)\n pareto = pareto[idx_non_dom]\n design_var_values = design_var[idx_non_dom]\n\n # Write annotations\n legend_annot = []\n p_iterator = range(pareto.shape[1])\n for sim in pareto.tolist():\n legend = \"\"\n for fit in p_iterator:\n legend += \"{:11.10}=\".format(self.fitness_names[fit]) # sim[d_var])\n if isinstance(sim[fit], float):\n legend += \" {}\\n\".format(sim[fit])\n else:\n legend += \"{:>11.10}\\n\".format(str(sim[fit]))\n legend_annot.append(legend[:-2])\n\n fig, axs = plt.subplots()\n\n # Plot Pareto front\n sc = axs.scatter(\n design_var_values[:, idx_dvar1],\n design_var_values[:, idx_dvar2],\n facecolors=(230 / 255, 175 / 255, 0),\n edgecolors=(0.35, 0.35, 0.35),\n label=\"Pareto Front\",\n )\n axs.autoscale(1, 1)\n axs.legend()\n axs.set_title(\"Pareto Front\")\n axs.set_xlabel(dvar1)\n axs.set_ylabel(dvar2)\n\n # Add anotations in the plot see https://stackoverflow.com/a/47166787\n annot = axs.annotate(\n \"\",\n xy=(0, 0),\n xytext=(20, 20),\n textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"w\"),\n arrowprops=dict(arrowstyle=\"->\"),\n )\n annot.set_visible(False)\n\n def update_annot(ind):\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n annot.set_text(legend_annot[ind[\"ind\"][0]])\n annot.get_bbox_patch().set_facecolor(\n (230 / 255, 175 / 255, 0)\n ) # Color of the annotation background (230,175,0) from Pyleecan graphic chart\n annot.get_bbox_patch().set_alpha(0.4)\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == axs:\n cont, ind = sc.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n fig.show()", "def TRT_DigitizationToolCfg(flags, name=\"TRTDigitizationTool\", **kwargs):\n if flags.Digitization.PileUpPremixing:\n kwargs.setdefault(\"OutputObjectName\", flags.Overlay.BkgPrefix + \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.BkgPrefix + \"TRT_SDO_Map\")\n else:\n kwargs.setdefault(\"OutputObjectName\", \"TRT_RDOs\")\n kwargs.setdefault(\"OutputSDOName\", \"TRT_SDO_Map\")\n kwargs.setdefault(\"HardScatterSplittingMode\", 0)\n return TRT_DigitizationBasicToolCfg(flags, name, **kwargs)", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, 
BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def pdf_d(iout,run='',data='../data',iv='d',i4=0,nbin=100,xlim=[-4,3],lnd=False):\n s = di.snapshot(iout,run=run,data=data)\n n = nbin\n bins = np.linspace(xlim[0],xlim[1],n+1)\n htot = 0.0\n i = 0\n for p in s.patches:\n i += 1\n if i%1000==0:\n print('{:.1f}%'.format(i/len(s.patches)*100.0))\n d = p.var(iv,i4=i4)\n if lnd:\n logd = d/np.log(10.)\n else:\n logd = np.log10(d)\n h,e = np.histogram(logd,bins=bins)\n htot += h\n pl.hist(bins[0:n],bins=bins,weights=htot,log=True,density=True)\n return bins,htot", "def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=0, title='Obsever Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Obsever Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs", "def info_density(self):\n tot_fields = self.tot_col * self.tot_rows # Total number of fields\n pop_fields = 100 - ((self.empty / tot_fields) * 100)\n\n print('Information density (%): ' + str(pop_fields) + '%')\n results.append('Information density (%): ' + str(pop_fields) + '%')", "def fiducial_comparison():\n \n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n xi, eta = pk['xi'], pk['eta']\n \n plt.close()\n fig, ax = plt.subplots(2,1,figsize=(7,6), sharex=True)\n \n plt.sca(ax[0])\n plt.scatter(g['phi1']+40, g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1, rasterized=True)\n \n plt.ylabel('$\\phi_2$ [deg]')\n plt.text(0.05, 0.9, 'Most likely GD-1 members', transform=plt.gca().transAxes, va='top', fontsize=17)\n plt.xlim(-20,20)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n plt.sca(ax[1])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=mpl.cm.Blues(0.9), ms=5)\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.text(0.05, 0.9, 'Simulated GD-1\\n0.5 Gyr after subhalo flyby', transform=plt.gca().transAxes, va='top', fontsize=17)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n plt.tight_layout()\n plt.savefig('../plots/fiducial_comparison.pdf')", "def FE_discretize_numeric_variables(train, bin_dict, test='', strategy='kmeans',verbose=0):\r\n df = copy.deepcopy(train)\r\n test = copy.deepcopy(test)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n #print('nrows',nrows)\r\n if 
verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_discrete'\r\n if strategy == 'gaussian':\r\n kbd = GaussianMixture(n_components=binvalue, random_state=99)\r\n df[new_col] = kbd.fit_predict(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.predict(test[[col]]).astype(int)\r\n else:\r\n kbd = KBinsDiscretizer(n_bins=binvalue, encode='ordinal', strategy=strategy)\r\n df[new_col] = kbd.fit_transform(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.transform(test[[col]]).astype(int)\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n ax1.scatter(df[col],df[new_col])\r\n ax1.set_title(new_col)\r\n if not isinstance(test, str):\r\n return df, test\r\n else:\r\n return df", "def _convert(self):\n logger.info(\"Converting conformers to density\")\n logger.debug(\"Masking\")\n self._transformer.reset(full=True)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self._transformer.mask(self._rmask)\n mask = self._transformer.xmap.array > 0\n self._transformer.reset(full=True)\n\n nvalues = mask.sum()\n self._target = self.xmap.array[mask]\n logger.debug(\"Density\")\n nmodels = len(self._coor_set)\n self._models = np.zeros((nmodels, nvalues), float)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self.conformer.b = self._bs[n]\n self._transformer.density()\n model = self._models[n]\n model[:] = self._transformer.xmap.array[mask]\n np.maximum(model, self.options.bulk_solvent_level, out=model)\n self._transformer.reset(full=True)", "def calc_cat_stats(\n srs: dd.Series,\n df: dd.DataFrame,\n bins: int,\n nrows: int,\n nuniq: Optional[dd.core.Scalar] = None,\n) -> Dict[str, Any]:\n # pylint: disable=too-many-locals\n # overview stats\n stats = {\n \"nrows\": nrows,\n \"npres\": srs.shape[0],\n \"nuniq\": nuniq, # if cfg.bar_endable or cfg.pie_enable else srs.nunique(),\n \"mem_use\": srs.memory_usage(deep=True),\n \"first_rows\": srs.reset_index(drop=True).loc[:4],\n }\n # length stats\n lengths = srs.str.len()\n minv, maxv = lengths.min(), lengths.max()\n hist = da.histogram(lengths.values, bins=bins, range=[minv, maxv])\n leng = {\n \"Mean\": lengths.mean(),\n \"Standard Deviation\": lengths.std(),\n \"Median\": lengths.quantile(0.5),\n \"Minimum\": minv,\n \"Maximum\": maxv,\n }\n # letter stats\n # computed on groupby-count:\n # compute the statistic for each group then multiply by the count of the group\n grp, col = df.columns\n lc_cnt = (df[grp].str.count(r\"[a-z]\") * df[col]).sum()\n uc_cnt = (df[grp].str.count(r\"[A-Z]\") * df[col]).sum()\n letter = {\n \"Count\": lc_cnt + uc_cnt,\n \"Lowercase Letter\": lc_cnt,\n \"Space Separator\": (df[grp].str.count(r\"[ ]\") * df[col]).sum(),\n \"Uppercase Letter\": uc_cnt,\n \"Dash Punctuation\": (df[grp].str.count(r\"[-]\") * df[col]).sum(),\n \"Decimal Number\": (df[grp].str.count(r\"[0-9]\") * df[col]).sum(),\n }\n\n return {\"stats\": stats, \"len_stats\": leng, \"letter_stats\": letter, \"len_hist\": hist}", "def show_discdds_geo_main(config, parser):\n parser.add_option(\"-o\", \"--output\", help=\"Output directory\",\n default='out/dp-show-discdds-geo/')\n parser.add_option(\"-t\", \"--tolerance\", help=\"Normalized tolerance\",\n default=0.3, type='float')\n options, which = parser.parse()\n \n outdir = options.output \n \n if not which:\n todo = config.discdds.keys() \n else:\n todo = config.discdds.expand_names(which)\n\n\n for id_dds in todo:\n 
dds = config.discdds.instance(id_dds) \n report = Report(id_dds)\n \n show_diffeo_structure(dds, report, tolerance=options.tolerance) \n \n write_report_files(report, basename=os.path.join(outdir, id_dds))", "def plot_BSNR(data_obj, test_num, cmap):\n\n #----------------------------------------------------------------#\n # Create a new page in the pdf and display the title of the test #\n #----------------------------------------------------------------#\n fig = new_pdf_page(data_obj.pdf_obj)\n plt.axis('off')\n plt.suptitle('Test ' + str(test_num) + ': Organic Detection (' + \"{0:.1f}\".format(data_obj.POM_BSNR) + ')')\n\n #------------------------------------------------------------------------------------------#\n # Display the statistics of the test - BSNR result, average signal, and standard deviation #\n #------------------------------------------------------------------------------------------#\n str1 = 'BSNR: ' + \"{0:.1f}\".format(data_obj.POM_BSNR)\n plt.text(0.5, 0.95, str1, ha='center', fontsize=8)\n\n plt.subplots_adjust(hspace=0.3)\n plt.tight_layout\n\n str2 = 'Ave sig ' + \"{0:.3f}\".format(np.average(data_obj.POM_boundary_signal_arr))\n plt.text(0.5, 0.90, str2, ha='center', fontsize=8)\n str3 = 'Stdev ' + \"{0:.3f}\".format(np.std(data_obj.POM_boundary_signal_arr, ddof=1))\n plt.text(0.5, 0.88, str3, ha='center', fontsize=8)\n\n #----------------------------------------------------------------------------------------#\n # Go through each image [file = image?] and plot the POM ROI images with their sub ROI's #\n #----------------------------------------------------------------------------------------#\n\n n_files = len(data_obj.files)\n for i in range(n_files):\n\n # Set up the design of the page - rows / columns, subplots, etc.\n row_col = np.ceil(np.sqrt(n_files * 1.0))\n ax1 = fig.add_subplot(int(row_col * 100 + row_col * 10 + 1 + i))\n plt.axis('off')\n\n # Display the ROI image of the \"POM_piece\", making sure to specify the range of\n # values for the colormap\n vmin = data_obj.img_data[i].POM_piece_ROI.img.min()\n vmax = np.percentile(data_obj.img_data[i].POM_piece_ROI.img.ravel(), 99.5)\n plt.imshow(data_obj.img_data[i].POM_piece_ROI.img, cmap=cmap, vmin=vmin, vmax=vmax)\n\n # Add each sub ROI and plot them to the pdf - specifically to the current image\n for subROI in data_obj.img_data[i].POM_piece_ROI.subROIs:\n subROI.add_rect_to_plot()\n\n # Display the text for the Boundary Signal for each image\n str1 = 'Bound. 
sig.=' + \"{0:.3f}\".format(data_obj.img_data[i].POM_boundary_signal)\n plt.title(str1, fontsize=7)", "def highlight_de(adata, basis='umap', components=[1, 2], n_top_genes=10,\n de_keys='names, scores, pvals_adj, logfoldchanges',\n cell_keys='', n_neighbors=5, fill_alpha=0.1, show_hull=True,\n legend_loc='top_right', plot_width=None, plot_height=None):\n\n if 'rank_genes_groups' not in adata.uns_keys():\n raise ValueError('Run differential expression first.')\n\n\n if isinstance(de_keys, str):\n de_keys = list(dict.fromkeys(map(str.strip, de_keys.split(','))))\n if de_keys != ['']:\n assert all(map(lambda k: k in adata.uns['rank_genes_groups'].keys(), de_keys)), 'Not all keys are in `adata.uns[\\'rank_genes_groups\\']`.'\n else:\n de_keys = []\n\n if isinstance(cell_keys, str):\n cell_keys = list(dict.fromkeys(map(str.strip, cell_keys.split(','))))\n if cell_keys != ['']:\n assert all(map(lambda k: k in adata.obs.keys(), cell_keys)), 'Not all keys are in `adata.obs.keys()`.'\n else:\n cell_keys = []\n\n if f'X_{basis}' not in adata.obsm.keys():\n raise ValueError(f'Key `X_{basis}` not found in adata.obsm.')\n\n if not isinstance(components, np.ndarray):\n components = np.asarray(components)\n\n key = adata.uns['rank_genes_groups']['params']['groupby']\n if key not in cell_keys:\n cell_keys.insert(0, key)\n\n df = pd.DataFrame(adata.obsm[f'X_{basis}'][:, components - (basis != 'diffmap')], columns=['x', 'y'])\n for k in cell_keys:\n df[k] = list(map(str, adata.obs[k]))\n\n knn = neighbors.KNeighborsClassifier(n_neighbors)\n knn.fit(df[['x', 'y']], adata.obs[key])\n df['prediction'] = knn.predict(df[['x', 'y']])\n\n conv_hulls = df[df[key] == df['prediction']].groupby(key).apply(lambda df: df.iloc[ConvexHull(np.vstack([df['x'], df['y']]).T).vertices])\n\n mapper = _create_mapper(adata, key)\n categories = adata.obs[key].cat.categories\n fig = figure(tools='pan, reset, wheel_zoom, lasso_select, save')\n _set_plot_wh(fig, plot_width, plot_height)\n legend_dict = defaultdict(list)\n\n for k in categories:\n d = df[df[key] == k]\n data_source = ColumnDataSource(d)\n legend_dict[k].append(fig.scatter('x', 'y', source=data_source, color={'field': key, 'transform': mapper}, size=5, muted_alpha=0))\n\n hover_cell = HoverTool(renderers=[r[0] for r in legend_dict.values()], tooltips=[(f'{key}', f'@{key}')] + [(f'{k}', f'@{k}') for k in cell_keys[1:]])\n\n c_hulls = conv_hulls.copy()\n de_possible = conv_hulls[key].isin(adata.uns['rank_genes_groups']['names'].dtype.names)\n ok_patches = []\n prev_cat = []\n for i, isin in enumerate((~de_possible, de_possible)):\n conv_hulls = c_hulls[isin]\n\n if len(conv_hulls) == 0:\n continue\n\n xs, ys, ks = zip(*conv_hulls.groupby(key).apply(lambda df: list(map(list, (df['x'], df['y'], df[key])))))\n tmp_data = defaultdict(list)\n tmp_data['xs'] = xs\n tmp_data['ys'] = ys\n tmp_data[key] = list(map(lambda k: k[0], ks))\n \n if i == 1:\n ix = list(map(lambda k: adata.uns['rank_genes_groups']['names'].dtype.names.index(k), tmp_data[key]))\n for k in de_keys:\n tmp = np.array(list(zip(*adata.uns['rank_genes_groups'][k])))[ix, :n_top_genes]\n for j in range(n_top_genes):\n tmp_data[f'{k}_{j}'] = tmp[:, j]\n\n tmp_data = pd.DataFrame(tmp_data)\n for k in categories:\n d = tmp_data[tmp_data[key] == k]\n source = ColumnDataSource(d)\n\n patches = fig.patches('xs', 'ys', source=source, fill_alpha=fill_alpha, muted_alpha=0, hover_alpha=0.5,\n color={'field': key, 'transform': mapper} if (show_hull and i == 1) else None,\n hover_color={'field': key, 'transform': mapper} if 
(show_hull and i == 1) else None)\n legend_dict[k].append(patches)\n if i == 1:\n ok_patches.append(patches)\n\n hover_group = HoverTool(renderers=ok_patches, tooltips=[(f'{key}', f'@{key}'),\n ('groupby', adata.uns['rank_genes_groups']['params']['groupby']),\n ('reference', adata.uns['rank_genes_groups']['params']['reference']),\n ('rank', ' | '.join(de_keys))] + [(f'#{i + 1}', ' | '.join((f'@{k}_{i}' for k in de_keys))) for i in range(n_top_genes)]\n )\n \n\n fig.toolbar.active_inspect = [hover_group]\n if len(cell_keys) > 1:\n fig.add_tools(hover_group, hover_cell)\n else:\n fig.add_tools(hover_group)\n\n if legend_loc is not None:\n legend = Legend(items=list(legend_dict.items()), location=legend_loc)\n fig.add_layout(legend)\n fig.legend.click_policy = 'hide' # hide does disable hovering, whereas 'mute' does not\n\n fig.xaxis.axis_label = f'{basis}_{components[0]}'\n fig.yaxis.axis_label = f'{basis}_{components[1]}'\n\n show(fig)", "def construct_occurrence_dico(data) :\n print('Constructing occurence dictionnaries...')\n\n p_kw_dico = dict()\n kw_p_dico = dict()\n full_stem_dico = {}\n for patent in data :\n patent_id = patent['id']\n #[keywords,stem_dico] = extract_keywords(patent[1]+\". \"+patent[2],patent_id)\n [keywords,stem_dico] = extract_keywords(patent['title']+\". \"+patent['abstract'],patent_id)\n #print(keywords)\n\n for k in keywords :\n # add to p_kw dico\n if k in kw_p_dico :\n kw_p_dico[k].append(patent_id)\n else :\n kw_p_dico[k]= [patent_id]\n #\n if patent_id in p_kw_dico :\n p_kw_dico[patent_id].append(k)\n else :\n p_kw_dico[patent_id] = [k]\n\n for k in stem_dico.keys():\n if k in full_stem_dico :\n full_stem_dico[k]=full_stem_dico[k].union(stem_dico[k])\n else :\n full_stem_dico[k] = stem_dico[k]\n\n return([p_kw_dico,kw_p_dico,full_stem_dico])", "def fig_craco_fiducial(outfile='fig_craco_fiducial.png',\n zmax=2.5,DMmax=2500,\n show_Macquart=False,\n log=True,\n label='$\\\\log_{10} \\; p(DM_{\\\\rm EG},z)$',\n Aconts=[0.01, 0.1, 0.5],\n cmap='jet', show=False, figsize=None,\n vmnx=(None,None),\n grid=None, survey=None):\n # Generate the grid\n if grid is None or survey is None:\n survey, grid = analy_H0_I.craco_mc_survey_grid()\n\n # Unpack\n full_zDMgrid, zvals, dmvals = grid.rates, grid.zvals, grid.dmvals\n FRBZ=survey.frbs['Z']\n FRBDM=survey.DMEGs\n \n ##### imshow of grid #######\n fsize = 14.\n plt.figure(figsize=figsize)\n ax1=plt.axes()\n plt.sca(ax1)\n \n plt.xlabel('z')\n plt.ylabel('${\\\\rm DM}_{\\\\rm EG}$')\n #plt.title(title+str(H0))\n \n # Cut down grid\n zvals, dmvals, zDMgrid = figures.proc_pgrid(\n full_zDMgrid, \n zvals, (0, zmax),\n dmvals, (0, DMmax))\n ddm=dmvals[1]-dmvals[0]\n dz=zvals[1]-zvals[0]\n nz, ndm = zDMgrid.shape\n\n # Contours\n alevels = figures.find_Alevels(full_zDMgrid, Aconts, log=True)\n \n # Ticks\n tvals, ticks = figures.ticks_pgrid(zvals)# , fmt='str4')\n plt.xticks(tvals, ticks)\n tvals, ticks = figures.ticks_pgrid(dmvals, fmt='int')# , fmt='str4')\n plt.yticks(tvals, ticks)\n\n # Image \n im=plt.imshow(zDMgrid.T,cmap=cmap,origin='lower', \n vmin=vmnx[0], vmax=vmnx[1],\n interpolation='None',\n aspect='auto')\n \n styles=['--','-.',':']\n ax=plt.gca()\n cs=ax.contour(zDMgrid.T,levels=alevels,origin='lower',colors=\"white\",linestyles=styles)\n\n ax=plt.gca()\n \n muDMhost=np.log(10**grid.state.host.lmean)\n sigmaDMhost=np.log(10**grid.state.host.lsigma)\n meanHost = np.exp(muDMhost + sigmaDMhost**2/2.)\n medianHost = np.exp(muDMhost) \n print(f\"Host: mean={meanHost}, median={medianHost}\")\n plt.ylim(0,ndm-1)\n 
plt.xlim(0,nz-1)\n zmax=zvals[-1]\n nz=zvals.size\n #DMbar, zeval = igm.average_DM(zmax, cumul=True, neval=nz+1)\n DM_cosmic = pcosmic.get_mean_DM(zvals, grid.state)\n\n \n #idea is that 1 point is 1, hence...\n zeval = zvals/dz\n DMEG_mean = (DM_cosmic+meanHost)/ddm\n DMEG_median = (DM_cosmic+medianHost)/ddm\n\n # Check median\n f_median = scipy.interpolate.interp1d(\n zvals, DM_cosmic+medianHost, \n fill_value='extrapolate')\n eval_DMEG = f_median(FRBZ)\n above = FRBDM > eval_DMEG\n print(f\"There are {np.sum(above)/len(FRBZ)} above the median\")\n\n if show_Macquart:\n plt.plot(zeval,DMEG_mean,color='gray',linewidth=2,\n label='Macquart relation (mean)')\n plt.plot(zeval,DMEG_median,color='gray',\n linewidth=2, ls='--',\n label='Macquart relation (median)')\n l=plt.legend(loc='lower right',fontsize=12)\n #l=plt.legend(bbox_to_anchor=(0.2, 0.8),fontsize=8)\n #for text in l.get_texts():\n #\ttext.set_color(\"white\")\n \n # limit to a reasonable range if logscale\n if log and vmnx[0] is None:\n themax=zDMgrid.max()\n themin=int(themax-4)\n themax=int(themax)\n plt.clim(themin,themax)\n \n ##### add FRB host galaxies at some DM/redshift #####\n if FRBZ is not None:\n iDMs=FRBDM/ddm\n iZ=FRBZ/dz\n # Restrict to plot range\n gd = (FRBDM < DMmax) & (FRBZ < zmax)\n plt.plot(iZ[gd],iDMs[gd],'ko',linestyle=\"\",markersize=2.)\n\n cbar=plt.colorbar(im,fraction=0.046, shrink=1.2,aspect=15,pad=0.05)\n cbar.set_label(label)\n\n fig_utils.set_fontsize(ax, fsize)\n \n plt.tight_layout()\n \n if show:\n plt.show()\n else:\n plt.savefig(outfile, dpi=300)\n print(f\"Wrote: {outfile}\")\n plt.close()", "def create_report(result, *, decimal_places=3):\n\n # TODO add effect sizes to multiple comparisons.\n def single_population_string(population, with_stats=False, pop_pval=None, with_rank=True):\n if pop_pval is not None:\n return \"%s (p=%.*f)\" % (population, decimal_places, pop_pval)\n if with_stats:\n halfwidth = (result.rankdf.at[population, 'ci_upper'] - result.rankdf.at[population, 'ci_lower']) / 2\n mystats = []\n if (result.force_mode is not None and result.force_mode=='parametric') or \\\n (result.force_mode is None and result.all_normal):\n mystats.append(\"M=%.*f+-%.*f\" % (decimal_places, result.rankdf.at[population, 'mean'],\n decimal_places, halfwidth))\n mystats.append(\"SD=%.*f\" % (decimal_places, result.rankdf.at[population, 'std']))\n else:\n mystats.append(\"MD=%.*f+-%.*f\" % (decimal_places, result.rankdf.at[population, 'median'],\n decimal_places, halfwidth))\n mystats.append(\"MAD=%.*f\" % (decimal_places, result.rankdf.at[population, 'mad']))\n if with_rank:\n mystats.append(\"MR=%.*f\" % (decimal_places, result.rankdf.at[population, 'meanrank']))\n return \"%s (%s)\" % (population, \", \".join(mystats))\n else:\n return str(population)\n\n def create_population_string(populations, with_stats=False, pop_pvals=None, with_rank=False):\n if isinstance(populations, str):\n populations = [populations]\n population_strings = []\n for index, population in enumerate(populations):\n if pop_pvals is not None:\n cur_pval = pop_pvals[index]\n else:\n cur_pval = None\n population_strings.append(single_population_string(population, with_stats, cur_pval, with_rank))\n if len(populations) == 1:\n popstr = population_strings[0]\n elif len(populations) == 2:\n popstr = \" and \".join(population_strings)\n else:\n popstr = \", \".join(population_strings[:-1]) + \", and \" + population_strings[-1]\n return popstr\n\n if not isinstance(result, RankResult):\n raise TypeError(\"result must be of type 
RankResult and should be the outcome of calling the autorank function.\")\n\n print(\"The statistical analysis was conducted for %i populations with %i paired samples.\" % (len(result.rankdf),\n result.num_samples))\n print(\"The family-wise significance level of the tests is alpha=%.*f.\" % (decimal_places, result.alpha))\n\n if result.all_normal:\n not_normal = []\n min_pvalue = min(result.pvals_shapiro)\n print(\"We failed to reject the null hypothesis that the population is normal for all populations \"\n \"(minimal observed p-value=%.*f). Therefore, we assume that all populations are \"\n \"normal.\" % (decimal_places, min_pvalue))\n else:\n not_normal = []\n pvals = []\n normal = []\n for i, pval in enumerate(result.pvals_shapiro):\n if pval < result.alpha_normality:\n not_normal.append(result.rankdf.index[i])\n pvals.append(pval)\n else:\n normal.append(result.rankdf.index[i])\n if len(not_normal) == 1:\n population_term = 'population'\n else:\n population_term = 'populations'\n print(\"We rejected the null hypothesis that the population is normal for the %s %s. \"\n \"Therefore, we assume that not all populations are \"\n \"normal.\" % (population_term, create_population_string(not_normal, pop_pvals=pvals)))\n\n if result.omnibus == 'bayes':\n if result.all_normal:\n central_tendency = 'mean value'\n central_tendency_long = 'mean value (M)'\n variability = 'standard deviation (SD)'\n effect_size = 'd'\n else:\n central_tendency = 'median'\n central_tendency_long = 'median (MD)'\n variability = 'median absolute deviation (MAD)'\n effect_size = 'gamma'\n print(\n \"We used a bayesian signed rank test to determine differences between the mean values of the \"\n \"populations and report the %s and the %s for each population. We distinguish \"\n \"between populations being pair-wise smaller, equal, or larger and make a decision for one \"\n \"of these cases if we estimate that the posterior probability is at least \"\n \"alpha=%.*f.\" % (central_tendency_long, variability, decimal_places, result.alpha))\n if result.rope_mode == 'effsize':\n print(\n 'We used the effect size to define the region of practical equivalence (ROPE) around the %s '\n 'dynamically as %.*f*%s.' % (central_tendency, decimal_places, result.rope, effect_size))\n else:\n print(\n 'We used a fixed value of %.*f to define the region of practical equivalence (ROPE) around the '\n '%s.' % (decimal_places, result.rope, central_tendency))\n decision_set = set(result.rankdf['decision'])\n decision_set.remove('NA')\n if {'inconclusive'} == decision_set:\n print(\"We failed to find any conclusive evidence for differences between the populations \"\n \"%s.\" % create_population_string(result.rankdf.index, with_stats=True))\n elif {'equal'} == decision_set:\n print(\n \"All populations are equal, i.e., the are no significant and practically relevant differences \"\n \"between the populations %s.\" % create_population_string(result.rankdf.index,\n with_stats=True))\n elif {'equal', 'inconclusive'} == decision_set:\n print(\n \"The populations %s are all either equal or the results of the analysis are inconclusive.\")\n print(result.decision_matrix)\n else:\n print(\"We found significant and practically relevant differences between the populations \"\n \"%s.\" % create_population_string(result.rankdf.index, with_stats=True))\n for i in range(len(result.rankdf)):\n if len(result.rankdf.index[result.decision_matrix.iloc[i, :] == 'smaller']) > 0:\n print('The %s of the population %s is larger than of the populations '\n '%s.' 
% (central_tendency, result.rankdf.index[i],\n create_population_string(\n result.rankdf.index[\n result.decision_matrix.iloc[i, :] == 'smaller'])))\n equal_pairs = []\n for i in range(len(result.rankdf)):\n for j in range(i + 1, len(result.rankdf)):\n if result.decision_matrix.iloc[i, j] == 'equal':\n equal_pairs.append(result.rankdf.index[i] + ' and ' + result.rankdf.index[j])\n if len(equal_pairs) > 0:\n equal_pairs_str = create_population_string(equal_pairs).replace(',', ';')\n print('The following pairs of populations are equal: %s.' % equal_pairs_str)\n if 'inconclusive' in set(result.rankdf['decision']):\n print('All other differences are inconclusive.')\n elif len(result.rankdf) == 2:\n print(\"No check for homogeneity was required because we only have two populations.\")\n if result.effect_size == 'cohen_d':\n effect_size = 'd'\n elif result.effect_size == 'cliff_delta':\n effect_size = 'delta'\n elif result.effect_size == 'akinshin_gamma':\n effect_size = 'gamma'\n else:\n raise ValueError('unknown effect size method, this should not be possible: %s' % result.effect_size)\n if result.omnibus == 'ttest':\n larger = np.argmax(result.rankdf['mean'].values)\n smaller = int(bool(larger - 1))\n if result.all_normal:\n print(\"Because we have only two populations and both populations are normal, we use the t-test to \"\n \"determine differences between the mean values of the populations and report the mean value (M)\"\n \"and the standard deviation (SD) for each population. \")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'both of them are'\n print(\"Because we have only two populations and %s not normal, we use should Wilcoxon's signed rank \"\n \"test to determine the differences in the central tendency and report the median (MD) and the \"\n \"median absolute deviation (MAD) for each population. However, the user decided to force the \"\n \"use of the t-test which assumes normality of all populations and we report the mean value (M) \"\n \"and the standard deviation (SD) for each population.\" % notnormal_str)\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the paired t-test that the mean values of \"\n \"the populations %s are are equal. Therefore, we \"\n \"assume that there is no statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the paired t-test that the mean values of the \"\n \"populations %s are \"\n \"equal. Therefore, we assume that the mean value of %s is \"\n \"significantly larger than the mean value of %s with a %s effect size (%s=%.*f).\"\n % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True),\n result.rankdf.index[larger], result.rankdf.index[smaller],\n result.rankdf.magnitude[larger], effect_size, decimal_places, result.rankdf.effect_size[larger]))\n elif result.omnibus == 'wilcoxon':\n larger = np.argmax(result.rankdf['median'].values)\n smaller = int(bool(larger - 1))\n if result.all_normal:\n print(\"Because we have only two populations and both populations are normal, we should use the t-test \"\n \"to determine differences between the mean values of the populations and report the mean value \"\n \"(M) and the standard deviation (SD) for each population. 
However, the user decided to force the \"\n \"use of the less powerful Wilcoxon signed rank test and we report the median (MD) and the median \"\n \"absolute devivation (MAD) for each population.\")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'both of them are'\n print(\"Because we have only two populations and %s not normal, we use Wilcoxon's signed rank test to \"\n \"determine the differences in the central tendency and report the median (MD) and the median \"\n \"absolute deviation (MAD) for each population.\" % notnormal_str)\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of Wilcoxon's signed rank test that \"\n \"population %s is not greater than population %s . Therefore, we \"\n \"assume that there is no statistically significant difference between the medians of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index[larger], with_stats=True),\n create_population_string(result.rankdf.index[smaller], with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of Wilcoxon's signed rank test that population \"\n \"%s is not greater than population %s. Therefore, we assume \"\n \"that the median of %s is \"\n \"significantly larger than the median value of %s with a %s effect size (%s=%.*f).\"\n % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index[larger], with_stats=True),\n create_population_string(result.rankdf.index[smaller], with_stats=True),\n result.rankdf.index[larger], result.rankdf.index[smaller],\n result.rankdf.magnitude[larger], effect_size, decimal_places, result.rankdf.effect_size[larger]))\n else:\n raise ValueError('Unknown omnibus test for difference in the central tendency: %s' % result.omnibus)\n else:\n if result.all_normal:\n if result.homoscedastic:\n print(\"We applied Bartlett's test for homogeneity and failed to reject the null hypothesis \"\n \"(p=%.*f) that the data is homoscedastic. Thus, we assume that our data is \"\n \"homoscedastic.\" % (decimal_places, result.pval_homogeneity))\n else:\n print(\"We applied Bartlett's test for homogeneity and reject the null hypothesis (p=%.*f) that the\"\n \"data is homoscedastic. Thus, we assume that our data is \"\n \"heteroscedastic.\" % (decimal_places, result.pval_homogeneity))\n\n if result.omnibus == 'anova':\n if result.all_normal and result.homoscedastic:\n print(\"Because we have more than two populations and all populations are normal and homoscedastic, we \"\n \"use repeated measures ANOVA as omnibus \"\n \"test to determine if there are any significant differences between the mean values of the \"\n \"populations. If the results of the ANOVA test are significant, we use the post-hoc Tukey HSD \"\n \"test to infer which differences are significant. We report the mean value (M) and the standard \"\n \"deviation (SD) for each population. Populations are significantly different if their confidence \"\n \"intervals are not overlapping.\")\n else:\n if result.all_normal:\n print(\n \"Because we have more than two populations and the populations are normal but heteroscedastic, \"\n \"we should use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. 
However, the user decided to force the use of \"\n \"repeated measures ANOVA as omnibus test which assume homoscedascity to determine if there are \"\n \"any significant difference between the mean values of the populations. If the results of the \"\n \"ANOVA test are significant, we use the post-hoc Tukey HSD test to infer which differences are \"\n \"significant. We report the mean value (M) and the standard deviation (SD) for each \"\n \"population. Populations are significantly different if their confidence intervals are not \"\n \"overlapping.\")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'some of them are'\n print(\"Because we have more than two populations and the populations and %s not normal, \"\n \"we should use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the median \"\n \"values of the populations and report the median (MD) and the median absolute deviation \"\n \"(MAD). However, the user decided to force the use of repeated measures ANOVA as omnibus \"\n \"test which assume homoscedascity to determine if there are any significant difference \"\n \"between the mean values of the populations. If the results of the ANOVA test are \"\n \"significant, we use the post-hoc Tukey HSD test to infer which differences are \"\n \"significant. We report the mean value (M) and the standard deviation (SD) for each \"\n \"population. Populations are significantly different if their confidence intervals are not \"\n \"overlapping.\" % (notnormal_str))\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the repeated measures ANOVA that there is \"\n \"a difference between the mean values of the populations %s. Therefore, we \"\n \"assume that there is no statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the repeated measures ANOVA that there is \"\n \"a difference between the mean values of the populations %s. Therefore, we \"\n \"assume that there is a statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n meanranks, names, groups = get_sorted_rank_groups(result, False)\n if len(groups) == 0:\n print(\"Based on post-hoc Tukey HSD test, we assume that all differences between the populations \"\n \"are significant.\")\n else:\n groupstrs = []\n for group_range in groups:\n group = range(group_range[0], group_range[1] + 1)\n if len(group) == 1:\n cur_groupstr = names[group[0]]\n elif len(group) == 2:\n cur_groupstr = \" and \".join([names[pop] for pop in group])\n else:\n cur_groupstr = \", \".join([names[pop] for pop in group[:-1]]) + \", and \" + names[group[-1]]\n groupstrs.append(cur_groupstr)\n print(\"Based post-hoc Tukey HSD test, we assume that there are no significant differences within \"\n \"the following groups: %s. 
All other differences are significant.\" % (\"; \".join(groupstrs)))\n print()\n elif result.omnibus == 'friedman':\n if result.all_normal and result.homoscedastic:\n print(\"Because we have more than two populations and all populations are normal and homoscedastic, we \"\n \"should use repeated measures ANOVA as omnibus \"\n \"test to determine if there are any significant differences between the mean values of the \"\n \"populations. However, the user decided to force the use of the less powerful Friedman test as \"\n \"omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. We report the mean value (M), the standard deviation (SD) and the mean rank \"\n \"(MR) among all populations over the samples. Differences between populations are significant, \"\n \"if the difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (decimal_places, result.cd))\n elif result.all_normal:\n print(\"Because we have more than two populations and the populations are normal but heteroscedastic, \"\n \"we use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. We use the post-hoc Nemenyi test to infer which differences are \"\n \"significant. We report the mean value (M), the standard deviation (SD) and the mean rank (MR) \"\n \"among all populations over the samples. Differences between populations are significant, if the \"\n \"difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (decimal_places, result.cd))\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'some of them are'\n print(\"Because we have more than two populations and the populations and %s not normal, \"\n \"we use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the median values \"\n \"of the populations. We use the post-hoc Nemenyi test to infer which differences are \"\n \"significant. We report the median (MD), the median absolute deviation (MAD) and the mean rank \"\n \"(MR) among all populations over the samples. Differences between populations are significant, \"\n \"if the difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (notnormal_str, decimal_places, result.cd))\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the Friedman test that there is no \"\n \"difference in the central tendency of the populations %s. Therefore, we \"\n \"assume that there is no statistically significant difference between the median values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True, with_rank=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the Friedman test that there is no \"\n \"difference in the central tendency of the populations %s. 
Therefore, we \"\n \"assume that there is a statistically significant difference between the median values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True, with_rank=True)))\n meanranks, names, groups = get_sorted_rank_groups(result, False)\n if len(groups) == 0:\n print(\"Based on the post-hoc Nemenyi test, we assume that all differences between the populations \"\n \"are significant.\")\n else:\n groupstrs = []\n for group_range in groups:\n group = range(group_range[0], group_range[1] + 1)\n if len(group) == 1:\n cur_groupstr = names[group[0]]\n elif len(group) == 2:\n cur_groupstr = \" and \".join([names[pop] for pop in group])\n else:\n cur_groupstr = \", \".join([names[pop] for pop in group[:-1]]) + \", and \" + names[group[-1]]\n groupstrs.append(cur_groupstr)\n print(\"Based on the post-hoc Nemenyi test, we assume that there are no significant differences \"\n \"within the following groups: %s. All other differences are \"\n \"significant.\" % (\"; \".join(groupstrs)))\n else:\n raise ValueError('Unknown omnibus test for difference in the central tendency: %s' % result.omnibus)", "def _build_ppdf(self,pdf_dset,renormalize):\n\n if (not hasattr(self,'u')) or (not hasattr(self,'w')) or (not hasattr(self,'sfr')):\n raise AttributeError(\"axes are not set. Call set_axes() first\")\n\n dbinsq = self.dlogcs*self.dlogvout\n\n # Momentum flux PDF\n etaM = pdf_dset['etaM'] # in Msun/kpc^2/yr\n etap = self._etap(self.sfr) # in (Msun*km/s)/kpc^2/yr\n pdf_dset['etap'] = etap\n\n pfact = (self.vout**2+self.cs**2)/(self.vp*self.vout)\n ppdfc = etaM/etap*pdf_dset['Mpdf-cool']*pfact\n ppdfh = etaM/etap*pdf_dset['Mpdf-hot']*pfact\n ppdf = ppdfc + ppdfh\n\n if renormalize:\n renorm = ppdf.sum(dim=['logcs','logvout'])*dbinsq\n ppdfc = ppdfc/renorm\n ppdfh = ppdfh/renorm\n ppdf = ppdf/renorm\n pdf_dset['p_renorm'] = renorm\n\n pdf_dset['ppdf-cool'] = ppdfc\n pdf_dset['ppdf-hot'] = ppdfh\n pdf_dset['etap-cool'] = pdf_dset['etap']*ppdfc.sum(dim=['logcs','logvout'])*dbinsq\n pdf_dset['etap-hot'] = pdf_dset['etap']*ppdfh.sum(dim=['logcs','logvout'])*dbinsq\n pdf_dset['ppdf'] = ppdf", "def plot_sdmult(filedic, imgs_ref, figname, nsd_ref, npx=24, zcut=0.007, beta=-1, thetacut=0.0):\n xvals=np.linspace(LundImage.xval[0], LundImage.xval[1], npx+1)\n yvals=np.linspace(LundImage.yval[0], LundImage.yval[1], npx+1)\n xbins=np.array([0.5*(xvals[i]+xvals[i+1]) for i in range(len(xvals)-1)])\n ybins=np.array([0.5*(yvals[i]+yvals[i+1]) for i in range(len(yvals)-1)])\n\n yy=np.linspace(LundImage.yval[0], LundImage.yval[1], 10000)\n xx=np.linspace(LundImage.xval[0], LundImage.xval[1], 10000)\n \n fct= {}\n nsd= {}\n for lab in filedic.keys():\n ims = np.load(filedic[lab])\n im = np.average(ims, axis=0)\n f = interp2d(xbins, ybins, im, kind='linear') #linear, cubic, quintic\n fct[lab] = f\n nsd[lab]=[]\n for i in range(ims.shape[0]):\n nsd[lab].append(n_sd(ims[i], xbins, ybins, beta, zcut, thetacut))\n \n nsd_ref_im = []\n for i in range(len(imgs_ref)):\n nsd_ref_im.append(n_sd(imgs_ref[i], xbins, ybins, beta, zcut, thetacut))\n \n fig, ax = plt.subplots(figsize=(5,3.5))\n bins = np.arange(0, 25, 1)\n for lab in nsd.keys():\n plt.hist(nsd[lab], bins=bins, histtype='step', density=True, label=lab)\n plt.hist(nsd_ref_im, bins=bins, histtype='step', density=True, label='Pythia 8')\n plt.hist(nsd_ref, bins=bins, histtype='step', color='C3', ls=':', density=True)\n plt.text(0.4,0.275,'$z_\\mathrm{cut}=%.3f,\\, \\\\beta=%i,\\, 
\\\\theta_\\mathrm{cut}=%.1f$' % (zcut,beta,thetacut))\n ax.set_xlim((0,12))\n ax.set_ylim((0.0,0.30))\n ax.set_xlabel('$n_\\mathrm{SD}$')\n plt.legend()\n ax.grid(linestyle=':')\n plt.savefig(figname, bbox_inches='tight')\n plt.close()", "def generate_report(df_cna):\n df_cnaC = pd.DataFrame(index=df_cna.index)\n df_cnaC['sum_1'] = df_cna[df_cna.iloc[:,:] == 1].count(axis=1)\n df_cnaC['deep_amp'] = df_cna[df_cna.iloc[:,:] == 2].count(axis=1)\n df_cnaC['sum_-1'] = df_cna[df_cna.iloc[:,:] == -1].count(axis=1)\n df_cnaC['deep_del'] = df_cna[df_cna.iloc[:,:] == -2].count(axis=1)\n df_cnaC['amp_total'] = df_cnaC['sum_1'] + df_cnaC['deep_amp']\n df_cnaC['del_total'] = df_cnaC['sum_-1'] + df_cnaC['deep_del']\n return df_cnaC", "def science_plots(settings, onlycnf=False):\n\n if len(settings.prediction_files) == 0:\n print(\n lu.str_to_yellowstr(\"Warning: no prediction files provided. Not plotting\")\n )\n return\n\n # Load data\n df_SNinfo = du.load_HDF5_SNinfo(settings)\n\n # check if files are there\n tmp_not_found = [m for m in settings.prediction_files if not os.path.exists(m)]\n if len(tmp_not_found) > 0:\n print(lu.str_to_redstr(f\"Files not found {tmp_not_found}\"))\n tmp_prediction_files = [\n m for m in settings.prediction_files if os.path.exists(m)\n ]\n settings.prediction_files = tmp_prediction_files\n\n for f in settings.prediction_files:\n if Path(f).suffix == \".pickle\":\n df = pd.read_pickle(f)\n elif Path(f).suffix == \".csv\":\n df = pd.read_csv(f)\n df[\"SNID\"] = df[\"SNID\"].astype(str)\n model_name = Path(f).stem\n\n cols_to_merge = [\n \"SNID\",\n \"SIM_REDSHIFT_CMB\",\n settings.sntype_var,\n \"mB\",\n \"x1\",\n \"c\",\n ]\n cols_to_merge += [c for c in df_SNinfo.columns if \"unique_nights\" in c]\n cols_to_merge += [c for c in df_SNinfo.columns if \"_num_\" in c]\n\n df = df.merge(df_SNinfo.reset_index()[cols_to_merge], how=\"left\", on=\"SNID\")\n\n if onlycnf:\n cnf_matrix(df, model_name, settings)\n else:\n # Science plots\n purity_vs_z(df, model_name, settings)\n # cadence_acc_matrix(df, model_name, settings)\n # Get extra info from fits (for distance modulus)\n fits = du.load_fitfile(settings)\n if len(fits) != 0:\n fits = fits[[\"SNID\", \"cERR\", \"mBERR\", \"x1ERR\"]]\n hubble_residuals(df, model_name, fits, settings)\n cnf_matrix(df, model_name, settings)", "def makecldf(args):\n with_dataset(args, Dataset._install)", "def correlation_analysis():\n\n raw_covid_data = read_covid_data()\n\n pop_data = read_population()\n\n life_expectancy_data = read_life_expectancy()\n\n gdp_data = read_gdp()\n\n edu_data = read_education()\n\n int_data = read_internet()\n\n covid_joined = pd.merge(raw_covid_data, pop_data, on=\"Country\")\n\n covid_joined.insert(4, \"Confirmed rate\", covid_joined[\"Confirmed\"] / covid_joined[\"Population\"])\n covid_joined.insert(5, \"Death rate\", covid_joined[\"Death\"] / covid_joined[\"Population\"])\n\n covid_life_joined = pd.merge(covid_joined, life_expectancy_data, on=\"Country\")\n covid_life_gdp_joined = pd.merge(covid_life_joined, gdp_data, on=\"Country\")\n covid_life_gdp_edu_joined = pd.merge(covid_life_gdp_joined, edu_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = pd.merge(covid_life_gdp_edu_joined, int_data, on=\"Country\")\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Education != '..']\n covid_life_gdp_edu_int_joined = covid_life_gdp_edu_int_joined[covid_life_gdp_edu_int_joined.Internet != '..']\n covid_life_gdp_edu_int_joined['Education'] = 
covid_life_gdp_edu_int_joined['Education'].astype(float)\n covid_life_gdp_edu_int_joined['Internet'] = covid_life_gdp_edu_int_joined['Internet'].astype(float)\n\n sns.set()\n\n draw_histogram(covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"COVID-19 Confirmed rate\")\n draw_histogram(covid_life_gdp_edu_int_joined[\"Death rate\"], \"COVID-19 Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Life expectancy\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Life expectancy\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Life expectancy\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"GDP\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"GDP\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"GDP\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Education\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Education\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Education\", \"Death rate\")\n\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Confirmed rate\"], \"Internet\", \"Confirmed rate\")\n display_analysis_result(covid_life_gdp_edu_int_joined[\"Internet\"], covid_life_gdp_edu_int_joined[\"Death rate\"], \"Internet\", \"Death rate\")", "def generate_2d_cs_plot(data, atom1=\"CA\", atom2=\"CB\", resid_li=\"\", from_frame='all', to_frame='all', soluplots=False):\n\n #Generate an empty dataframe and pop out the mean and the deviation from the CS/frame pickle\n result = pd.DataFrame()\n data.set_index(['resSeq','name'], inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n\n # If frames were selected, drop also all columns were\n if from_frame != 'all':\n frames = ['resname', 'resname_s']+[ str(f) for f in range(int(from_frame), int(to_frame)+1)]\n data = data.filter(frames, axis=1)\n\n # Take all residues if none were submitted\n if not resid_li:\n resid_li = {index[0] for index in data.index.values}\n\n # Sort residue ids numerically\n resid_li = [ str(i) for i in sorted([int(x) for x in resid_li]) ]\n\n #loop over the residues selcted by the user\n for item in resid_li:\n try: \n df1 = data.loc[int(item),atom1] #select atom 1 the row from the dataframe which matches the inputs from the user\n df2 = data.loc[int(item),atom2] #select atom 2 the row from the dataframe which matches the inputs from the user\n resname = data.loc[[int(item),'CA'], 'resname'].unique()[0]\n # Option to make \"Solution NMR predictions\": make a distribution out of average and variance of our cs values, and plot it\n if soluplots:\n np1=np.array(df1[2:])\n np2=np.array(df2[2:])\n dist1 = np.random.normal(np1.mean(), np1.std()/10, len(np1))\n dist2 = np.random.normal(np2.mean(), np2.std()/10, len(np2))\n df_e1 = pd.DataFrame(data=dist1, columns=[atom1]) #Build the plotting dataframe\n df_e2 = pd.DataFrame(data=dist2, columns=[atom2])\n else:\n df_e1=df1.to_frame(name=atom1)\n df_e2=df2.to_frame(name=atom2)\n except Exception as e:\n continue\n temp_df = pd.concat([df_e1,df_e2], axis=1, join=\"inner\") #concatenate all the residues dataframe into a bigger one\n 
temp_df[\"IDs\"]=str(item)+' '+resname #give them different ids to have differnete colors in the plot\n result = result.append(temp_df) #build the final DF\n\n # Put atoms in avail_res_atoms dictionary (which I dont remember exactly what does but seems important)\n avail_res_atoms = {\n \"%s.%s\"%(item,atom1) : {\"display\":False,\"color\":\"#FF0000\"},\n \"%s.%s\"%(item,atom2) : {\"display\":False,\"color\":\"#FF0000\"},\n }\n\n # If there are no atoms matching this selection\n if result.empty: \n return('', ['error'])\n\n #plot\n fig = px.density_contour(result, x=atom1, y=atom2, color=\"IDs\", labels={\n atom1 : \"Chemical shift for \"+str(atom1)+\" (ppm)\",\n atom2 : \"Chemical shift for \"+str(atom2)+\" (ppm)\",\n \"IDs\": \"Residue ID\"},\n color_discrete_sequence=px.colors.qualitative.Dark24)\n # Reverse axis\n fig['layout']['yaxis']['autorange'] = \"reversed\"\n fig['layout']['xaxis']['autorange'] = \"reversed\"\n\n\n fig.update_layout(legend=dict(\n itemsizing='constant',\n itemclick='toggleothers',\n itemdoubleclick='toggle',\n ))\n\n #Skip hover info when scrolling through the plot\n fig.update_traces(hoverinfo='skip', hovertemplate=None)\n\n # Return plot\n p = pt.offline.plot(fig, include_plotlyjs=False, output_type='div')\n return(p,avail_res_atoms)" ]
[ "0.5600742", "0.5545156", "0.5511175", "0.5406428", "0.5405446", "0.53374326", "0.52538216", "0.52383655", "0.5226638", "0.519781", "0.51927817", "0.51903063", "0.516903", "0.5139694", "0.5136511", "0.5129903", "0.51277244", "0.51252174", "0.50994223", "0.5087249", "0.5086157", "0.5059", "0.5055046", "0.50485337", "0.50444436", "0.5043972", "0.50431", "0.50398445", "0.5035292", "0.50249326", "0.5015368", "0.501124", "0.50081646", "0.50037515", "0.4988805", "0.49810672", "0.49778324", "0.4977451", "0.49707773", "0.49600223", "0.49487236", "0.4941671", "0.4936454", "0.49332678", "0.49303955", "0.4924732", "0.49245638", "0.49107927", "0.4905669", "0.48980963", "0.48962224", "0.4886532", "0.48812535", "0.48784128", "0.48770428", "0.48767522", "0.4860177", "0.4857843", "0.48485", "0.48391223", "0.48378184", "0.48323196", "0.48312417", "0.48312095", "0.48275477", "0.48240206", "0.4819817", "0.48111534", "0.48026645", "0.480178", "0.47946888", "0.4793216", "0.47923642", "0.47918695", "0.4784344", "0.4780952", "0.4778766", "0.4774035", "0.47696966", "0.47678775", "0.47677362", "0.47675267", "0.476736", "0.4764477", "0.47642273", "0.47589272", "0.47564128", "0.4749366", "0.47485167", "0.47466433", "0.4744355", "0.47432962", "0.47399676", "0.4738555", "0.47333232", "0.47331002", "0.47262192", "0.47188792", "0.47174513", "0.4716974", "0.47137445" ]
0.0
-1
dictionary of boolean multimapping matrices
def build_mm_df(sralist):

    def convert_to_codon(nts_array):
        """
        pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.
        This function converts nucleotide arrays to codon length (nts to codon resolution):
        """
        nts_array = np.array(nts_array)
        # average the per-base coverage over each complete codon (3 nts per codon)
        n_codons = int(np.floor(len(nts_array) / 3))
        codon_array = np.sum(np.reshape(nts_array[:n_codons * 3], (n_codons, 3)), 1) / 3.
        return codon_array

    def compute_mm(mmdata):
        """
        get per gene average multi-mapping score
        """
        mm_df = pd.DataFrame(columns=['ORF', 'MM'])
        counter = 0
        for gene in mmdata.keys():
            current_matrix = mmdata[gene]
            current_avrg = np.mean(np.sum(current_matrix, 1) / current_matrix.shape[1])
            mm_df.loc[counter] = [gene, current_avrg]
            counter += 1
        return mm_df

    mm_mat = {}
    mm_pct = {}

    N = len(sralist)

    for ix, dataset in enumerate(sralist):
        samfile = pysam.AlignmentFile(TMP_DIR + '/ambiguous_reads/' + dataset + '_STAR_transcriptome_multi_mapped_sorted.bam', 'rb')
        genes_list = list(samfile.references)
        print(ix, dataset)

        for geneID in genes_list:
            # count the coverage of genomic positions by reads in region.
            # Returns: four array.arrays of the same length in order A C G T
            # The coverage is computed per-base [ACGT]
            cov = samfile.count_coverage(geneID, read_callback='nofilter')
            # Sum all 4 arrays
            cov_sum = np.sum(cov, axis=0)
            #print(geneID, cov_sum)
            codon_cov = convert_to_codon(cov_sum)
            codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov])

            M = len(codon_bool)

            if ix == 0:
                mm_mat[geneID] = np.zeros((N, M)) * np.nan

            current_matrix = mm_mat[geneID]
            current_matrix[ix, :] = np.copy(codon_bool)
            mm_mat[geneID] = current_matrix

    mm_avrg = compute_mm(mm_mat)
    #mm_avrg.to_json('yeast_mm.json')
    #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\t')

    # consensus profile: keep a codon position if it is multi-mapped in at most theta_mm samples
    mm_profile = {}
    theta_mm = 5
    for orf in mm_mat.keys():
        current_mat = mm_mat[orf]
        current_bool = np.sum(current_mat, 0) <= theta_mm
        mm_profile[orf] = current_bool

    with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm:
        pickle.dump(mm_profile, f_mm)

    return mm_mat, mm_avrg, mm_profile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dict_of_bool2(self):\n pass", "def eye(m):\n data = dict()\n for i, j in itertools.product(range(m), range(m)):\n data[i, j] = mpfr(i == j)\n return MPMatrix((m, m), data)", "def getMatrixMap(self):\n return self.M_array", "def buildDicts(resComp, comparisons, matrices, atom1, res1, atom2, res2, r, FLAG=False):\r\n if resComp not in matrices:\r\n for pairs in comparisons:\r\n if resComp[0] == pairs[1] and resComp[1] == pairs[0]:\r\n return\r\n matrices[resComp] = [r]\r\n comparisons[resComp] = [(atom1, res1, atom2, res2, r)]\r\n FLAG = True\r\n\r\n elif type(comparisons[resComp][-1]) == list:\r\n if comparisons[resComp][-1][-1][1] != res1 or comparisons[resComp][-1][-1][3] != res2:\r\n # Make new matrix\r\n raise Warning\r\n elif comparisons[resComp][-1][-1][0] == atom1:\r\n comparisons[resComp][-1].append((atom1, res1, atom2, res2, r))\r\n matrices[resComp][-1].append(r)\r\n else:\r\n comparisons[resComp].append([(atom1, res1, atom2, res2, r)])\r\n matrices[resComp].append([r])\r\n\r\n elif type(comparisons[resComp][-1]) == tuple:\r\n if comparisons[resComp][-1][0] == atom1:\r\n comparisons[resComp].append((atom1, res1, atom2, res2, r))\r\n matrices[resComp].append(r)\r\n else:\r\n comparisons[resComp] = [comparisons[resComp], [(atom1, res1, atom2, res2, r)]]\r\n matrices[resComp] = [matrices[resComp], [r]]\r\n\r\n else:\r\n print(\"Error: Dictionary should contain Match objects / Lists of Match objects / List of lists of Match objects\")\r\n\r\n return comparisons, matrices, resComp, FLAG", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def matrixMult( self, matrix0, matrix1 ):\r\n result = {}\r\n keys = sorted( set( matrix0.keys() ) )\r\n count = range( len( matrix0.keys() ) )\r\n \r\n for key in keys:\r\n result[ key ] = []\r\n for i in count:\r\n sum = 0\r\n for j in count:\r\n sum += matrix0[ key ][j] * matrix1[ keys[j] ][i]\r\n result[ key ].insert( i, sum )\r\n \r\n return result", "def _tag_to_matrix(tags):\n\n example1 = tf.cast(tags, tf.bool)\n example2 = tf.cast(tf.expand_dims(tags, axis=-1), tf.bool)\n mat = tf.cast(tf.logical_and(example1, example2), tf.int32)\n return mat", "def create_matrix_mapping(train_mh, unk_vec_id):\n mh_index_map = {}\n matrix_idx = 0\n for vector_idx in train_mh:\n if vector_idx == unk_vec_id:\n unk_matrix_id = matrix_idx\n mh_index_map[vector_idx] = matrix_idx\n matrix_idx += 1\n return mh_index_map, unk_matrix_id", "def sparse_kwargs(weight_matrix):\n weight_matrix_t = np.transpose(weight_matrix)\n nonzero_arrays = np.nonzero(weight_matrix_t)\n indices = np.transpose(nonzero_arrays)\n values = weight_matrix_t[nonzero_arrays]\n return {\n \"n_nonzero\": len(values),\n \"indices_initializer\": initializers.Constant(indices),\n \"values_initializer\": initializers.Constant(values),\n }", "def asMatrix(self):\n output = np.zeros((self.size[0],self.size[1]))\n for pos in self.matrixDict:\n output[pos[0]][pos[1]] = self.matrixDict[pos]\n return output", "def __init__(self, transition_matrix, states):\n self.transition_matrix = np.atleast_2d(transition_matrix)\n self.states = states\n self.index_dict = {self.states[index]: index for index in\n range(len(self.states))}\n self.state_dict = {index: self.states[index] for index in\n 
range(len(self.states))}", "def make_training_matrices(source_dictionary, target_dictionary, bilingual_dictionary):\n source_matrix = []\n target_matrix = []\n\n for (source, target) in bilingual_dictionary:\n if source in source_dictionary and target in target_dictionary:\n source_matrix.append(source_dictionary[source])\n target_matrix.append(target_dictionary[target])\n\n # return training matrices\n return np.array(source_matrix), np.array(target_matrix)", "def in_storage(query,k,m):\n bools = []\n for i in range(1,k+1):\n bools.append(bfilter[mmh3.hash(query,i)%m])\n return np.prod(bools)", "def aPosteriori(self) -> dict:\n\n simbIn = self.simbIn\n simbOut = self.simbOut\n probIn = self.probIn\n probOut = self.probOut\n mat = self.mat\n\n return {\n i: {\n j: mat[i][j] * probIn[i] / probOut[j] for j in simbOut\n } for i in simbIn\n }", "def _calculate_leading_dim_map():\n small_matrixes = [(value, value+64) for value in range(256, 40192+512, 512)]\n large_matrixes = [(value, value+1088) for value in range(1024, 39936+1024, 1024)]\n return dict(small_matrixes + large_matrixes)", "def _all_labels_to_bitmasks(all_labels):\n l_dict = {}\n for i, label in enumerate(all_labels):\n l_dict[label.name] = 1<<i\n return l_dict", "def sur_mat2dict(mat,ndim):\n kwork = np.vstack((np.zeros((1,1),dtype=np.int_),ndim))\n dicts = {}\n ki = 0\n for r in range(1,len(kwork)):\n ki = ki + kwork[r-1][0] \n ke = ki + kwork[r][0]\n dicts[r-1] = mat[ki:ke,:]\n return(dicts)", "def miserables(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,\n 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12,\n 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,\n 20, 20, 20, 21, 21, 22, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25,\n 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 28, 28, 29, 29, 29, 29, 29, 30, 34, 34, 34, 34, 35, 35, 35, 36, 36, 37, 39,\n 39, 41, 41, 41, 41, 41, 41, 41, 41, 41, 46, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,\n 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 51, 51, 51, 51, 54, 55, 55, 55, 55, 55, 55, 55, 55,\n 55, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59,\n 59, 59, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64,\n 64, 65, 65, 66, 68, 68, 68, 68, 69, 69, 69, 70, 70, 71, 73])\n col = np.array(\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 3, 11, 11, 11, 12, 13, 14,\n 15, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 43,\n 44, 48, 49, 51, 55, 58, 64, 68, 69, 70, 71, 72, 23, 17, 18, 19, 20,\n 21, 22, 23, 26, 55, 18, 19, 20, 21, 22, 23, 19, 20, 21, 22, 23, 20,\n 21, 22, 23, 21, 22, 23, 22, 23, 23, 24, 25, 27, 29, 30, 31, 25, 26,\n 27, 41, 42, 50, 68, 69, 70, 26, 27, 39, 40, 41, 42, 48, 55, 68, 69,\n 70, 71, 75, 27, 43, 49, 51, 54, 55, 72, 28, 29, 31, 33, 43, 48, 58,\n 68, 69, 70, 71, 72, 44, 45, 34, 35, 36, 37, 38, 31, 35, 36, 37, 38,\n 36, 37, 38, 37, 38, 38, 52, 55, 42, 55, 57, 62, 68, 69, 70, 71, 75,\n 47, 48, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68, 69, 71, 73,\n 74, 75, 76, 50, 51, 54, 55, 56, 52, 53, 54, 55, 55, 56, 57, 58, 59,\n 61, 62, 63, 64, 65, 58, 59, 61, 62, 63, 64, 65, 67, 59, 60, 61, 62,\n 63, 64, 65, 66, 70, 76, 60, 61, 62, 63, 64, 65, 66, 61, 62, 63, 64,\n 65, 66, 62, 63, 64, 65, 66, 63, 
64, 65, 66, 76, 64, 65, 66, 76, 65,\n 66, 76, 66, 76, 76, 69, 70, 71, 75, 70, 71, 75, 71, 75, 75, 74])\n data = np.array(\n [1, 8, 10, 1, 1, 1, 1, 2, 1, 5, 6, 3, 3, 1, 1, 1, 1,\n 1, 9, 7, 12, 31, 17, 8, 2, 3, 1, 2, 3, 3, 2, 2, 2, 3,\n 1, 1, 2, 2, 19, 4, 1, 1, 1, 1, 1, 1, 2, 4, 4, 4, 3,\n 3, 3, 3, 1, 1, 4, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4,\n 3, 3, 3, 5, 4, 4, 4, 4, 4, 2, 1, 5, 1, 1, 2, 13, 4,\n 1, 2, 1, 1, 1, 1, 1, 1, 5, 1, 1, 3, 2, 1, 2, 5, 6,\n 4, 1, 3, 1, 1, 3, 2, 1, 21, 2, 1, 1, 1, 1, 1, 1, 6,\n 1, 2, 1, 1, 1, 3, 2, 2, 2, 1, 1, 1, 2, 3, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 1, 1, 2, 5, 1, 1, 1, 1, 1, 1, 1,\n 1, 2, 4, 1, 7, 6, 1, 2, 7, 5, 5, 3, 1, 1, 1, 1, 2,\n 2, 1, 1, 1, 9, 1, 12, 1, 1, 1, 2, 6, 1, 1, 1, 7, 5,\n 1, 9, 1, 5, 2, 1, 2, 1, 2, 2, 1, 1, 3, 15, 4, 6, 17,\n 4, 10, 5, 3, 1, 1, 2, 5, 13, 5, 9, 5, 1, 2, 3, 2, 2,\n 2, 1, 6, 3, 6, 5, 1, 6, 12, 5, 2, 1, 4, 5, 1, 1, 7,\n 3, 1, 2, 1, 1, 6, 4, 2, 3, 4, 2, 3, 2, 1, 1, 3])\n adjacency = sparse.csr_matrix((data, (row, col)), shape=(77, 77))\n adjacency = adjacency + adjacency.T\n\n if metadata:\n names = ['Myriel', 'Napoleon', 'Mlle Baptistine', 'Mme Magloire', 'Countess de Lo', 'Geborand',\n 'Champtercier', 'Cravatte', 'Count', 'Old man', 'Labarre', 'Valjean', 'Marguerite', 'Mme Der',\n 'Isabeau', 'Gervais', 'Tholomyes', 'Listolier', 'Fameuil', 'Blacheville', 'Favourite', 'Dahlia',\n 'Zephine', 'Fantine', 'Mme Thenardier', 'Thenardier', 'Cosette', 'Javert', 'Fauchelevent',\n 'Bamatabois', 'Perpetue', 'Simplice', 'Scaufflaire', 'Woman1', 'Judge', 'Champmathieu', 'Brevet',\n 'Chenildieu', 'Cochepaille', 'Pontmercy', 'Boulatruelle', 'Eponine', 'Anzelma', 'Woman2',\n 'Mother Innocent', 'Gribier', 'Jondrette', 'Mme Burgon', 'Gavroche', 'Gillenormand', 'Magnon',\n 'Mlle Gillenormand', 'Mme Pontmercy', 'Mlle Vaubois', 'Lt Gillenormand', 'Marius', 'Baroness',\n 'Mabeuf', 'Enjolras', 'Combeferre', 'Prouvaire', 'Feuilly', 'Courfeyrac', 'Bahorel', 'Bossuet',\n 'Joly', 'Grantaire', 'MotherPlutarch', 'Gueulemer', 'Babet', 'Claquesous', 'Montparnasse',\n 'Toussaint', 'Child1', 'Child2', 'Brujon', 'Mme Hucheloup']\n x = np.array(\n [0.53, 0.98, 0.41, 0.4, 1., 0.92, 0.84, 0.74, 0.78, 1., 0.51, 0.09, -0., 0.29, 0.37,\n 0.41, -0.35, -0.46, -0.42, -0.46, -0.41, -0.37, -0.36, -0.2, -0.06, -0.04, -0.01, -0.02, 0.33,\n 0.17, -0.29, -0.1, 0.58, 0.29, 0.29, 0.26, 0.29, 0.37, 0.35, 0.04, -0.01, -0.18, -0.09,\n 0.2, 0.51, 0.7, -0.95, -0.7, -0.37, -0.08, -0.18, -0.05, 0.04, -0.12, -0.06, -0.13, -0.24, -0.48,\n -0.25, -0.33, -0.43, -0.39, -0.33, -0.42, -0.31, -0.38, -0.48, -0.74, -0.08, -0.1, -0.02, -0.1,\n 0.14, -0.76, -0.75, -0.18, -0.58])\n y = np.array(\n [-0.23, -0.42, -0.14, -0.18, -0.31, -0.52, -0.6, -0.65, -0.38, -0.19, 0.39, 0.03, 0.44, -0.44,\n 0.51, -0.36, 0.27, 0.37, 0.4, 0.32, 0.32, 0.36, 0.4, 0.2, 0.07, 0.14, -0.05, 0.06, 0.06,\n 0.24, -0.26, -0.1, 0.24, -0.04, 0.17, 0.23, 0.31, 0.21, 0.27, -0.36, 0.69, 0.11, 0.38, -0.09,\n 0.05, 0.12, 0.82, 0.44, 0.06, -0.2, -0.4, -0.28, -0.68, -0.79, -0.4, -0.07, -0.51, -0.17, -0.03,\n -0.09, -0.14, -0.04, -0.04, -0.07, -0.06, -0.11, -0.06, -0.35, 0.24, 0.19, 0.22, 0.29, -0.2,\n 0.06, 0.14, 0.3, -0.1])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.names = np.array(names)\n graph.position = np.vstack((x, y)).T\n graph.name = 'miserables'\n return graph\n else:\n return adjacency", "def get_same_mapping(self):\n sames = {}\n for clue in self.clueset:\n if clue[\"type\"] == SAME:\n sames[clue[\"vals\"][0]] = clue[\"vals\"][1]\n sames[clue[\"vals\"][1]] = clue[\"vals\"][0]\n\n return sames", "def 
test_setitem_bool(self):\n random.seed(12345)\n nside_coverage = 32\n nside_map = 128\n pxnums = np.arange(0, 2000)\n pxvalues = np.ones(pxnums.size, dtype=bool)\n full_map = np.zeros(hpg.nside_to_npixel(nside_map), dtype=bool)\n full_map[pxnums] = pxvalues\n\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage=nside_coverage,\n nside_sparse=nside_map, dtype=pxvalues.dtype)\n\n sparse_map[pxnums[0]] = pxvalues[0]\n testing.assert_equal(sparse_map[pxnums[0]], full_map[pxnums[0]])\n\n sparse_map[int(pxnums[1])] = bool(pxvalues[1])\n testing.assert_equal(sparse_map[pxnums[1]], full_map[pxnums[1]])\n\n sparse_map[pxnums] = pxvalues\n testing.assert_array_almost_equal(sparse_map[pxnums], full_map[pxnums])", "def __mul__(self, matrix: 'MatrixBoolean') -> 'MatrixBoolean':\n\t\t# wrong dimensions\n\t\tif matrix.dimM != self.dimN:\n\t\t\traise ValueError(\"Matrix have wrong dimensions\")\n\t\t\n\t\tr = MatrixBoolean(empty=(self.dimM, matrix.dimN))\n\t\tfor m in range(self.dimM):\n\t\t\tfor n in range(matrix.dimN):\n\t\t\t\tl = (self.get_value(m, i) & matrix.get_value(i, n) for i in range(self.dimN))\n\t\t\t\t# with functools package\n\t\t\t\tr.matrix[m][n] = functools.reduce(lambda x, y: x | y, l)\n\t\t\t\t#r.matrix[n][m] = 1 if sum(l) > 0 else 0\n\t\t\t\t#r.matrix[n][m] = sum(self.matrix[n][i] * matrix.matrix[i][m] for i in range(self.self.dimN))\n\t\treturn r", "def contrast_matrices(self):\n \n p = len(self.formula.terms)\n matrices = {}\n for crep in self._contrast_reps:\n s = self.slices[crep]\n l = s.stop - s.start\n array = np.zeros((l,p), np.float)\n for i, j in enumerate(range(l)):\n array[i,j+s.start] = 1.\n matrices[crep] = array\n return matrices", "def simplify(self):\n new = {}\n for k,v in self.__m__.items():\n if numpy.any(v!=0):\n new[k] = v\n self.__m__ = new", "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def similarity_map(svd_collect):\n sim_map = defaultdict(list)\n for x in svd_collect:\n sim_map[x[0]] = x[1]\n return dict(sim_map)", "def select_dic(old_dic,**kwargs):\n \n \n bool_all=np.ones(old_dic['MODEL_KEY'].shape,dtype=bool)\n for key in old_dic:\n lim_val = kwargs.get(key,None)\n if lim_val is None:\n continue\n array_val=old_dic[key]\n bool_sel=(array_val<=lim_val[1]) & (array_val>=lim_val[0])\n \n bool_all=bool_all & bool_sel\n \n new_dic={key:old_dic[key][bool_all] for key in old_dic}\n \n logging.info('Initial number of elements is %i'%(len(old_dic['MODEL_KEY'])))\n logging.info('Final number of elements is %i'%(len(new_dic['MODEL_KEY'])))\n\n \n return new_dic,bool_all", "def toBitString(m):\n\n for k, v in MATRIX.items():\n if k in m:\n m = m.replace(k, v)\n return m", "def cbindMatrices(hm, args):\n hm2 = heatmapper.heatmapper()\n\n # Make a dict of region name:row associations\n hm.read_matrix_file(args.matrixFile[0])\n d = dict({x: dict() for x in hm.parameters[\"group_labels\"]})\n for idx, group in enumerate(hm.parameters[\"group_labels\"]):\n s = hm.parameters[\"group_boundaries\"][idx]\n e = hm.parameters[\"group_boundaries\"][idx + 1]\n for idx2, reg in enumerate(hm.matrix.regions[s:e]):\n d[group][reg[2]] = idx2 + s\n\n # Iterate through the other matrices\n for idx in range(1, len(args.matrixFile)):\n 
hm2.read_matrix_file(args.matrixFile[idx])\n # Add the sample labels\n hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels'])\n # Add the sample boundaries\n lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:]\n hm.parameters['sample_boundaries'].extend(lens)\n\n # Add on additional NA initialized columns\n ncol = hm.matrix.matrix.shape[1]\n hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape)))\n hm.matrix.matrix[:, ncol:] = np.NAN\n\n # Update the values\n for idx2, group in enumerate(hm2.parameters[\"group_labels\"]):\n if group not in d:\n continue\n s = hm2.parameters[\"group_boundaries\"][idx2]\n e = hm2.parameters[\"group_boundaries\"][idx2 + 1]\n for idx3, reg in enumerate(hm2.matrix.regions[s:e]):\n if reg[2] not in d[group]:\n continue\n hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :]\n\n # Append the special params\n for s in hm.special_params:\n hm.parameters[s].extend(hm2.parameters[s])\n\n # Update the sample parameters\n hm.matrix.sample_labels = hm.parameters['sample_labels']\n hm.matrix.sample_boundaries = hm.parameters['sample_boundaries']", "def test_apply_flags():\n true_value = dqflags.pixel['HOT'] + dqflags.pixel['DO_NOT_USE']\n\n print(true_value)\n\n badmap = np.zeros((10, 10), dtype=np.int)\n true_map = np.zeros((10, 10), dtype=np.uint32)\n for i in range(10):\n badmap[i, i] = 1\n true_map[i, i] = true_value\n\n\n print(true_map)\n\n\n flag_names = ['HOT', 'DO_NOT_USE']\n pixmap = bpd.apply_flags(badmap, flag_names)\n\n\n print(pixmap)\n\n\n assert np.all(pixmap == true_map)", "def sample_search(self):\n result = dict()\n for mutable in self.mutables:\n if isinstance(mutable, LayerChoice):\n gen_index = torch.randint(high=len(mutable), size=(1, ))\n result[mutable.key] = F.one_hot(gen_index, num_classes=len(mutable)).view(-1).bool()\n elif isinstance(mutable, InputChoice):\n if mutable.n_chosen is None:\n result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool()\n else:\n perm = torch.randperm(mutable.n_candidates)\n mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)]\n result[mutable.key] = torch.tensor(mask, dtype=torch.bool) # pylint: disable=not-callable\n return result", "def explode_a(self, mask, axes, new_axis):\n return dict((i, self.flatten(mask == i, axes, new_axis))\n for i in np.unique(mask))", "def createdict(Matrix,List):\r\n n = len(List)\r\n #to get all possible combinations\r\n input_combns = list(itertools.combinations(range(0,n),2))\r\n d = defaultdict(dict)\r\n for x in input_combns:\r\n i,j = x\r\n p,q = List[i],List[j]\r\n d[p][q] = Matrix[i][j]\r\n return d", "def incidence_matrix(edges, excpt=[]):\n I = dict()\n for e in edges:\n a_i = e[0]\n a_j = e[1]\n if (a_i in excpt) | (a_j in excpt):\n continue\n if a_i not in I:\n I[a_i] = dict()\n if a_j not in I[a_i]:\n I[a_i][a_j] = 1\n return I", "def getGM2MIsd(self):\r\n return {self._pga: self._constants[self._pga]['SMMI'],\r\n self._pgv: self._constants[self._pgv]['SMMI'],\r\n self._sa03: self._constants[self._sa03]['SMMI'],\r\n self._sa10: self._constants[self._sa10]['SMMI'],\r\n self._sa30: self._constants[self._sa30]['SMMI']}", "def init_bitarrays_class(self, target_values) -> Tuple[Dict[Any, np.ndarray],Dict[Any, np.ndarray]] :\n for namecol, colvals in target_values.iteritems():\n self.bit_arrays_var_class[namecol] = dict()\n self.counts[namecol] = dict()\n self.prob_var_class[namecol] = dict()\n for icat, category 
in enumerate(self.categories[namecol]):\n category_indexes = np.where(colvals.values == category)[0]\n self.bit_arrays_var_class[namecol][category] = indexes2bitset(category_indexes)\n self.counts[namecol][category] = len(category_indexes)\n self.prob_var_class[namecol][category] = self.counts[namecol][category]/target_values.shape[0]\n return self.bit_arrays_var_class, self.counts, self.prob_var_class", "def kindiffdict(self):\n if not self._qdot_u_map:\n raise AttributeError('Create an instance of KanesMethod with '\n 'kinematic differential equations to use this method.')\n return self._qdot_u_map", "def get_P34(self):\n msk = self.load_mask()\n dimsrc = sum(msk)\n dimtgt = self.vs.get_dimension()\n M = matrix(dimtgt, dimsrc, sparse = True)\n j = 0\n for (i, v) in enumerate(msk):\n if v == 1:\n M[i, j] = 1\n j=j+1\n return M", "def _cvt_mask3d_to_mask2d(m, mapper={(1,1,1):1, (2,2,2):2}, default = 0):\n #mapping\n sub_masks = []\n for k,v in mapper.items():\n # check elementwise equal\n sub_masks.append(np.where(np.all(m == k, axis=-1), v, default))\n\n # o = reduce(np.add, sub_masks)\n o = reduce(np.bitwise_or, sub_masks)\n # o = o[:,:,0] # remove redundant channels\n return o", "def generate_true_dict(all_triples):\n heads, tails = {(p, o) : [] for _, p, o in all_triples}, {(s, p) : [] for s, p, _ in all_triples}\n\n for s, p, o in all_triples:\n heads[p, o].append(s)\n tails[s, p].append(o)\n\n return heads, tails", "def _build_maps(self, list_of_matrices):\n # get the list of all unique nonzeros across the matrices\n nz_tuples = set()\n for m in list_of_matrices:\n nz_tuples.update(zip(m.row, m.col))\n nz_tuples = sorted(nz_tuples)\n self._nz_tuples = nz_tuples\n self._row, self._col = list(zip(*nz_tuples))\n row_col_to_nz_map = {t: i for i, t in enumerate(nz_tuples)}\n\n self._shape = None\n self._maps = list()\n for m in list_of_matrices:\n nnz = len(m.data)\n map_row = np.zeros(nnz)\n map_col = np.zeros(nnz)\n for i in range(nnz):\n map_col[i] = i\n map_row[i] = row_col_to_nz_map[(m.row[i], m.col[i])]\n mp = coo_matrix(\n (np.ones(nnz), (map_row, map_col)), shape=(len(row_col_to_nz_map), nnz)\n )\n self._maps.append(mp)\n if self._shape is None:\n self._shape = m.shape\n else:\n assert self._shape == m.shape", "def create_support_matrix(patch, point, direction, threshold):\n height, width, _ = patch.shape\n support_matrix = np.full((height, width), False, dtype=np.bool_)\n\n for idy, row in enumerate(patch):\n for idx, pixel in enumerate(row):\n d2 = pixel - point\n dist = distance(direction, d2)\n if dist < threshold:\n support_matrix[idy][idx] = True\n\n return support_matrix", "def matrix_map(self, bkg_reduction=True, data_correction=True):\r\n\r\n if bkg_reduction is True:\r\n if data_correction is True:\r\n data = self.df4\r\n \r\n else:\r\n data = self.df2\r\n\r\n else:\r\n if data_correction is True:\r\n data = self.df3\r\n \r\n else:\r\n data = self.df1\r\n\r\n return data", "def _generate_adjacency_matrices(self):\n self.adj_matrices = dict()\n mes = []\n args = []\n for metaedge in self.metaedges:\n mes.append(metaedge)\n args.append(self._prepare_parallel_adj_matrix_args(self.edge_df.query('abbrev == @metaedge')))\n res = parallel_process(array=args, function=mt.get_adj_matrix, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.adj_matrices[metaedge] = matrix", "def get_confusion_matrix_intersection_mats(groundtruth, predicted):\n\n confusion_matrix_arrs = {}\n\n groundtruth_inverse = np.logical_not(groundtruth)\n 
predicted_inverse = np.logical_not(predicted)\n\n confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n confusion_matrix_arrs['tn'] = np.logical_and(groundtruth, predicted_inverse)\n confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)\n confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)\n\n return confusion_matrix_arrs", "def coherent_subsequent_states(Infomap_labels):\r\n unique_labels= np.unique(Infomap_labels)\r\n dictionary= {}\r\n for i in range(len(unique_labels)):\r\n label_index=[]\r\n for j in range(len(Infomap_labels)):\r\n if unique_labels[i]==Infomap_labels[j]:\r\n label_index.append(j)\r\n subsequent=groupSequence(label_index)\r\n \r\n dictionary[i]=subsequent\r\n \r\n return dictionary", "def LQ_markov_mapping(A22,C2,Ug,p1,p2,c1=0):\n \n # Make sure all matrices can be treated as 2D arrays #\n A22 = np.atleast_2d(A22)\n C2 = np.atleast_2d(C2)\n Ug = np.atleast_2d(Ug)\n p1 = np.atleast_2d(p1)\n p2 = np.atleast_2d(p2)\n \n # Find number of states (z) and shocks (w)\n nz, nw = C2.shape\n \n # Create A11, B1, S1, S2, Sg, S matrices\n A11 = np.zeros((2,2))\n A11[0,1]=1\n \n B1 = np.eye(2)\n \n S1 = np.hstack((np.eye(1),np.zeros((1,nz+1))))\n Sg = np.hstack((np.zeros((1,2)),Ug))\n S = S1 + Sg\n \n # Create M matrix\n M = np.hstack((-p1,-p2))\n \n # Create A,B,C matrices\n A_T = np.hstack((A11,np.zeros((2,nz))))\n A_B = np.hstack((np.zeros((nz,2)),A22))\n A = np.vstack((A_T,A_B))\n \n B = np.vstack((B1,np.zeros((nz,2))))\n \n C = np.vstack((np.zeros((2,nw)),C2))\n\n # Create Q^c matrix\n Qc = np.array([[1,-1],[-1,1]])\n \n # Create R,Q,W matrices\n \n R = S.T.dot(S)\n Q = M.T.dot(M) + c1*Qc\n W = M.T.dot(S)\n \n return A,B,C,R,Q,W", "def state_dict_with_masks(model, **kwargs):\n\n with torch.no_grad():\n masks = compute_ard_masks(model, **kwargs)\n state_dict, masks = binarize_masks(model.state_dict(), masks)\n\n state_dict.update(masks)\n return state_dict, masks", "def adjacency_matrices_(self):\n return self._adjacency_matrices", "def adjacency_matrices_(self):\n return self._adjacency_matrices", "def __bool__(self):\n return _osgAnimation.vectorMatrixKeyframe___bool__(self)", "def _identifyModels(self):\n self._models = {}\n metals = np.unique(self.dat[::, 0])\n ys = np.unique(self.dat[::, 1])\n ls = np.unique(self.dat[::, 2])\n masses = np.unique(self.dat[::, 3])\n nl = pyaC.NestedLoop([len(metals), len(ys), len(ls), len(masses)])\n for i in nl:\n indi = np.where(\n np.logical_and(\n self.dat[::, 0] == metals[i[0]],\n np.logical_and(\n self.dat[::, 1] == ys[i[1]],\n np.logical_and(\n self.dat[::, 2] == ls[i[2]], self.dat[::, 3] == masses[i[3]]\n ),\n ),\n )\n )[0]\n if len(indi) > 0:\n self._models[\n (metals[i[0]], ys[i[1]], ls[i[2]], masses[i[3]])\n ] = indi.copy()", "def result(\n metrics: Dict[metric_types.MetricKey, Any]\n ) -> Dict[metric_types.MetricKey, Any]:\n matrix = metrics[matrices_metric_key]\n examples = metrics[examples_metric_key]\n\n output = {}\n for i, threshold in enumerate(matrix.thresholds):\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative']] = matrix.fn[i]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive']] = matrix.fp[i]\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative_examples_ids']] = np.array(\n examples.fn_examples[i])\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive_examples_ids']] = np.array(\n examples.fp_examples[i])\n 
output[metric_key_by_name_by_threshold[threshold]\n ['positive_examples_count']] = matrix.fn[i] + matrix.tp[i]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_examples_count']] = matrix.fp[i] + matrix.tn[i]\n\n return output", "def _demo_mm_inputs(input_shape=(1, 3, 256, 256)):\n (N, C, H, W) = input_shape\n\n rng = np.random.RandomState(0)\n\n imgs = rng.rand(*input_shape)\n target = np.zeros([N, 17, H // 32, W // 32], dtype=np.float32)\n mask = np.ones([N, H // 32, W // 32], dtype=np.float32)\n joints = np.zeros([N, 30, 17, 2], dtype=np.float32)\n\n img_metas = [{\n 'image_file':\n 'test.jpg',\n 'aug_data': [torch.zeros(1, 3, 256, 256)],\n 'test_scale_factor': [1],\n 'base_size': (256, 256),\n 'center':\n np.array([128, 128]),\n 'scale':\n np.array([1.28, 1.28]),\n 'flip_index':\n [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n } for _ in range(N)]\n\n mm_inputs = {\n 'imgs': torch.FloatTensor(imgs).requires_grad_(True),\n 'target': [torch.FloatTensor(target)],\n 'mask': [torch.FloatTensor(mask)],\n 'joints': [torch.FloatTensor(joints)],\n 'img_metas': img_metas\n }\n return mm_inputs", "def convert_flags_to_boolean_dict(flags):\n return {f: True for f in flags}", "def custom_confusion_matrix(prediction_vector, true_vector, feature_dict ):\n \n values = list(feature_dict.keys())\n values.sort()\n nvals = len(values)\n confusion_matrix = np.zeros((nvals, nvals))\n for i in range(len(values)):\n for j in range(len(values)):\n mask = (true_vector==values[i]) & (prediction_vector==values[j]) \n confusion_matrix[i,j] = mask.sum()\n \n return confusion_matrix", "def __bool__(self):\n return _osgAnimation.BoneMap___bool__(self)", "def test_degrade_map_bool(self):\n random.seed(12345)\n nside_coverage = 32\n nside_map = 1024\n nside_new = 256\n\n full_map = np.zeros(hpg.nside_to_npixel(nside_map), dtype=bool)\n pixels = np.random.choice(full_map.size, size=full_map.size//4, replace=False)\n full_map[pixels] = True\n\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, np.bool_)\n sparse_map[pixels] = full_map[pixels]\n\n # Degrade original map\n test_map = full_map.astype(np.float64)\n test_map[~full_map] = hpg.UNSEEN\n deg_map = hp.ud_grade(test_map, nside_out=nside_new,\n order_in='NESTED', order_out='NESTED')\n\n # Degrade sparse map and compare to original\n new_map = sparse_map.degrade(nside_out=nside_new)\n\n # Test the coverage map generation and lookup\n testing.assert_almost_equal(deg_map, new_map.generate_healpix_map())", "def _parse_matrix(matrix, bare_keys=()):\n matrix = dict(matrix)\n result = {}\n\n # Insert non-dict (\"bare\") keys first\n for key in bare_keys:\n if key in matrix:\n result[key, None] = matrix.pop(key)\n\n # Insert remaining matrix entries\n matrix_types = ('req', 'env', 'env_nobuild')\n if any(t in matrix for t in matrix_types):\n # New-style config\n matrices = []\n for t in matrix_types:\n submatrix = matrix.pop(t, {})\n matrices.append((t, submatrix))\n\n # Check if spurious keys left\n remaining_keys = tuple(matrix.keys())\n if remaining_keys:\n raise util.UserError('Unknown keys in \"matrix\" configuration: {}, expected: {}'.format(\n remaining_keys, matrix_types + tuple(bare_keys)))\n else:\n # Backward-compatibility for old-style config\n matrices = [('req', matrix)]\n\n # Convert values\n for t, m in matrices:\n for key, value in m.items():\n result[t, key] = value\n\n return result", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n op0 = self.args[0].operator_dict(index, vars, 
**kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in set().union(op0, op1):\n if (var in op0) and (var in op1):\n out[var] = add_sparse(op0[var], op1[var])\n elif (var in op0):\n out[var] = op0[var]\n else:\n out[var] = op1[var]\n return out", "def _demo_mm_inputs(input_shape: tuple, num_classes: int):\n (N, C, H, W) = input_shape\n rng = np.random.RandomState(0)\n imgs = rng.rand(*input_shape)\n gt_labels = rng.randint(\n low=0, high=num_classes, size=(N, 1)).astype(np.uint8)\n mm_inputs = {\n 'imgs': torch.FloatTensor(imgs).requires_grad_(False),\n 'gt_labels': torch.LongTensor(gt_labels),\n }\n return mm_inputs", "def feature_dist_func_dict():\n return {\"tanimoto_dissimilarity\": tanimoto_dissimilarity}", "def physical_maps(x, y):\n assert x.shape == (3,) and y.shape == (3,)\n assert x.dtype == np.float64 and y.dtype == np.float64\n\n C = np.empty((21,21), dtype=np.float64)\n B = np.empty((2,2), dtype=np.float64)\n b = np.empty((2,), dtype=np.float64)\n _ap.ap_physical_maps(x, y, C, B, b)\n return (C, B, b)", "def make_contingency_tables(\n y: np.ndarray, flagged_A: np.ndarray, flagged_B: np.ndarray\n) -> Dict[int, np.ndarray]:\n\n y = np.array(y).astype(np.int64).flatten()\n flagged_A = np.array(flagged_A).astype(np.bool_).flatten()\n flagged_B = np.array(flagged_B).astype(np.bool_).flatten()\n\n if len(flagged_A) != len(y) or len(flagged_B) != len(y):\n raise ValueError(\n f\"Expected arrays y, flagged_A, and flagged_B of the same length: \\\n got {len(y)}, {len(flagged_A)}, and {len(flagged_B)}.\"\n )\n\n contingency_tables = {}\n for class_id in np.unique(y):\n\n items_flagged_A = flagged_A[y == class_id]\n items_flagged_B = flagged_B[y == class_id]\n\n a = (~items_flagged_A & ~items_flagged_B).sum()\n b = (~items_flagged_A & items_flagged_B).sum()\n c = (items_flagged_A & ~items_flagged_B).sum()\n d = (items_flagged_A & items_flagged_B).sum()\n\n table = np.array([[a, b], [c, d]])\n contingency_tables[class_id] = table\n\n return contingency_tables", "def get_mapped_answers(self):\r\n answers = (\r\n dict([(ie.get('id'), ie.get(\r\n 'rectangle')) for ie in self.ielements]),\r\n dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))\r\n return answers", "def probOut(self) -> dict:\n \n return {\n j: sum(\n [ self.mat[i][j] * self.probIn[i] for i in self.simbIn ]\n ) for j in self.simbOut\n }", "def isequal_dict_of_ndarray(first, second):\n if first.keys() != second.keys():\n return False\n return all(np.array_equal(first[key], second[key]) for key in first)", "def get_embedding_matrix(word_vecs, k=300):\n vocab_size = len(word_vecs)\n word_idx_map = dict()\n W = np.zeros(shape=(vocab_size+1, k), dtype='float32') \n W[0] = np.zeros(k, dtype='float32')\n i = 1\n for word in word_vecs:\n W[i] = word_vecs[word]\n word_idx_map[word] = i\n i += 1\n return W, word_idx_map", "def np_to_belief(np_array,labels):\n return dict((l,np_array[0,i]) for i,l in enumerate(labels))", "def get_mappings():\n original_dict = ClassifierDataset.get_labels()\n return dict(zip(original_dict.values(), original_dict.keys()))", "def package_feature_matrix(feature_matrix):\n return {'feat': feature_matrix\n , 'stat': {\n 'mean': numpy.mean(feature_matrix, axis=0),\n 'std': numpy.std(feature_matrix, axis=0),\n 'N': feature_matrix.shape[0],\n 'S1': numpy.sum(feature_matrix, axis=0),\n 'S2': numpy.sum(feature_matrix ** 2, axis=0),\n }\n }", "def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n # Freeze arg1 metadata for caching ncc matrices\n 
frozen_arg1_basis_meta = freeze_meta(self.args[1].meta)[-1]\n op0 = self.args[0].as_ncc_operator(frozen_arg1_basis_meta, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in op1:\n out[var] = op0 * op1[var]\n return out", "def matrices(self):\n return [ self.__class__(labels=self.labels,\n labels_map=self.labels_map,\n sets=[x]) for x in self.sets]", "def getGM2MIsd(self):\n return { 'pga' : self.__constants['pga']['SMMI'],\n 'pgv' : self.__constants['pgv']['SMMI'],\n 'psa03' : self.__constants['psa03']['SMMI'],\n 'psa10' : self.__constants['psa10']['SMMI'],\n 'psa30' : self.__constants['psa30']['SMMI'] }", "def part1():\n memory = {}\n for line in lines:\n if line[:4] == 'mask':\n mask = line.split('=')[1].strip()\n on_mask = int(mask.replace('X', '0'), 2)\n off_mask = int(mask.replace('X', '1'), 2)\n else:\n a = int(line.split('[')[1].split(']')[0])\n to_write = int(line.split('=')[1])\n\n memory[a] = to_write & off_mask | on_mask # apparently the AND needs to come before the OR?\n\n #print(a, to_write, memory[a])\n\n answer = 0\n for v in memory.values():\n answer += v\n print(answer)", "def extract_iv(b):\n return dict((k, v) for k, v in b.items() if true_param(k))", "def mset(self, kvs: Mapping[K, V]) -> List[bool]:\n raise NotImplementedError('mset must be reimplemented in concrete implementation')", "def tensors(self):\n mapper = {}\n for v in self.values():\n handle = retrieve_indexed(v)\n for i in handle:\n found = mapper.setdefault(i.base.function, [])\n if i not in found:\n # Not using sets to preserve order\n found.append(i)\n return mapper", "def onequbit_modes(statemat):\n nqubit = int(np.log2(statemat.shape[0]))\n rep = np.array(list(itertools.product((0, 1), repeat=nqubit)))\n inds = [i for i, x in enumerate(np.sum(rep, 1)) if x==1]\n \n instates = np.around(statemat[:, inds], 3)\n\n outstates = np.zeros((len(inds), len(inds)), dtype=complex)\n #print(inds)\n for ii in range(len(inds)):\n shortstate = np.around(instates[sum(instates[:,ii].nonzero()), ii], 3).todense()\n outstates[:, ii] = np.squeeze( np.array( shortstate ) )\n \n return outstates", "def _conv_filter(state_dict, patch_size=16):\n out_dict = {}\n for k, v in state_dict.items():\n if 'patch_embed.proj.weight' in k:\n v = v.reshape((v.shape[0], 3, patch_size, patch_size))\n out_dict[k] = v\n return out_dict", "def __simInputTransform(weight_matrix: Union[pd.DataFrame, np.ndarray], initial_state: dict) -> tuple:\n if type(weight_matrix) != np.ndarray:\n # Align the initial_vector order for correct computations (vec . 
mat)\n initial_state = {k : initial_state[k] for k in weight_matrix.columns}\n weight_matrix=weight_matrix.to_numpy()\n else:\n warnings.warn(\"When passing an initial state with a weight matrix type \\\n numpy.ndarray make sure that the order of the keys in the dictionary \\\n with the initial states matches the order of the column of the numpy.ndarray!\")\n \n return weight_matrix, initial_state", "def exists(matrix,i,j):\n return j in matrix[i].keys()", "def pmi_boolbool(single_counts, pair_counts, N_docs, wi, wj, normalized=False):\n # Enforce alphabetical order in pair\n pair = tuple(sorted([wi, wj]))\n Nij = pair_counts.get(pair,0)\n Ni = single_counts.get(wi,0)\n Nj = single_counts.get(wj,0)\n # The single and pair counts must be non-zero\n # otherwise, the PMI is undefined\n if (Nij != 0) and (Ni != 0) and (Nj != 0):\n pmi = np.log2(N_docs*Nij / (Ni*Nj))\n if normalized:\n pmi = pmi / (- np.log2(Nij / N_docs))\n return pmi\n else:\n return None", "def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()", "def _sparse_to_dict(sparse_matrix):\n return {\n 'data': sparse_matrix.data,\n 'row': sparse_matrix.row,\n 'col': sparse_matrix.col,\n 'shape': sparse_matrix.shape,\n }", "def feature_magnitude_dict(three_feature_list):\n\n values_list = [[1, 2, 3],\n [4, 5, 6, 7],\n [7, 8, 9, 10]]\n\n feature_dict = {}\n for feature, values in zip(three_feature_list, values_list):\n\n for value in values:\n feature_dict[value] = feature\n feature_dict[-value] = feature\n\n return feature_dict", "def matrix_multiply_mapper(m, element):\n\n name, i, j, value = element\n\n if name == \"A\":\n for k in range(m):\n yield ((i, k), (j, value))\n \n else:\n for k in range(m):\n yield((k, j), (i, value))", "def get_sim_matrix(centroids):\n\n matrix = {}\n length = len(centroids)\n\n for i in xrange(0, length):\n matrix[i] = {}\n\n for j in xrange(i + 1, length):\n matrix[i][j] = similarity(centroids[i], centroids[j])\n\n return matrix", "def dic_zeros_like(x):\n\n result = {}\n for k in x:\n result[k] = torch.zeros_like(x.get(k), device=x.get(k).device)\n\n return result", "def _cache_checker_matrices(self):\r\n if self.model.mat_texid is not None:\r\n self._geom_checker_mats = []\r\n for geom_id in range(self.model.ngeom):\r\n mat_id = self.model.geom_matid[geom_id]\r\n tex_id = self.model.mat_texid[mat_id]\r\n texture = self.textures[tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._geom_checker_mats.append(self._make_checker_matrices(h, w))\r\n\r\n # add skybox\r\n skybox_tex_id = -1\r\n for tex_id in range(self.model.ntex):\r\n skybox_textype = 2\r\n if self.model.tex_type[tex_id] == skybox_textype:\r\n skybox_tex_id = tex_id\r\n if skybox_tex_id >= 0:\r\n texture = self.textures[skybox_tex_id]\r\n h, w = texture.bitmap.shape[:2]\r\n self._skybox_checker_mat = self._make_checker_matrices(h, w)\r\n else:\r\n self._skybox_checker_mat = None", "def wc_matrix(matrix):\n return [{\"A\": position[\"T\"], \"T\": position[\"A\"], \"C\": position[\"G\"], \"G\": position[\"C\"]} for position in matrix[::-1]]", "def get_checked_status_map(self):\r\n checked_status = {} # {'BJT':True, ...}\r\n for item_index in xrange(self.count()):\r\n item = self.item(item_index)\r\n if item is None:\r\n continue\r\n item_text = str(item.text())\r\n if item.checkState() != Qt.Checked:\r\n checked_status[item_text] = False\r\n else:\r\n checked_status[item_text] = True\r\n return checked_status", "def 
_merge_template_search(self, inputs):\n seq_dict = defaultdict(list)\n # flatten and permute\n for input_dic in inputs:\n for name, x in input_dic.items():\n if name == 'mask':\n seq_dict[name].append(x.flatten(1))\n else:\n seq_dict[name].append(\n x.flatten(2).permute(2, 0, 1).contiguous())\n # concatenate\n for name, x in seq_dict.items():\n if name == 'mask':\n seq_dict[name] = torch.cat(x, dim=1)\n else:\n seq_dict[name] = torch.cat(x, dim=0)\n return seq_dict", "def columns_state_to_matrix(state):\n m = []\n lits = []\n for key, values in state.items():\n if key != \"parity\":\n m.append(values)\n lits.append(key)\n m += [state[\"parity\"]]\n m = np.array(m).T.tolist()\n return m, lits", "def __call__(self):\n return {self.idx: rle_encoding(self.mask)}", "def filter_dict(lam,bigZ,bigZlag):\n n_eq = lam.shape[0]\n if not(len(bigZ.keys()) == n_eq and len(bigZlag.keys()) == n_eq):\n raise Exception(\"Error: incompatible dimensions\")\n Zfilt = {}\n for r in range(n_eq):\n lami = lam[r][0]\n Zfilt[r] = bigZ[r] - lami*bigZlag[r]\n return Zfilt", "def getData_goodmaps(liste_dictionnaires = [], liste_categories = [], liste_phonemes = [],liste_cartes=[]):\n if liste_dictionnaires!=[] and liste_categories!=[] and liste_phonemes!=[]:\n tableau = np.array(liste_dictionnaires[0][liste_categories[0]][liste_phonemes[0]])\n nb_exemple,nb_carte,lign,col=tableau.shape\n else:\n return [],[],[],[]\n\n Mat = []\n Reference = []\n\n\n for inddict,dict in enumerate(liste_dictionnaires):\n for indcat,cat in enumerate(liste_categories):\n for indpho,pho in enumerate(liste_phonemes):\n for ex in range(nb_exemple):\n goodmaps = []\n for map in liste_cartes:\n goodmaps.append(np.array(dict[cat][pho][ex][map]).flatten())\n Mat.append(np.array(goodmaps).flatten())\n Reference.append([inddict,indcat ,indpho])\n Reference = np.array(Reference)\n Y_c_inc = change_reference(Reference[:,1])\n Y_r_v = Reference[:,2]\n Y_fr_jap = Reference[:,0]\n return np.array(Mat), np.array(Y_c_inc), np.array(Y_r_v), np.array(Y_fr_jap)", "def state_array_spec(self) -> Dict[str, Any]:", "def __init__(self, transition_matrix, states):\n self.transition_matrix = np.atleast_2d(transition_matrix)\n self.states = states\n self.index_dict = {self.states[index]: index for index in \n range(len(self.states))}\n self.state_dict = {index: self.states[index] for index in\n range(len(self.states))}", "def setup_dict(self, keys=None):\n keys = keys or []\n return {key: True for key in keys}", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": 
np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")" ]
[ "0.6008547", "0.559379", "0.5578552", "0.55711734", "0.5570441", "0.5570441", "0.55518544", "0.5523987", "0.5333082", "0.5278061", "0.5247944", "0.5236596", "0.5219008", "0.5200241", "0.5198334", "0.51891565", "0.5130926", "0.5113897", "0.51134247", "0.50944567", "0.5081737", "0.5080843", "0.5074982", "0.507111", "0.50423884", "0.5021558", "0.5019483", "0.49841693", "0.49826798", "0.49588144", "0.49453682", "0.4927109", "0.492308", "0.48927802", "0.4881621", "0.48762748", "0.4869263", "0.486639", "0.48524532", "0.48445892", "0.48430642", "0.48394686", "0.4838697", "0.48360625", "0.4825742", "0.48155463", "0.4810855", "0.48056936", "0.47935474", "0.47935474", "0.47848222", "0.47846898", "0.47767633", "0.47727618", "0.4765399", "0.4759692", "0.47561958", "0.4753031", "0.47527826", "0.47508594", "0.475011", "0.4749454", "0.47390085", "0.47353062", "0.47327778", "0.47264555", "0.47217733", "0.47175017", "0.47153866", "0.47136927", "0.47131538", "0.47115305", "0.47110057", "0.47099492", "0.47073278", "0.46995223", "0.46950993", "0.46936885", "0.4682907", "0.46815962", "0.4674009", "0.46697238", "0.46633437", "0.46623617", "0.46610138", "0.46569285", "0.46567345", "0.4654145", "0.46534985", "0.46498123", "0.46493286", "0.4648469", "0.4648141", "0.46478865", "0.46477956", "0.464628", "0.46410072", "0.46365273", "0.46361542", "0.46323904", "0.46310067" ]
0.0
-1
pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.
def convert_to_codon(nts_array):
    nts_array = np.array(nts_array)
    # average the per-base coverage over each complete codon (3 nts per codon)
    n_codons = int(np.floor(len(nts_array) / 3))
    codon_array = np.sum(np.reshape(nts_array[:n_codons * 3], (n_codons, 3)), 1) / 3.
    return codon_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_mm_df(sralist):\n\n def convert_to_codon(nts_array):\n \"\"\"\n pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.\n This function converts nucleotide arrays to codon length (nts to codon resolution):\n \"\"\"\n \n nts_array = np.array(nts_array)\n codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3.\n\n return codon_array\n\n\n def compute_mm(mmdata):\n \"\"\"\n get per gene average multi-mapping score\n \"\"\"\n\n mm_df = pd.DataFrame(columns=['ORF', 'MM'])\n counter = 0\n\n for gene in mmdata.keys():\n current_matrix = mmdata[gene]\n current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] )\n mm_df.loc[counter] = [gene, current_avrg]\n counter += 1\n\n return mm_df\n\n\n mm_mat = {}\n mm_pct = {}\n\n N = len(sralist)\n\n for ix, dataset in enumerate(sralist):\n samfile = pysam.AlignmentFile(TMP_DIR+'/ambiguous_reads/'+dataset+'_STAR_transcriptome_multi_mapped_sorted.bam', 'rb')\n genes_list = list(samfile.references)\n print(ix, dataset)\n\n for geneID in genes_list:\n # count the coverage of genomic positions by reads in region.\n # Returns: four array.arrays of the same length in order A C G T\n # The coverage is computed per-base [ACGT]\n cov = samfile.count_coverage(geneID, read_callback='nofilter')\n # Summ all 4 arrays\n cov_sum = np.sum(cov, axis=0)\n #print(geneID, cov_sum)\n codon_cov = convert_to_codon(cov_sum)\n codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov])\n \n M = len(codon_bool)\n\n if ix == 0:\n \tmm_mat[geneID] = np.zeros((N,M)) * np.nan\n \n current_matrix = mm_mat[geneID]\n current_matrix[ix,:] = np.copy(codon_bool)\n mm_mat[geneID] = current_matrix\n\n\n mm_avrg = compute_mm(mm_mat)\n #mm_avrg.to_json('yeast_mm.json')\n #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\\t')\n\n \n mm_profile = {}\n theta_mm = 5\n for orf in mm_mat.keys():\n current_mat = mm_mat[orf]\n current_bool = np.sum(current_mat, 0) <= theta_mm\n mm_profile[orf] = current_bool\n\n with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm:\n pickle.dump(mm_profile, f_mm)\n\n\n return mm_mat, mm_avrg, mm_profile", "def df_seqs_concepts(self):\n # Get the data #\n df = pandas.DataFrame(self.a.seq_to_counts)\n df = df.fillna(0)\n # Rename to original names #\n df = df.rename(columns=self.a.renamed_to_orig)\n # Rename envo integers to envo strings #\n envo_int_to_id = lambda e: \"ENVO:%08d\" % e\n df = df.rename(index=envo_int_to_id)\n # Return\n return df", "def matthewscc(self):\n if not self.total_examples:\n return 0.\n\n true_pos = float(self.true_positives)\n false_pos = float(self.false_positives)\n false_neg = float(self.false_negatives)\n true_neg = float(self.true_negatives)\n terms = [(true_pos + false_pos),\n (true_pos + false_neg),\n (true_neg + false_pos),\n (true_neg + false_neg)]\n denom = 1.\n for t in filter(lambda t: t != 0., terms):\n denom *= t\n return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)", "def uCSIsUnifiedCanadianAboriginalSyllabics(code):\n ret = libxml2mod.xmlUCSIsUnifiedCanadianAboriginalSyllabics(code)\n return ret", "def get_mod_freq_clf(df, cols, chr_pos, strains, clf, method=\"GMM\"):\n results = []\n for cp in chr_pos:\n # min-max normalisation\n _df = df.loc[(df[\"chr_pos\"]==cp)&(df.Strain.isin(strains)), cols+[\"Strain\"]]\n _X = min_max_norm(_df[cols].to_numpy().astype(\"float\"))\n # get fit and clusters\n clusters = clf.fit_predict(_X)\n # for outlier method, store outliers (-1) as cluster_1 and normal 
(1) as cluster_0\n if max(clusters)>1: \n clusters[clusters!=0] = 1\n elif -1 in clusters and 1 in clusters: # outlier method\n clusters[clusters==1] = 0\n clusters[clusters<0] = 1\n # get modification freuqency - simply number of 1s over all for each sample\n freqs = [clusters[_df[\"Strain\"]==s].mean() for s in strains]\n results.append((cp, method, *freqs, \", \".join(map(str, strains[1:]))))\n return results", "def _compute_pc(df_embs, npc=1):\n svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)\n svd.fit(df_embs)\n return svd.components_", "def get_cs(self, model=1, limits=None, prev_aa=None, next_aa=None,\n piqc=False, sigma_n=None, like_ss=True, debug=False):\n\n # Validate input.\n aa = self.correlation.aa\n if aa not in aminoacids.aa_list:\n mesg = '{} is not a supported amino-acid!'.format(aa)\n raise ValueError(mesg)\n\n for atom in self.correlation.atoms:\n if atom not in aminoacids.aa_atoms[aa]:\n raise ValueError('{} is not an atom in {}'.format(atom, aa))\n if prev_aa:\n prev_aa = tuple(set(prev_aa))\n for prev_aa_i in prev_aa:\n if prev_aa_i not in aminoacids.aa_list:\n mesg = '{} is not an amino-acid!'.format(prev_aa_i)\n raise ValueError(mesg)\n if next_aa:\n next_aa = tuple(set(next_aa))\n for next_aa_i in next_aa:\n if next_aa_i not in aminoacids.aa_list:\n mesg = '{} is not an amino-acid!'.format(next_aa_i)\n raise ValueError(mesg)\n\n if piqc and not self.database.table_exist('SEQ_CS_DB'):\n mesg = 'The PIQC table is not in {}.'.format(self.database)\n raise ValueError(mesg)\n\n ss = self.correlation.ss\n n = self.dims\n\n # Scary sub-query to find unique secondary structure or the most common\n # secondary structure. I am sorry ...\n if model == 'all':\n sub_sql = \"\"\"INNER JOIN(\n SELECT DISTINCT c.KEY_ID, c.SND_STRC FROM {0}_strc_db as c\n INNER JOIN (\n SELECT KEY_ID, COUNT(a.KEY_ID) as count FROM (\n SELECT DISTINCT KEY_ID, SND_STRC FROM {0}_strc_db\n GROUP BY KEY_ID, SND_STRC)as a\n GROUP BY KEY_ID\n HAVING count = 1\n ) as b ON c.KEY_ID = b.KEY_ID ) as strc\n ON cs_0.KEY_ID = strc.KEY_ID\"\"\".format(aa)\n\n if model == 'most':\n sub_sql = \"\"\"INNER JOIN (SELECT KEY_ID,\n SUBSTRING_INDEX(GROUP_CONCAT(x.SND_STRC\n ORDER BY x.count DESC SEPARATOR ':::'), ':::', 1) AS snd_strc_mode\n FROM (SELECT KEY_ID, SND_STRC, COUNT(*) as count FROM {0}_strc_db\n GROUP BY KEY_ID, SND_STRC) as x\n GROUP BY x.KEY_ID ) as xx\n ON cs_0.KEY_ID = xx.KEY_ID\"\"\".format(aa)\n\n # Build SQL query\n # Select Chemical Shifts From Sub Table(s)\n cs = 'cs_{0}.C_SHIFT as cs{0}'\n sql = [(\"SELECT \" + ', '.join([cs.format(x) for x in range(n)])),\n \"FROM {0}_cs_db as cs_0\".format(aa)]\n\n # Join other sub table for chemical shift.\n for ni in range(1, n):\n sql.append(\"INNER JOIN {0}_cs_db AS cs_{1} \".format(aa, ni))\n sql.append(\"ON cs_0.KEY_ID = cs_{0}.KEY_ID\".format(ni))\n\n # Join other sub table for structure\n if ss and ss != 'X':\n if model in {'all', 'most'}:\n sql.append(sub_sql)\n else:\n sql.append(\"INNER JOIN {0}_strc_db AS strc\".format(aa))\n sql.append(\"ON cs_0.KEY_ID = strc.KEY_ID\")\n\n if prev_aa or next_aa:\n sql.append(\"INNER JOIN {0}_db AS info\".format(aa))\n sql.append(\"ON cs_0.KEY_ID = info.KEY_ID\")\n\n if piqc:\n sql.append(\"INNER JOIN SEQ_CS_DB \")\n sql.append(\"ON cs_0.FIRSTKEY_ID = SEQ_CS_DB.KEY_ID\")\n\n # Start of the where statements.\n # Atoms and Limits for first atom\n atom_0 = self.correlation.atoms[0]\n sql.append(\"WHERE cs_0.ATOM_NAME = '{0}'\".format(atom_0))\n\n if prev_aa:\n if len(prev_aa) == 1:\n sql.append(\"AND 
info.PREV_X = '{}'\".format(prev_aa[0]))\n else:\n sql.append(\"AND info.PREV_X in {}\".format(prev_aa))\n\n if next_aa:\n if len(next_aa) == 1:\n sql.append(\"AND info.NEXT_X = '{}'\".format(next_aa[0]))\n else:\n sql.append(\"AND info.NEXT_X in {}\".format(next_aa))\n\n if limits:\n sql.append(\"AND cs_0.C_SHIFT\")\n limits_0 = (limits[0][0], limits[0][1])\n sql.append(\"BETWEEN {0} AND {1}\".format(limits_0))\n\n # Atoms and Limits for rest of the atom\n for ni in list(range(1, n)):\n sql.append(\"AND cs_{0}.ATOM_NAME = '{1}'\".format(\n ni, self.correlation.atoms[ni]))\n\n if limits:\n sql.append(\"AND cs_{0}.C_SHIFT \".format(ni))\n limits_n = (limits[ni][0], limits[ni][1])\n sql.append(\"BETWEEN {0} AND {1}\".format(limits_n))\n\n # If secondary structure\n if ss and ss != 'X':\n try:\n ss_list = aminoacids.similar_sndstr[ss]\n except KeyError:\n raise ValueError('{} is not a valid sndstr'.format(ss))\n\n if model == 'all':\n if like_ss:\n sql.append(\"AND strc.SND_STRC IN {0}\".format(ss_list))\n else:\n sql.append(\"AND strc.SND_STRC = '{}'\".format(ss))\n elif model == 'most':\n if like_ss:\n sql.append(\"AND xx.snd_strc_mode IN {0}\".format(ss_list))\n else:\n sql.append(\"AND xx.snd_strc_mode = '{0}'\".format(ss))\n else:\n if like_ss:\n sql.append(\"AND SND_STRC IN {0}\".format(ss_list))\n else:\n sql.append(\"AND SND_STRC = '{0}'\".format(ss))\n sql.append(\"AND MODEL_NO={0}\".format(model))\n\n if piqc:\n sql.append(\"AND ELEMENT='C'\")\n sql.append(\"AND PIQC = 1\")\n\n sql = [x.strip() for x in sql]\n sql = '\\n'.join(sql)\n\n if debug:\n print(sql)\n\n cs = self.database.query(sql)\n\n if not cs:\n raise ValueError\n\n if sigma_n:\n avg = np.mean(cs, axis=0)\n std = np.std(cs, axis=0)\n ind = np.all(np.abs(cs-avg) <= std*sigma_n, axis=1)\n cs = np.compress(ind, cs, axis=0)\n\n return cs", "def fit_covid_function(self):\r\n return", "def test_codon_usage_custom(self):\n # We need a FASTA file of CDS sequences to count the codon usage...\n dna_fasta_filename = \"fasta.tmp\"\n dna_genbank_filename = \"GenBank/NC_005816.gb\"\n record = SeqIO.read(dna_genbank_filename, \"genbank\")\n records = []\n for feature in record.features:\n if feature.type == \"CDS\" and len(feature.location.parts) == 1:\n start = feature.location.start.position\n end = feature.location.end.position\n table = int(feature.qualifiers[\"transl_table\"][0])\n if feature.strand == -1:\n seq = record.seq[start:end].reverse_complement()\n else:\n seq = record.seq[start:end]\n # Double check we have the CDS sequence expected\n # TODO - Use any cds_start option if/when added to deal with the met\n a = \"M\" + str(seq[3:].translate(table))\n b = feature.qualifiers[\"translation\"][0] + \"*\"\n self.assertEqual(a, b, \"%r vs %r\" % (a, b))\n records.append(SeqRecord(seq, id=feature.qualifiers[\"protein_id\"][0],\n description=feature.qualifiers[\"product\"][0]))\n\n with open(dna_fasta_filename, \"w\") as handle:\n SeqIO.write(records, handle, \"fasta\")\n\n CAI = CodonAdaptationIndex()\n # Note - this needs a FASTA file which containing non-ambiguous DNA coding\n # sequences - which should each be a whole number of codons.\n CAI.generate_index(dna_fasta_filename)\n # Now check codon usage index (CAI) using this species\n self.assertEqual(record.annotations[\"source\"],\n \"Yersinia pestis biovar Microtus str. 
91001\")\n self.assertEqual(\"%0.5f\" % CAI.cai_for_gene(\"ATGCGTATCGATCGCGATACGATTAGGCGGATG\"),\n \"0.67213\")\n os.remove(dna_fasta_filename)", "def pca_2(emb) :\n pcaer = skd.PCA(n_components=2)\n pca = pcaer.fit_transform(emb)\n \n return pca", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def n_cs(self):\n pass", "def codonfreqs_kmerdf(kmertable): \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def clus_func(df_all, n_components, feat_subset):\n\n df = df_all[featureSet_dic[feat_subset]].copy()\n\n X = df.values\n\n # # Fit a Gaussian mixture with EM\n # gmm_model = mixture.GaussianMixture(n_components=n_components,\n # covariance_type=cv_type,\n # random_state=1,\n # n_init=10)\n # gmm_model = gmm_model.fit(X)\n\n model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time\n model_name = os.path.join(model_path, 'gmm.joblib')\n gmm_model = joblib.load(model_name)\n\n # predic labels & probabilities\n labels = gmm_model.predict(X)\n labels_prob = gmm_model.predict_proba(X)\n\n # adding all droped features (for plotting purposes) of the standardized dataframe\n added_feat = [feat for feat in data_columns if feat not in df.columns]\n df[added_feat] = df_all_stand[added_feat].copy()\n df = df[data_columns]\n\n # adding the labels to the dataframe\n df.insert(0, 'Clus_label', labels)\n\n for n in range(n_components):\n df['Prob_L'+str(n)] = labels_prob[:, n]\n\n return gmm_model, df # export all gmm models and a dictionary of all labeled datasets", "def gain_sequence(det_name, psf_results_file, chiprob_min=0.1, logger=None):\n warnings.simplefilter('ignore')\n with fits.open(psf_results_file) as cluster_data:\n seqnums = sorted(list(set(cluster_data[1].data['SEQNUM'])))\n all_amps = range(1, cluster_data[0].header['NAMPS'] + 1)\n det_names, amps, gains, seq_nums = [], [], [], []\n for seqnum in seqnums:\n if logger is not None:\n logger.info(f'{seqnum}')\n for amp in all_amps:\n chiprob = cluster_data[amp].data['CHIPROB']\n index = np.where((chiprob > chiprob_min) &\n (cluster_data[amp].data['SEQNUM'] == seqnum))\n dn = cluster_data[amp].data['DN'][index]\n fitter = Fe55GainFitter(dn)\n fitter.fit()\n det_names.append(det_name)\n amps.append(amp)\n gains.append(fitter.gain)\n seq_nums.append(seqnum)\n return pd.DataFrame({'det_name': det_names, 'amp': amps,\n 'gain': gains, 'seqnum': seq_nums})", "def cmu(df, mu, alphamu=0.0, alphacov=2.0):\r\n c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)\r\n # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * 
mu / 2)\r\n # print 'cmu =', c\r\n return c", "def performpca(df, nb_pc=5):\n # Remove uncomplete series\n print(df.shape)\n normalized=(df-df.mean())/df.std()\n # normalized.plot()\n # plt.show()\n pca = PCA(nb_pc)\n pca.fit(normalized)\n return pca, normalized", "def get_all_guides_that_cut_in_cds(self,pam, seq_len_around_cut,\n min_mut_pos_in_guide, max_mut_pos_in_guide,\n excluded_seqs, mapping_cmd, sort_by = '5to3'):\n \n ordered_lefts = self.cds_lefts\n ordered_rights = self.cds_rights\n if (self.is_neg_strand()):\n ordered_lefts = ordered_lefts[::-1]\n ordered_rights = ordered_rights[::-1]\n \n ######\n # search positive strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n\n for left,right in zip(ordered_lefts,ordered_rights):\n \n # cut is to the right of the nucleotide\n cur_left_for_pam = left + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n cur_right_for_pam = right + (self.CRISPR_CUT_INDEX + len(pam) - 1) + (1 * self.is_neg_strand()) \n \n\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in re.finditer(\"(?=\"+pam+\")\", str(seq))])\n \n # removing guides that are not entirely in the CDS\n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n cur_guides0_chr_pos = (cur_pam_dists-1) + cur_left_for_pam \n \n if (self.is_neg_strand()): # negative\n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - (self.CRISPR_CUT_INDEX + 1) # the cut is right of the nt\n else:\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos - self.CRISPR_CUT_INDEX # the cut is left of the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n pos_strand_guides_df = self.__guide_positions_to_df(pam, False, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num) \n \n ######\n # search negative strand for pam\n ######\n cur_cds_nt_start = 0\n exon_num = 0\n guides0_chr_pos = np.empty(0,dtype=int)\n guides_cut_chr_pos = np.empty(0,dtype=int)\n guides0_gene_dna_pos = np.empty(0,dtype=int)\n guides_cut_gene_dna_pos = np.empty(0,dtype=int)\n guides_exon_num = np.empty(0,dtype=int)\n \n for left,right in zip(ordered_lefts,ordered_rights):\n \n \n cur_left_for_pam = int(left) - 
(self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n cur_right_for_pam = int(right) - (self.CRISPR_CUT_INDEX + len(pam)+1) + (1 * self.is_neg_strand())\n \n seq = self.genome_seq[self.chrom].seq[cur_left_for_pam:cur_right_for_pam]\n \n revcomp_pam = Seq(pam,generic_dna).reverse_complement()\n \n # returns a list of all the positions in that cut in cds\n cur_pam_dists = np.array([m.start() for m in re.finditer(\"(?=\"+str(revcomp_pam)+\")\", str(seq))])\n \n \n if ( (not np.isnan(min_mut_pos_in_guide)) and (not np.isnan(max_mut_pos_in_guide)) ):\n min_mut_pos_in_guide = int(min_mut_pos_in_guide)\n max_mut_pos_in_guide = int(max_mut_pos_in_guide)\n \n cur_pam_dists = cur_pam_dists[cur_pam_dists >= (-min_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand() ) ]\n cur_pam_dists = cur_pam_dists[cur_pam_dists <= (len(seq) - 1 + len(pam) - 1 ) + ( (-max_mut_pos_in_guide) - (self.CRISPR_CUT_INDEX + 1 * self.is_neg_strand()) ) ]\n \n \n \n cur_guides0_chr_pos = (cur_pam_dists+2) + cur_left_for_pam\n \n if (self.is_neg_strand()): # negative \n cur_guides_cut_gene_dna_pos = (len(seq)-1-cur_pam_dists) + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + self.CRISPR_CUT_INDEX # the cut is right of the nt\n else: # positive\n cur_guides_cut_gene_dna_pos = cur_pam_dists + cur_cds_nt_start\n cur_guides_cut_chr_pos = cur_guides0_chr_pos + self.CRISPR_CUT_INDEX + 1 # the cut is leftot the nt\n \n \n cur_guides_exon_num = np.full_like(cur_guides_cut_gene_dna_pos,exon_num)\n \n \n guides0_chr_pos = np.concatenate((guides0_chr_pos,cur_guides0_chr_pos))\n guides_cut_chr_pos = np.concatenate((guides_cut_chr_pos,cur_guides_cut_chr_pos))\n guides_cut_gene_dna_pos = np.concatenate((guides_cut_gene_dna_pos,cur_guides_cut_gene_dna_pos))\n guides_exon_num = np.concatenate((guides_exon_num,cur_guides_exon_num))\n \n cur_cds_nt_start = cur_cds_nt_start + (right - left)\n exon_num = exon_num + 1\n \n \n neg_strand_guides_df = self.__guide_positions_to_df(pam, True, seq_len_around_cut, excluded_seqs, \\\n guides0_chr_pos, guides_cut_chr_pos, guides_cut_gene_dna_pos, guides_exon_num)\n \n \n # concating the positive and negative strands guides\n guides_df = pd.concat([pos_strand_guides_df, neg_strand_guides_df])\n \n # adding for each guide its location in the gene (5' -> 3'; fraction)\n guides_df[\"guide_cut_gene_pos_frac\"] = guides_df[\"guide_cut_gene_nt_pos\"] / guides_df[\"CDS_len_nts\"]\n\n \n # if the 'sort' method is onlyStopCodon then leave only guide that cut the stop codon\n if sort_by == 'onlyStopCodon':\n guides_df = guides_df.ix[( ( (guides_df['CDS_len_nts']).values / 3) == ( (guides_df['guide_cut_gene_aa_pos']).values + 1) ) ,:]\n \n # calculating Azimuth score\n #print \"Calculating Azimuth score\"\n guides_df = cal_azimuth_score(guides_df, output_filename_GUIDE_withScores = \"\", guides_PAMm4p3_col_name=\"guide_PAM_m4p3\")\n \n # calculating off targets\n #print \"Testing off targets\"\n guides_df = eval_guides_off_targets(guides_df, self.genome_seq, 'guide_id', 'guide_noPAM', pam, mapping_cmd)\n \n \n return (guides_df)", "def df_sample_concepts(self):\n return self.abundance_mat_mult(False)", "def calculate_piN_piS(codonseqs, method, codon_table, het=False):\n analysis = {\"seqname\": \"\", \"piN\": -1, \"piS\": -1, \"piNpiS\": -1, \"pi\": -1, \"method\":method}\n x = seqfreqs(codonseqs)\n #if 'piNpiS' in options.debug:\n # print(\"freqs are: {}\".format(x))\n # print(\"len codonseqs is: \", len(codonseqs))\n piN = 0\n piS = 0\n for i in 
range(len(codonseqs)):\n for j in range(i+1, len(codonseqs)):\n #print(codonseqs[i], codonseqs[j])\n if not het:\n dN, dS = cal_dn_ds(codonseqs[i], codonseqs[j], codon_table=codon_table, method=method)\n piN = piN + (x[i] * x[j] * dN)\n piS = piS + (x[i] * x[j] * dS)\n #if 'piNpiS' in options.debug:\n # print(\"{0} dN{1}{2}={3} dS{1}{2}={4}\".format(method, i, j, dN, dS))\n else:\n try:\n dN, dS = cal_dn_ds(codonseqs[i], codonseqs[j], codon_table=codon_table, method=method)\n piN = piN + (x[i] * x[j] * dN)\n piS = piS + (x[i] * x[j] * dS)\n except:\n pass\n\n analysis['piN'] = piN\n analysis['piS'] = piS\n try:\n analysis['piNpiS'] = piN/piS\n except:\n analysis['piNpiS'] = 0\n #if 'piNpiS' in options.debug:\n # print (\"{0} dN={1:.3f} dS={2:.3f} piN/piS = {3:.3f}\".format(\n # method, analysis['piN'], analysis['piS'], analysis['piNpiS']))\n\n return analysis", "def get_gbif_occs(self):\n\n # Create a file to store occurrence data.\n self.occfile = os.path.join(self.outdir, self.params['spname'].replace(\" \", \"_\") + \".csv\")\n\n # Get the usageKey for species of interest.\n self.key = species.name_backbone(name = self.params['spname'], rank = 'species')['usageKey']\n\n # Create latitude/longitude lists.\n self.lats = []\n self.lons = []\n\n # Run a while-loop to go through all observations.\n curr_offset = 0\n end_records = False\n while not end_records:\n occ_records = occ.search(taxonKey = self.key, hasCoordinate = True, \n decimalLatitude = ','.join([str(self.params['ymin']), str(self.params['ymax'])]),\n decimalLongitude = ','.join([str(self.params['xmin']), str(self.params['xmax'])]),\n offset = curr_offset\n )\n end_records = occ_records['endOfRecords']\n curr_offset += occ_records['limit']\n\n # Add latitude/longitude results to lists.\n self.lats.extend([i['decimalLatitude'] for i in occ_records['results']])\n self.lons.extend([i['decimalLongitude'] for i in occ_records['results']])\n\n # Print a dot on each cycle to show progress.\n print(\".\", end = \"\")\n\n # When end of data is reached: build pandas dataframe from lists and remove duplicate data points.\n if occ_records['endOfRecords']:\n df = pd.DataFrame({'Latitude': self.lats, 'Longitude': self.lons})\n df = df.drop_duplicates().reset_index()\n df = df.drop('index', axis = 1)\n\n # Reform the lists by subsetting the dataframe.\n self.lats = list(df['Latitude'])\n self.lons = list(df['Longitude'])\n\n # Print final number of records.\n print(f' Found {len(self.lats)} records.')\n\n # Build array to write to CSV file. np.vstack layers arrays vertically, where each layer is species-lat-lon. \n # np.repeat copies the species names as many times as there are entries. 
It also combines with zip() to put\n # a newline char at the end of each layer.\n csvarr = np.vstack([np.repeat(self.params['spname'].replace(\" \", \"_\"), len(self.lats)), self.lats,\n [\"{}{}\".format(a_, b_) for a_, b_ in zip(self.lons, np.repeat('\\n', len(self.lats)))]]\n ).T\n\n # Write array to CSV file.\n with open(self.occfile, 'w') as f:\n f.write('Species,Latitude,Longitude\\n')\n for line in csvarr:\n f.write(\",\".join(line))\n\n # Transform lists to arrays for downstream application.\n self.lats = np.array(self.lats)\n self.lons = np.array(self.lons)", "def codon_bgfreq(codon_seqs, data_mm):\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts", "def generate_2d_cs_plot(data, atom1=\"CA\", atom2=\"CB\", resid_li=\"\", from_frame='all', to_frame='all', soluplots=False):\n\n #Generate an empty dataframe and pop out the mean and the deviation from the CS/frame pickle\n result = pd.DataFrame()\n data.set_index(['resSeq','name'], inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)\n\n # If frames were selected, drop also all columns were\n if from_frame != 'all':\n frames = ['resname', 'resname_s']+[ str(f) for f in range(int(from_frame), int(to_frame)+1)]\n data = data.filter(frames, axis=1)\n\n # Take all residues if none were submitted\n if not resid_li:\n resid_li = {index[0] for index in data.index.values}\n\n # Sort residue ids numerically\n resid_li = [ str(i) for i in sorted([int(x) for x in resid_li]) ]\n\n #loop over the residues selcted by the user\n for item in resid_li:\n try: \n df1 = data.loc[int(item),atom1] #select atom 1 the row from the dataframe which matches the inputs from the user\n df2 = data.loc[int(item),atom2] #select atom 2 the row from the dataframe which matches the inputs from the user\n resname = data.loc[[int(item),'CA'], 'resname'].unique()[0]\n # Option to make \"Solution NMR predictions\": make a distribution out of average and variance of our cs values, and plot it\n if soluplots:\n np1=np.array(df1[2:])\n np2=np.array(df2[2:])\n dist1 = np.random.normal(np1.mean(), np1.std()/10, len(np1))\n dist2 = np.random.normal(np2.mean(), np2.std()/10, len(np2))\n df_e1 = pd.DataFrame(data=dist1, columns=[atom1]) #Build the plotting dataframe\n df_e2 = pd.DataFrame(data=dist2, columns=[atom2])\n else:\n df_e1=df1.to_frame(name=atom1)\n df_e2=df2.to_frame(name=atom2)\n except Exception as e:\n continue\n temp_df = pd.concat([df_e1,df_e2], axis=1, join=\"inner\") #concatenate all the residues dataframe into a bigger one\n temp_df[\"IDs\"]=str(item)+' '+resname #give them different ids to have differnete colors in the plot\n result = result.append(temp_df) #build the final DF\n\n # Put atoms in avail_res_atoms dictionary (which I dont remember exactly what does but seems important)\n avail_res_atoms = {\n \"%s.%s\"%(item,atom1) : {\"display\":False,\"color\":\"#FF0000\"},\n \"%s.%s\"%(item,atom2) : {\"display\":False,\"color\":\"#FF0000\"},\n }\n\n # If there are no atoms matching this selection\n if result.empty: \n return('', ['error'])\n\n 
#plot\n fig = px.density_contour(result, x=atom1, y=atom2, color=\"IDs\", labels={\n atom1 : \"Chemical shift for \"+str(atom1)+\" (ppm)\",\n atom2 : \"Chemical shift for \"+str(atom2)+\" (ppm)\",\n \"IDs\": \"Residue ID\"},\n color_discrete_sequence=px.colors.qualitative.Dark24)\n # Reverse axis\n fig['layout']['yaxis']['autorange'] = \"reversed\"\n fig['layout']['xaxis']['autorange'] = \"reversed\"\n\n\n fig.update_layout(legend=dict(\n itemsizing='constant',\n itemclick='toggleothers',\n itemdoubleclick='toggle',\n ))\n\n #Skip hover info when scrolling through the plot\n fig.update_traces(hoverinfo='skip', hovertemplate=None)\n\n # Return plot\n p = pt.offline.plot(fig, include_plotlyjs=False, output_type='div')\n return(p,avail_res_atoms)", "def get_cd_samples(self):\n \n if \"PCD\" in self.algorithm:\n \n input_vars = []\n \n given_vars = []\n \n else:\n \n input_vars = [self.minibatch_set]\n \n given_vars = {self.x_gibbs: self.train_inputs[self.minibatch_set,:]} \n \n get_samples = theano.function(inputs = input_vars,\n outputs = [self.p_xi_given_x_[-1], \n self.gibbs_samples[-1]\n ], \n givens = given_vars,\n #start the chain at the data distribution\n updates = self.gibbs_updates)\n \n return get_samples", "def fetch_compas_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"compas\", \"classification\", astype=\"pandas\", preprocess=False\n )\n orig_X = pd.concat([train_X, test_X]).sort_index().astype(np.float64)\n orig_y = pd.concat([train_y, test_y]).sort_index().astype(np.float64)\n if preprocess:\n race = pd.Series(orig_X[\"race_caucasian\"] == 1, dtype=np.float64)\n dropped_X = orig_X.drop(\n labels=[\"race_african-american\", \"race_caucasian\"], axis=1\n )\n encoded_X = dropped_X.assign(race=race)\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"race\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"race_caucasian\", \"reference_group\": [1]},\n ],\n }\n return orig_X, orig_y, fairness_info", "def covar_samp(self):\n if self.count <= 1:\n return None\n return self.Ck / (self.count - 1)", "def pseudo_seurat(adata, arg_minpct, arg_mindiffpct, arg_logfcdiff):\n # define cells\n cluster_cells_ind = which_ind(adata.obs[\"idents\"] == \"1\")\n other_cells_ind = which_ind(adata.obs[\"idents\"] == \"0\")\n\n # compute perecentage expressed\n # from normnalised but not scaled data\n # remember cells are rows and genes are columns\n\n # note: I don't know why norm_counts[cluster_cell_ind:, col_ind] deosn\"t work, but it doesn't\n cluster_pct = (adata.X[cluster_cells_ind, :] > 0).sum(axis=0) / len(cluster_cells_ind)\n other_pct = (adata.X[other_cells_ind, :] > 0).sum(axis=0) / len(other_cells_ind)\n\n pcts = pd.DataFrame(np.vstack((cluster_pct, other_pct)).transpose())\n max_pct = pcts.max(axis=1)\n min_pct = pcts.min(axis=1)\n diff_pct = max_pct - min_pct\n take_diff_pct = diff_pct > arg_mindiffpct\n\n # remove genes that are not expressed higher than 0.1 in one of the groups\n take_min_pct = max_pct > arg_minpct\n\n\n # KEEP IN CASE NP.ARRAY METHOD USES TOO MUCH MEMORY\n # import time\n # this has the potential to be very slow. 
Transposeing it speeds it up a bit.\n # I need to understand sparse matrices better to make it work\n # start = time.time()\n # nct = adata.X.T[:,cluster_cells_ind]\n # cluster_mean0 = [exp_mean_sparse(nct[x,:]) for x in range(0,nct.shape[0])]\n # end = time.time()\n # print(end - start)\n #\n # start = time.time()\n # nct = adata.X.T[:, other_cells_ind]\n # other_mean0 = [exp_mean_sparse(nct[x,:]) for x in range(0, nct.shape[0])]\n # end = time.time()\n # print(end - start)\n\n # extract the counts for cluster cells and calculate exp means on each row\n nct = adata.X.T[:, cluster_cells_ind]\n cluster_mean = np.apply_along_axis(exp_mean_dense, 1, nct.todense())\n\n # likewise for non-cluster cells\n nct = adata.X.T[:, other_cells_ind]\n other_mean = np.apply_along_axis(exp_mean_dense, 1, nct.todense())\n diff_mean = abs(cluster_mean - other_mean)\n\n # remove genes with less than threshold difference\n take_thresh = diff_mean > arg_logfcdiff\n # take = if a cell passes all the tests then it is to be kept.\n take = [a and b and c for a, b, c in zip(take_thresh, take_min_pct, take_diff_pct)]\n print(\"saving universe for fisher test\")\n stats_df = pd.DataFrame(np.vstack((adata.var_names, cluster_mean, other_mean, diff_mean,\n cluster_pct, other_pct, max_pct, min_pct, diff_pct, take)).transpose(),\n columns=[\"gene\", \"cluster_mean\", \"other_mean\", \"diff_mean\",\n \"cluster_pct\", \"other_pct\",\n \"max_pct\", \"min_pct\", \"diff_pct\", \"background\"])\n return stats_df", "def get_sequence(self):\n\n search_params = [\n '|',\n ('active', '=', True),\n ('year', '=', self.year),\n ('cod_gestion.name', '!=', 'Z8_01_dl15')\n ]\n codis_gestio = self.connection.model('cir8.2021.d2').search(search_params)\n\n return codis_gestio", "def test_pmt_pos_nt():\r\n pandas.DataFrame(straxen.pmt_positions(False))", "def pcd(dw, qpts=50):\n w = w0+dw\n pcm.set_qpts(qpts)\n sml = pcm.sml_w(w)\n avgchi = pcm.avgchi\n pcm.set_qpts(0)\n sml2 = pcm.sml_w(w)\n print sml, log(sml) - pcm.offset, avgchi\n print sml2, log(sml2) - pcm.offset, pcm.avgchi", "def recommend_cosim():\n pass", "def get_covid_term() -> pd.DataFrame:\n return NOTICE_GETTER.term", "def get_pca():\n from sklearn.decomposition import PCA\n return PCA()", "def df_to_cdm(cdm, cdmd, out_dir, dataset, dic_obstab_attributes, fn): \n \n #station_id_fails = open('station_id_fail.log' , 'a') \n #station_id_ok = open('station_id_correct.log' , 'a')\n \n t=time.time()\n if not False: \n # era5 analysis feedback is read from compressed netcdf files era5.conv._?????.nc.gz in $RSCRATCH/era5/odbs/1\n \"\"\" Reading the odb and convert to xarray \"\"\" \n if 'bufr' in dataset :\n df, stations_id= bufr_to_dataframe(fn) # fdbs: the xarray converted from the pandas dataframe \n \n elif 'ncar' in dataset:\n df, stations_id = uadb_ascii_to_dataframe(fn)\n \n elif 'igra2' in dataset:\n df, stations_id = igra2_ascii_to_dataframe(fn)\n \n else:\n #print('Unidentified file is: ', fn)\n raise ValueError('Cannot identify the type of file to be analized!!! 
')\n \n station_configuration_retrieved = get_station_configuration_f( stations_id, cdm['station_configuration'] ) \n #primary_id = station_configuration_retrieved['primary_id'].values[0].decode('utf-8') \n try:\n primary_id = station_configuration_retrieved['primary_id'].values[0].decode('utf-8')\n except:\n #print('CANT FIND STATION PRIMARY ID ')\n out =open(dataset + \"_wrong_ids.txt\" , 'a+')\n out.write(fn + '\\n')\n \n primary_id = '-1'\n \n fno, source_file = initialize_output(fn, output_dir, primary_id, dataset) \n #if primary_id == 'uknown_primary':\n # return \n \n \"\"\" Casting the original variable types to appropriate numpy types \"\"\" \n #df = convert_variable_type(df) # ->think this is overly complicated. \n df = convert_variable_type_n(df)\n #df = df.replace( -2147483648 , np.nan ) \n \n \n \"\"\" Extract the unique indices of each date observation, one for only dates, one for date_time (i.e. record index). \n Converts the time variables in seconds since 1900-01-01 00:00:00 \"\"\" \n di=xr.Dataset() \n \n \n if 'igra2' in dataset:\n releasetime_toseconds = datetime_toseconds( df['report_timestamp'] ) \n df['report_timestamp'] = releasetime_toseconds # will fill the header_table TODO see if it improves by replacing values with dictionaries in pandas \n \n record_timestamp_seconds = datetime_toseconds( df['record_timestamp'] ) # will fill the header_table TODO see if it improves by replacing values with dictionaries in pandas \n df['record_timestamp'] = record_timestamp_seconds # replacing with seconds from 1900-01-01 00:00:00 \n\n indices, day, counts = make_datetime_indices( df['iday'].values ) #only date information\n di['dateindex'] = ( { 'dateindex' : day.shape } , indices ) \n \n indices, date_times , counts = make_datetime_indices( df['record_timestamp'].values ) #date_time plus indices \n di['recordindex'] = ( {'recordindex' : indices.shape }, indices )\n di['recordtimestamp'] = ( {'recordtimestamp' : date_times.shape }, date_times )\n\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n di.to_netcdf( fno, format='netCDF4', engine='h5netcdf', mode='w' )\n \n \n \n \"\"\" Storing the variable encodings \"\"\"\n fbencodings={}\n for d,v in df.items(): \n if v.dtype==numpy.dtype('float64'):\n fbencodings[d]={'dtype':numpy.dtype('float32'), 'compression': 'gzip'} \n \n elif v.dtype==numpy.dtype('float32'):\n fbencodings[d]={'dtype':numpy.dtype('float32'), 'compression': 'gzip'} \n \n elif v.dtype==numpy.dtype('int32'):\n fbencodings[d]={'dtype':numpy.dtype('int32'), 'compression': 'gzip'} \n \n elif v.dtype==numpy.dtype('int64'):\n fbencodings[d]={'dtype':numpy.dtype('int64'), 'compression': 'gzip'} \n \n elif type(v.values[0])==bytes:\n fbencodings[d]={'compression': 'gzip', 'chunksizes': ( min( [10000,v.shape[0] ] ), 10 ) }#,'chunksizes':(10000,10)\n else:\n fbencodings[d]={'compression': 'gzip'}\n fbencodings['index']={'compression': 'gzip'}\n \n \n #write_dict_h5(fno, df, 'era5fb', fbencodings, var_selection=[],mode='a')\n dcols=[]\n for d in df.columns:\n if d not in ['date@hdr','time@hdr','statid@hdr','vertco_reference_1@body','varno@body', 'lon@hdr','lat@hdr','seqno@hdr',\n 'obsvalue@body','fg_depar@body','an_depar@body','biascorr@body','sonde_type@conv', 'record_timestamp' , 'report_timestamp',\n 'observation_id', 'report_id' , 'units' , 'vertco_type@body' ] :\n \n dcols.append(d)\n df.drop(columns=dcols,inplace=True)\n \n groups={}\n groupencodings={}\n for k in cdmd.keys(): # loop over all the table definitions \n if k in 
('observations_table'):\n pass #groups[k]=pd.DataFrame()\n else:\n groups[k]=xr.Dataset() # create an xarray\n groupencodings[k]={} # create a dict of group econding\n \n for i in range(len(cdmd[k])):\n d=cdmd[k].iloc[i] \n \n \"\"\" Filling the observations_table \"\"\"\n if k in ('observations_table'):\n groups[k]=pd.DataFrame() # creating dataframes that will be written to netcdf via h5py methods by the write_dict_h5() method \n \n try: \n groups[k][d.element_name]= fromfb_l(df, di._variables, cdmfb_noodb[d.element_name], ttrans(d.kind,kinds=okinds)) \n except KeyError:\n x=numpy.zeros( df['record_timestamp'].shape[0], dtype=numpy.dtype(ttrans(d.kind,kinds=okinds) ) )\n x.fill(numpy.nan)\n groups[k][d.element_name]=x\n\n \n elif k in ('header_table'):\n if d.element_name == 'report_timestamp':\n groups[k][d.element_name]= ( {'hdrlen':di['recordindex'].shape[0]} , np.take(df[d.element_name], di['recordindex'] ) )\n groups[k][d.element_name].attrs['units'] = 'seconds since 1900-01-01 00:00:00'\n elif d.element_name == 'record_timestamp':\n groups[k][d.element_name]= ( {'hdrlen':di['recordindex'].shape[0]} , np.take(df[d.element_name], di['recordindex'] ) )\n groups[k][d.element_name].attrs['units'] = 'seconds since 1900-01-01 00:00:00' \n else:\n try:\n \n if d.element_name not in station_configuration_retrieved.columns: # variables might be from the df (input file) or from the retrieved station configuration \n \n try: \n # groups[k][d.element_name] = ({'hdrlen':di['recordindex'].shape[0]}, hdrfromfb(df,di._variables, cdmfb[d.element_name],ttrans(d.kind,kinds=gkinds) ) )\n groups[k][d.element_name]= (di['recordindex'].shape[0], hdrfromfb(df, di._variables, cdmfb_noodb[d.element_name], ttrans(d.kind,kinds=gkinds) ) )\n except: \n x=numpy.zeros(di['recordindex'].shape[0], dtype=numpy.dtype(ttrans(d.kind,kinds=gkinds)))\n x.fill(numpy.nan)\n groups[k][d.element_name]=({'hdrlen':di['recordindex'].shape[0]},x) \n \n else: \n x=numpy.zeros(di['recordindex'].shape[0], dtype=numpy.dtype(ttrans(d.kind,kinds=gkinds)))\n x.fill( station_configuration_retrieved[d.element_name].values[0] )\n groups[k][d.element_name]= ({'hdrlen':di['recordindex'].shape[0]},x) \n except: # in case I cannot retrieve the station configuration file \n try: \n groups[k][d.element_name]=(di['recordindex'].shape[0], hdrfromfb(df, di._variables, cdmfb_noodb[d.element_name], ttrans(d.kind,kinds=gkinds) ) )\n except: \n x=numpy.zeros(di['recordindex'].shape[0], dtype=numpy.dtype(ttrans(d.kind,kinds=gkinds)))\n x.fill(numpy.nan)\n groups[k][d.element_name]=({'hdrlen':di['recordindex'].shape[0]},x) \n \n elif k in ('station_configuration'): # station_configurationt contains info of all the stations, so this extracts only the one line for the wanted station with the numpy.where\n try: # case when the station+conf cannot be retrieved \n if d.element_name in station_configuration_retrieved.columns: \n groups[k][d.element_name]=({'hdrlen': 1}, np.full( 1 , station_configuration_retrieved[d.element_name].values[0] ) )\n except:\n pass\n \n elif k in ('source_configuration'): # storing the source configuration info, e.g. 
original file name, \n if d.element_name=='source_file':\n # groups[k][d.element_name] = ( {'hdrlen':fbds.variables['date@hdr'].shape[0] } , np.full( fbds.variables['date@hdr'].shape[0] , source_file ) ) \n groups[k][d.element_name] = ( {'hdrlen': 1 }, np.full( 1 , source_file) )\n else:\n try: \n groups[k][d.element_name] = ( {'hdrlen': 1 }, np.full( 1 , np.nan) ) # element_name is the netcdf variable name, which is the column name of the cdm table k \n except KeyError:\n pass\n\n else : # this is the case where the cdm tables DO exist\n try: \n groups[k][d.element_name]=({k+'_len':len(cdm[k])}, cdm[k][d.element_name].values) # element_name is the netcdf variable name, which is the column name of the cdm table k \n except KeyError:\n pass\n \n \n \"\"\" Tryin to add attributes, e.g. description and external tables \"\"\" \n try:\n groups[k][d.element_name].attrs['external_table']=d.external_table # defining variable attributes that point to other tables (3rd and 4th columns)\n groups[k][d.element_name].attrs['description']=d.description # it faisl when trying with the observations_table \n except:\n pass\n \n try: \n if type(groups[k][d.element_name].values[0])==str:\n s=groups[k][d.element_name].values.shape\n groupencodings[k][d.element_name]={'dtype':numpy.dtype('S80'),'compression': 'gzip','chunksizes':(min(100000,s[0]),80)}\n else:\n groupencodings[k][d.element_name]={'compression': 'gzip'}\n \n if k in ('observations_table'):\n write_dict_h5(fno, groups[k], k, groupencodings[k], var_selection=[],mode='a', attrs= dic_obstab_attributes )\n \n except:\n #print('bad:',k,d.element_name)\n pass\n \n for k in groups.keys(): \n if k not in ('observations_table') : \n groups[k].to_netcdf(fno,format='netCDF4',engine='h5netcdf',encoding=groupencodings[k],group=k,mode='a') #\n \n del df\n \n return 0", "def test_pmt_pos_1t():\r\n pandas.DataFrame(straxen.pmt_positions(True))", "def get_dysbiosis_metrics(diseases, datasets, df, pthresh, samplesizes,\n overall=None):\n\n # Keep only OTUs which were signficant in at least one study\n # if x is zero, this returns zero (i.e. 
if there is no effect, it\n # doesn't count as significant so don't worry)\n sigmap = lambda x: np.sign(x) if abs(x) < pthresh else 0\n # This one is for the score-based metric\n simplesigmap = lambda x: np.sign(x) if abs(x) < pthresh else 0.5*np.sign(x)\n\n results = [[],[],[],[]]\n\n for dis in diseases:\n print(dis)\n ## Prepare subset df\n keep_datasets = [i for i in datasets if i.startswith(dis + '_')]\n disdf = df[keep_datasets]\n # Keep only genera which are significant in at least one study\n disdf = disdf.loc[disdf.applymap(sigmap).apply(abs).sum(axis=1) != 0]\n\n if disdf.empty:\n print('\\tempty, everything is zero')\n metrics = ['rep_score', 'rep_twostudies', 'rep_twostudies_norm',\n 'rep_stouffer', 'rep_stouffer_norm', 'n_sig', 'balance',\n 'rep_dataset', 'rep_dataset_norm']\n for metric in metrics:\n val = 0\n if metric == \"balance\" or metric == 'rep_dataset':\n val = np.nan\n if metric in ['rep_score']:\n # No genera are significant, therefore none are scored...\n pass\n elif metric in ['n_sig', 'balance', 'rep_dataset',\n 'rep_dataset_norm']:\n # Dataset-wise results, each disdf column gets its own label\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[val]*disdf.shape[1],\n newvariables=disdf.columns.tolist(),\n newmetric_label=metric,\n newdisease_label=dis)\n elif metric in ['rep_twostudies', 'rep_twostudies_norm',\n 'rep_stouffer', 'rep_stouffer_norm']:\n # Disease-wise results, each disease gets just one result\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[val],\n newvariables=[dis],\n newmetric_label=metric,\n newdisease_label=dis)\n\n else:\n ## Reproducibility score: +1/-1 if significant,\n # +/- 0.5 if not significant - don't weight by sample size\n # Metric is the row sum divided by number of columns\n # (i.e. sum across datasets / number of datasets), and is\n # genus-wise\n df_simple_rep = disdf.applymap(simplesigmap)\n reproducibility = list(\n df_simple_rep.sum(axis=1)/float(df_simple_rep.shape[1]))\n\n results = update_reproducibility_df_lists(\n *results,\n newvalues=reproducibility,\n newvariables=disdf.index.tolist(),\n newmetric_label='rep_score',\n newdisease_label=dis)\n\n ## Reproducibility co-occurence: genus is 'reproducibly significant'\n # if it's sig in at least 2 studies\n # Metric returns one number per disease)\n dfcooccur = disdf.applymap(sigmap)\n # Genus is reproducible if it is significant in the same\n # direction in at least net 2 studies\n reproducibility = sum(dfcooccur.sum(axis=1).apply(abs) > 1)\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dis],\n newmetric_label='rep_twostudies',\n newdisease_label=dis)\n\n ## Normalize co-occurence: same as above, but value is normalized by total number of sig OTUs in that disease (one number per disease)\n reproducibility = reproducibility/float(dfcooccur.shape[0])\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dis],\n newmetric_label='rep_twostudies_norm',\n newdisease_label=dis)\n\n ## Fisher's method: number of 'reproducible' genera, i.e. 
genera\n # with combine p-value < pthresh\n # This returns one number per disease\n reproducibility = \\\n reproducibility_from_fisher(disdf, samplesizes, pthresh)\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dis],\n newmetric_label='rep_stouffer',\n newdisease_label=dis)\n\n ## Normalized Fisher's method: same as above, but normalized\n # by total number of datasets in that disease\n reproducibility = reproducibility/float(disdf.shape[0])\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dis],\n newmetric_label='rep_stouffer_norm',\n newdisease_label=dis)\n\n ## Total number of significant OTUs\n n_otus = disdf.applymap(sigmap)\\\n .applymap(lambda x: 1 if x == 1 or x == -1 else 0)\\\n .sum()\n # index of this Series is the datasets (i.e. columns of disdf)\n results = update_reproducibility_df_lists(\n *results,\n newvalues=list(n_otus),\n newvariables=list(n_otus.index),\n newmetric_label='n_sig',\n newdisease_label=dis)\n\n ## Balance metric is number of significant disease-associated\n # (i.e. positive q-value) genera divided by total number of\n # significant genera\n n_pos = (disdf.applymap(sigmap) == 1).sum().astype(float)\n # If there are no significant OTUs, this needs to return nan,\n # otherwise the zero makes it look the same as \"all significant\n # genera are health-associated\"\n n_otus = n_otus.replace(0, np.nan)\n balance = n_pos/n_otus\n results = update_reproducibility_df_lists(\n *results,\n newvalues=list(balance),\n newvariables=list(balance.index),\n newmetric_label='balance',\n newdisease_label=dis)\n\n ## Reproducibility per dataset. For each dataset, count the\n # number of significant genera which are significant (in same dir)\n # in at least one other dataset of that disease\n for dataset in disdf:\n # Keep just the genera which are significant in the one study\n coldf = disdf[dataset].apply(sigmap)\n coldf = coldf[coldf != 0]\n # If there's at least one significant bug in that study\n if coldf.shape[0] > 0:\n coldf = disdf.loc[coldf.index].applymap(sigmap)\n # Count how many genera are significant in at least\n # two studies\n reproducibility = (coldf.sum(axis=1).apply(abs) >= 2).sum()\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dataset],\n newmetric_label='rep_dataset',\n newdisease_label=dis)\n\n # Normalize by the total number of significant bugs in\n # that dataset\n reproducibility = reproducibility/float(coldf.shape[0])\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dataset],\n newmetric_label='rep_dataset_norm',\n newdisease_label=dis)\n else:\n reproducibility = np.nan\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dataset],\n newmetric_label='rep_dataset',\n newdisease_label=dis)\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[reproducibility],\n newvariables=[dataset],\n newmetric_label='rep_dataset_norm',\n newdisease_label=dis)\n\n ## Also, if overall is given, calculate the specificity\n # (i.e. 
how much overlap with the \"core\" response each dataset has)\n if overall is not None:\n # Note: overall can be a pandas series or one-column dataframe\n overall_healthy = overall[(overall == -1).values].index\n overall_disease = overall[(overall == 1).values].index\n # Note: not looking at overall_mixed bc these *would* be\n # interesting disease-specific bugs! :)\n\n for dataset in disdf:\n # Keep just the genera which are significant in the study\n coldf = disdf[dataset].apply(sigmap)\n coldf = coldf[coldf != 0]\n # If there's at least one significant bug in that study\n if coldf.shape[0] > 0:\n\n healthy = coldf[coldf == -1].index\n disease = coldf[coldf == 1].index\n\n healthy_overlap = \\\n len([i for i in healthy if i in overall_healthy])\n disease_overlap = \\\n len([i for i in disease if i in overall_disease])\n total_overlap = healthy_overlap + disease_overlap\n total_sig = len(healthy) + len(disease)\n total_nonoverlap = total_sig - total_overlap\n\n # I don't think I should ever get this error...\n try:\n perc_overlap = total_overlap / float(total_sig)\n perc_nonoverlap = 1.0 - perc_overlap\n except ZeroDivisionError:\n perc_overlap = np.nan\n perc_nonoverlap = np.nan\n\n # If nothing was significant, all of these metrics are NaN\n else:\n total_overlap = np.nan\n total_nonoverlap = np.nan\n total_sig = np.nan\n perc_overlap = np.nan\n perc_nonoverlap = np.nan\n\n # Update the big results with these new metrics\n all_overlaps = [total_overlap, total_nonoverlap,\n total_sig, perc_overlap,\n perc_nonoverlap]\n all_labels = ['total_overlap', 'total_nonoverlap',\n 'total_sig', 'perc_overlap',\n 'perc_nonoverlap']\n for newvalue, newlabel in zip(all_overlaps, all_labels):\n results = update_reproducibility_df_lists(\n *results,\n newvalues=[newvalue],\n newvariables=[dataset],\n newmetric_label=newlabel,\n newdisease_label=dis)\n\n df_results = pd.DataFrame(data=np.column_stack(results),\n columns=['value', 'label', 'metric', 'disease'])\n return df_results", "def test_codon_usage_ecoli(self):\n CAI = CodonAdaptationIndex()\n self.assertEqual(\"%0.5f\" % CAI.cai_for_gene(\"ATGCGTATCGATCGCGATACGATTAGGCGGATG\"),\n \"0.09978\")", "def get_cod_freq(gene):\r\n header = gene.iloc[:,0].values[0].split(' ')\r\n geneID=header[0][1:]\r\n\r\n\r\n #get coding sequence\r\n cds = gene.iloc[:,1].values[0].upper().replace('T','U')\r\n codon_count=dict() \r\n \r\n #build dictionary to accumulate codon counts; ignore with stop codons\r\n for codon in list(codon_aa.keys()):\r\n if codon not in [ \"UAA\",\"UAG\", \"UGA\" ]:\r\n codon_count[codon]=0\r\n \r\n ##count codons in cds\r\n codons = []\r\n for c in range(0,len(cds),3): #O(len cds)\r\n cod=cds[c:c+3]\r\n try:\r\n codon_count[cod]+=1\r\n except KeyError:\r\n continue\r\n \r\n #store the fractional freq of each codon in the codon dictionary\r\n total_cod=len(cds)/3 #total number of codons in the cds\r\n for c in list(codon_count.keys()): #O(len codondict)\r\n codon_count[c]/=total_cod\r\n \r\n df_codcnt=pd.DataFrame(list(codon_count.items()) )\r\n df_codcnt.columns=['Codon', 'Fractional_Freq']\r\n df_codcnt=df_codcnt.set_index('Codon').T.reset_index(drop=True)\r\n \r\n df_codcnt['GeneID']=geneID\r\n\t#reorder columns\r\n cols2=[df_codcnt.columns[-1]]+sorted(df_codcnt.columns[:61])\r\n df_codcnt=df_codcnt[cols2]\r\n return df_codcnt", "def samsemPlots41and42(samsem_data,path,dict):\n coldef_type = dict['coldef_type']\n print(\"Starting SAMSEM_RES#41+42: Computing of Chi2 for simulation methods of \" + 
str(settings.id2ColDefLong[coldef_type])+\".\")\n \n if 'subfolder' in dict:\n subfolder = dict['subfolder']\n path_res = os.path.join(path,subfolder)\n if not os.path.exists(path_res): os.makedirs(path_res)\n \n # Ignore dummy algorithm\n #whatArr_tmp = [['sim_id',operator.ne,99]];howArr_tmp=[]\n #samsem_data = organizeArray(samsem_data,whatArr_tmp,howArr_tmp)\n \n corrArr = []; uncorrArr = []; data = {}; sim_names = []; chi2_pandas = {}\n \n sim_ids = sorted(set(samsem_data['sim_id'].values.astype(int)))\n for sim_id in sim_ids:\n sim_name_tmp = settings.id2Sim[sim_id]\n sim_names.append(sim_name_tmp)\n if (sim_id !=3) and (sim_id != 99):\n whatArr_tmp = [['coldef_type',operator.eq,coldef_type],['observer_coldef_type',operator.eq,coldef_type],['sim_id',operator.eq,sim_id]];howArr_tmp=[]\n else:\n whatArr_tmp = [['observer_coldef_type',operator.eq,coldef_type],['sim_id',operator.eq,sim_id]]\n alg_data_tmp = organizeArray(samsem_data,whatArr_tmp)\n \n #pandas_dict\n chi2_pandas.update({sim_name_tmp: alg_data_tmp})\n \n # Make Chi2 contingency test\n obs_array, obs_pandas = preparePandas4Chi2(chi2_pandas,{0: 'vienot', 1: 'vienot-adjusted', 2: 'kotera', 3: 'brettel', 4: 'dummy'})\n \n start = 0; end = 4\n obs_adj = obs_array[:,start:end]\n chi2, p, dof, ex = stats.chi2_contingency(obs_adj) # Compare only simulation methods\n \n res_str = \"\"\n res_str = res_str + \"Simulation methods and observations:\\n\" + str(obs_pandas)\n res_str = res_str + \"\\n\\nSimulation methods included in test:\\n\" + str(sim_names[start:end])\n res_str = res_str + \"\\nChi2: %f, p-value: %E, dof: %i, expect: \" % (chi2, p, dof) + \"\\n\"+str(ex)\n text_file = open(os.path.join(path_res,settings.id2ColDefLong[coldef_type]+\"-methods-ACC_pearson-chi2-contingency-test_p-value.txt\"), \"w+\")\n text_file.write(res_str)\n text_file.close()\n \n writePandastoLatex(obs_pandas, os.path.join(path_res,settings.id2ColDefLong[coldef_type]+\"-methods-ACC_observations.tex\"))\n \n # Make Chi2 contingency 2x2 test matrix\n dict.update({'filename': dict['filename']+'-ACC'})\n makePearsonChi2Contingency2x2Test(obs_array, path_res, sim_names, dict)", "def annotate_ISM(data_df, REFERENCE, position_list, reference_genbank_name=\"data/covid-19-genbank.gb\"):\n seq_list = data_df['sequence'].values.tolist()\n \n seq_index = []\n index = 0\n for base in REFERENCE[1]:\n if base == '-':\n seq_index.append(index)\n else:\n index += 1\n seq_index.append(index)\n reference_local_index_map = np.array(seq_index)\n mapped_reference_index = []\n for index, entropy in position_list:\n mapped_reference_index.append((index, reference_local_index_map[index], entropy))\n REFERENCE_ISM = ''.join([REFERENCE[1][item[0]] for item in position_list])\n logging.info('Reference ISM: {}.'.format(REFERENCE_ISM))\n \n gene_dict = load_gene_dict(reference_genbank_name)\n reference_raw = REFERENCE[1].replace('-', '')\n res = OrderedDict()\n res['Ref position'] = []\n res['Entropy'] = []\n res['Gene'] = []\n res['Is silent'] = []\n res['AA position'] = []\n for align_index, ref_index, entropy in mapped_reference_index:\n codon, codon_idx, name, codon_pos = find_SNP(ref_index, gene_dict, reference_raw)\n base_freq = Counter([item[align_index] for item in seq_list]).most_common()\n for alt_base, count in base_freq:\n if alt_base != reference_raw[ref_index-1]:\n break\n if codon is None:\n if_silence = True\n else:\n alt_codon = list(codon)\n alt_codon[codon_idx] = alt_base\n alt_codon = ''.join(alt_codon)\n ref_aa = translate(codon)\n ism_aa = 
translate(alt_codon)\n if ref_aa == ism_aa:\n if_silence = True\n else:\n if_silence = False\n res['Ref position'].append(ref_index)\n res['Entropy'].append(entropy)\n if name is None:\n name = 'Non-coding'\n res['Gene'].append(name)\n res['Is silent'].append(if_silence)\n if codon_pos is None:\n res['AA position'].append('NaN')\n else:\n res['AA position'].append('{}{}{}'.format(ref_aa, codon_pos, ism_aa))\n annotation_df = pd.DataFrame.from_dict(res)\n return annotation_df", "def main(n_pairs, cutoff, resDir, matFile, msa_name=None):\n dca_df = run_analysis(msa_name, n_pairs, cutoff, resDir, matFile, fni=False, plot=False)\n return dca_df", "def crs(self):\n return self.dataframe.crs", "def make_prog(self):\r\n\r\n self.cnv.clear()\r\n cdf = self.df[self.df.L != 0]\r\n c0 = cdf['C0'].value_counts().idxmax()\r\n c1 = cdf['C1'].value_counts().idxmax()\r\n c2 = cdf['C2'].value_counts().idxmax()\r\n c3 = cdf['C3'].value_counts().idxmax()\r\n self.cnv.extend([c0, c1, c2, c3])", "def __call__(self, q):\n # SASCalculator ignores the scale, so we add it in here\n yout = BasePDFGenerator.__call__(self, q)\n yout *= self.scale.value\n return yout", "def get_mod_freq_two_step(df, cols, chr_pos, strains, method=\"GMM+eIF\", clf_name=\"GMM\", \n clf=GaussianMixture(n_components=4, random_state=0), \n clf2_name=\"eIF\", clf2=iso_new.iForest(random_state=0), \n OFFSET=None):\n results = []\n for cp in chr_pos:\n _df = df.loc[(df[\"chr_pos\"]==cp)&(df.Strain.isin(strains)), cols+[\"Strain\"]]\n _X = min_max_norm(_df[cols].to_numpy().astype(\"float\"))\n # get clusters from GMM using only SIGNAL INTENSITY\n clusters = clf.fit_predict(_X) #[:,:3]\n c2i = Counter(clusters)#; print(c2i)\n # get outliers using every cluster as training sset\n mod_freqs = np.zeros((len(c2i), len(strains)))\n mod_freqs1 = np.zeros_like(mod_freqs)\n for cl in list(c2i.keys())[:3]:\n Xtrain = _X[clusters==cl]\n if len(Xtrain)<3: continue # this is arbitrary value\n scores = clf2.fit(Xtrain).score_samples(_X)\n offset = (max(scores)-min(scores))/2 if not OFFSET else OFFSET\n y_pred = scores>offset\n # get mod_freq from outlier score cut-off\n mod_freqs1[cl] = [y_pred[_df[\"Strain\"]==s].mean() for s in strains]\n # and using quantile method\n mod_freqs[cl] = get_modfreq_from_quantiles_many_samples([scores[_df[\"Strain\"]==s] for s in strains])\n\n # pick cluster that gave the largest difference in mod_freq between any two samples\n extremes = np.vstack([np.nanmin(mod_freqs, axis=1), np.nanmax(mod_freqs, axis=1)])\n mod_freq_idx = np.abs(np.diff(extremes, axis=0)).argmax()#; print(mod_freq_idx)\n # and report\n #results.append((cp, \"%s+%s_c\"%(clf_name, clf2_name), *mod_freqs1[mod_freq_idx], \n # \", \".join(map(str, strains[1:]))))\n results.append((cp, method, *mod_freqs[mod_freq_idx], \", \".join(map(str, strains[1:]))))\n return results", "def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data", "def get_ccdf(degseq):\n uniques, counts = np.unique(degseq, return_counts=True)\n cumprob = np.cumsum(counts).astype(np.double) / (degseq.size)\n return uniques[::-1], (1. - cumprob)[::-1]", "def extract_scc(data, N_max):\n\n # copy scalar coupling constants\n scc = data[['molecule_name', 'scalar_coupling_constant']].copy()\n\n # write all scc of a molecule into one line\n scc = scc.groupby('molecule_name').apply(lambda x: x.values.reshape(-1))\n\n # pad with zeros\n scc_list = []\n for i in range(len(scc)):\n n = N_max*2 - len(scc[i])\n scc_list.append(np.pad(scc[i], (0, n), 'constant',\n constant_values=(0, np.nan)))\n\n # build np array and remove molecule names\n scc = np.vstack(scc_list)\n del scc_list\n scc = np.delete(scc, [2*i for i in range(N_max)], 1)\n\n # convert to array and return\n return scc.astype(float)", "def tocsc(self):\n return self.tocsr().tocsc()", "def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data", "def get_covariates_df(dataset_name: str) -> pd.DataFrame:\n path = Path(dataset_name) / COVARIATES_FILE\n return get_dataframe(path)", "def pca_pubdev_4167_OOM():\n h2o.remove_all()\n transform_types = [\"NONE\", \"STANDARDIZE\", \"NORMALIZE\", \"DEMEAN\", \"DESCALE\"] # make sure we check all tranforms\n transformN = transform_types[randint(0, len(transform_types)-1)]\n print(\"transform used on dataset is {0}.\\n\".format(transformN))\n\n training_data = h2o.import_file(path=pyunit_utils.locate(\"/Users/wendycwong/gitBackup/SDatasets/pubdev_4167_Avkash/m120K.tar\")) # Nidhi: import may not work\n\n gramSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN)\n gramSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n powerSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN, pca_method=\"Power\")\n powerSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n # compare singular values and stuff between power and GramSVD methods\n print(\"@@@@@@ Comparing eigenvalues between GramSVD and Power...\\n\")\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"importance\"],\n powerSVDPCA._model_json[\"output\"][\"importance\"],\n [\"Standard deviation\", \"Cumulative Proportion\", \"Cumulative Proportion\"],\n tolerance=1e-5, check_all=False)\n 
print(\"@@@@@@ Comparing eigenvectors between GramSVD and Power...\\n\")\n # compare singular vectors\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"names\"], tolerance=1e-1,\n check_sign=True)", "def _find_cusps(self):\n N = self.level()\n s = []\n\n for d in arith.divisors(N):\n w = arith.gcd(d, N//d)\n if w == 1:\n if d == 1:\n s.append(Cusp(1,0))\n elif d == N:\n s.append(Cusp(0,1))\n else:\n s.append(Cusp(1,d))\n else:\n for a in range(1, w):\n if arith.gcd(a, w) == 1:\n while arith.gcd(a, d//w) != 1:\n a += w\n s.append(Cusp(a,d))\n return sorted(s)", "def run_sampler(query, nc_lcs=1000, store_path='../data/lc_output/'):\n\n # Unpack & load cadences\n sim_table = generator.simulation.load_table() #complete simulation table\n sim_table_pointing = generator.fetch_cadence_info(sim_table, query['coordinates'][0], query['coordinates'][1], filter_band=query['sample_bands'])\n\n for i in tqdm(range(nc_lcs)):\n sim_table_gen = generator.simple_mjd_sampler(sim_table_pointing, time_separation=query['survey_duration'], mode='random') # should have all the epochs of observation withiin that time frame\n #phase = sim_table_gen['mjd'] - sim_table_gen['mjd'][0] # starting from the first detection\n\n return calc_lc_band(sim_table_gen)\n #Select each filter?\n\n\n\n\n # TODO: Clean\n #model_M = np.zeros(shape=(5000,6))\n #lim_5s = np.zeros(shape=(5000,6)) # no more than >1000 pointings per field (set to 5k to be safe); mark as nan the empty ones\n #snr = np.zeros(shape=(5000,6))\n #sigma_band = np.zeros(shape=(5000,6))\n #for index, filter in tqdm(enumerate(lsst_bands)):\n # model_M[0:len(phase[sim_table_gen['filter']==filter]), index] = models.photometricmodel(phase[sim_table_gen['filter']==filter]).fourier_cos(*query['model_theta'][index])\n # model_M[:,index][model_M[:,index]==0] = np.inf # mask as infinity TODO -- numpy mask instead", "def get_unit_comp(nsys):\n num_species = nsys.natm_per_species()\n gcd = np.gcd.reduce(num_species)\n unum_sp = [ n/gcd for n in num_species ]\n return gcd,unum_sp", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. 
Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def plot_bias(clf_list = ['test_small','rt_small','test2_small'],return_df = False,XKCD = False):\n if XKCD = True:\n plt.xkcd()\n print('damn')\n df = load_all_dfs(clf_list)\n df = df.swaplevel(0,1)\n del df['std']\n df.hist()\n plt.figure()\n\n for clf in clf_list:\n df.ix[clf].mean().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('mean')\n plt.figure()\n \n # c = df.columns\n for clf in clf_list:\n #df[c[1:]].ix[clf].max().plot(label = clf,figsize=(16, 4))\n df.ix[clf].max().plot(label = clf,figsize=(16, 4))\n plt.legend(loc='upper right')\n plt.title('max')\n \n plt.figure()\n for clf in clf_list:\n df.ix[clf].std().plot(label = clf,figsize=(16, 4))\n\n \n plt.legend(loc='upper right')\n plt.title('std')\n plt.figure()\n used_list = []\n for clf in clf_list:\n for clf2 in clf_list:\n if (clf != clf2) and ({clf,clf2} not in used_list):\n diff = ((df.ix[clf] - df.ix[clf2])**2)**(1/2)\n diff.mean().plot(label = clf+' - ' +clf2,figsize=(16, 4))\n used_list.append({clf,clf2})\n \n \n \n \n \n plt.legend(loc='upper right')\n plt.title('difference')\n print('damnover')\n if return_df == True:\n return df", "def df_sample_names(self):\n return self.abundance_mat_mult(True)", "def create_geneIDsDF():\n datas=data.plfam_to_matrix()\n datas.run()\n print('***Dataframe created***')", "def getCodonSeqs(self):\r\n combinations = list(self.codonTable[aa] for aa in self.peptide) # creates a list of possible codons based on AA\r\n self.allPepSeqs = list(''.join(codon) for codon in itertools.product(*combinations)) # creates list of peptides\r\n return", "def plot_qc_reads(qc_df):\n # Record NA values as 0\n qc_df = qc_df.fillna(0)#.set_index(\"sample\")\n cols = [\"sample\",\n \"num_reads\",\n \"num_mapped\",\n \"num_unique_mapped\",\n \"num_junctions\"]\n qc_df = qc_df[cols]\n melted_qc = pandas.melt(qc_df, id_vars=[\"sample\"])\n qc_r = conversion_pydataframe(melted_qc)\n labels = tuple([\"num_reads\",\n \"num_mapped\",\n \"num_unique_mapped\",\n \"num_junctions\"])\n labels = robj.StrVector(labels)\n variable_i = qc_r.names.index('variable')\n qc_r[variable_i] = robj.FactorVector(qc_r[variable_i],\n levels = labels)\n ggplot2.theme_set(ggplot2.theme_bw(12))\n scales = importr(\"scales\")\n r_opts = r.options(scipen=4)\n p = ggplot2.ggplot(qc_r) + \\\n ggplot2.geom_point(aes_string(x=\"sample\", y=\"value\")) + \\\n ggplot2.scale_y_continuous(trans=scales.log10_trans(),\n breaks=scales.trans_breaks(\"log10\",\n robj.r('function(x) 10^x')),\n labels=scales.trans_format(\"log10\",\n robj.r('math_format(10^.x)'))) + \\\n 
r.xlab(\"CLIP-Seq samples\") + \\\n r.ylab(\"No. reads\") + \\\n ggplot2.coord_flip() + \\\n ggplot2.facet_wrap(Formula(\"~ variable\"), ncol=1) + \\\n theme(**{\"panel.grid.major.x\": element_blank(),\n \"panel.grid.minor.x\": element_blank(),\n \"panel.grid.major.y\": theme_line(size=0.5,colour=\"grey66\",linetype=3)})\n p.plot()\n\n return\n r.par(mfrow=np.array([1,2]))\n num_samples = len(qc_df.num_reads)\n r.par(bty=\"n\", lwd=1.7, lty=2)\n r_opts = r.options(scipen=4)\n r.options(r_opts)\n r.dotchart(convert_to_r_matrix(qc_df[[\"num_reads\",\n \"num_mapped\",\n \"num_unique_mapped\"]]),\n xlab=\"No. reads\",\n lcolor=\"black\",\n pch=19,\n gcolor=\"darkblue\",\n cex=0.8)\n r.par(bty=\"n\")\n r.dotchart(convert_to_r_matrix(qc_df[[\"num_ribosub_mapped\",\n \"num_ribo\",\n \"num_junctions\"]]),\n xlab=\"No. reads\",\n lcolor=\"black\",\n pch=19,\n gcolor=\"darkblue\",\n cex=0.8)", "def convertToSpectroGram(self):", "def uCSIsCypriotSyllabary(code):\n ret = libxml2mod.xmlUCSIsCypriotSyllabary(code)\n return ret", "def notebook_01():\n\n freq_list, volt_list = las.load_freq_volt()\n\n n_steps, n_det, n_f, _ = np.shape(volt_list)\n\n #y_sym_mat_o = ds.by_sym_mat(volt_list, det_ind=0)\n #y_sym_mat_i = ds.by_sym_mat(volt_list, det_ind=1)\n\n # print(np.shape(y_sym_mat_o))\n # print(np.shape(y_sym_mat_i))\n # (mu_o, sigma_o) = stats.norm.fit(y_sym_mat_o[:,0])\n # (mu_i, sigma_i) = stats.norm.fit(y_sym_mat_i[:,0])\n # print(mu_o, sigma_o)\n # print(mu_i, sigma_i)\n # print(mu_o*89000, mu_i*89000.0, -mu_i*89000.0, -mu_o*89000.0)\n\n volt_list_sym = ds.volt_list_sym_calc(volt_list)\n\n fit_params_mat = fp.fit_params(ff.f_b_field, volt_list_sym)\n\n fit_params_mat_s = fp.fit_params(ff.f_b_field_off, volt_list_sym)\n\n # pbd.plot_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n # pfp.plot_fit_sym_comp(volt_list_sym, fit_params_mat, fit_params_mat_s, freq_list)\n\n\n # pfp.plot_fit_sym_comp_2(volt_list_sym, fit_params_mat_s, freq_list)\n\n #pfp.plot_symmetry_along_z(volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)\n\n fp.fit_params_FH_data(ff.f_b_field)\n\n # pbd.plot_rel_diff_bare_signal_and_fit_norm_shifted(0, volt_list_sym, freq_list, fit_params_mat_s, ff.f_b_field_off)", "def coded_output(database_df, output_loc='coded_output.xlsx'):\n\n LE = LabelEncoder()\n output_df = pd.DataFrame()\n labels_ser = pd.Series()\n\n # ID no coding\n output_df[\"ID\"] = database_df[\"ID\"]\n labels_ser[\"ID\"] = \"no coding\"\n\n # Age no coding\n output_df[\"Age\"] = database_df['Age']\n labels_ser[\"Age\"] = \"no coding\"\n\n # Sex coded\n LE.fit(database_df['Sex'])\n labels_ser['Sex'] = create_key_string(list(LE.classes_))\n output_df['Sex'] = LE.transform(database_df['Sex'])\n\n # Race coded\n LE.fit(database_df['Race'])\n labels_ser['Race'] = create_key_string(list(LE.classes_))\n output_df['Race'] = LE.transform(database_df['Race'])\n\n # Smoking coded\n LE.fit(database_df['Smoking'])\n labels_ser['Smoking'] = create_key_string(list(LE.classes_))\n output_df['Smoking'] = LE.transform(database_df['Smoking'])\n\n # BMI no coding\n output_df[\"BMI\"] = database_df[\"BMI\"]\n labels_ser[\"BMI\"] = \"no coding\"\n\n # Comorbs broken into:\n # - HTN T / F\n output_df[\"has_htn\"] = database_df['Comorb'].apply(dz_is_in, args=(\"htn\",))\n labels_ser[\"has_htn\"] = \"0 = no HTN, 1 = has HTN\"\n\n # - DM\n output_df[\"has_dm\"] = database_df['Comorb'].apply(dz_is_in, args=(\"dm\",))\n labels_ser[\"has_dm\"] = \"0 = no DM, 1 = has DM\"\n\n # - Psych\n 
output_df[\"has_psych\"] = database_df['Comorb'].apply(dz_is_in, args=(\"psych\",))\n labels_ser[\"has_psych\"] = \"0 = no psych, 1 = has psych\"\n\n # - Renal\n output_df[\"has_ckd\"] = database_df['Comorb'].apply(dz_is_in, args=(\"ckd\",))\n labels_ser[\"has_ckd\"] = \"0 = no CKD, 1 = has CKD\"\n\n # - Heart?\n #output_df[\"has_cv\"] = database_df[\"Heart\"].apply(is_dz_free)\n #labels_ser[\"has_cv\"] = \"0 = no CV disease, 1 = has some CV disease\"\n\n output_df[\"has_cv\"] = database_df['PostDx'].apply(dz_is_in, args=(\"Cardiac\",))\n labels_ser[\"has_cv\"] = \"0 = no CV disease, 1 = has some CV disease\"\n\n # - CNS\n #output_df[\"has_cns\"] = database_df[\"CNS\"].apply(is_dz_free)\n #labels_ser[\"has_cns\"] = \"0 = no CNS disease, 1 = has some CNS disease\"\n\n output_df[\"has_cns\"] = database_df['PostDx'].apply(dz_is_in, args=(\"Neurologic\",))\n labels_ser[\"has_cns\"] = \"0 = no CNS disease, 1 = has some CNS disease\"\n\n # - Opiate\n output_df[\"has_opiate\"] = database_df['PostDx'].apply(dz_is_in, args=(\"Medication\",))\n labels_ser[\"has_opiate\"] = \"0 = no opiate, 1 = on opiates\"\n\n # AHI no coding\n output_df[\"AHI\"] = database_df[\"AHI\"]\n labels_ser[\"AHI\"] = \"no coding\"\n\n # HFrEF y/n\n output_df[\"has_hfref\"] = database_df[\"Heart\"].apply(dz_is_in, args=(\"hfref\",))\n labels_ser[\"has_hfref\"] = \"0 = no hfref, 1 = has hfref\"\n\n #TODO: HfpEF y/n? Afib y/n?\n\n # HFpEF and AF\n output_df[\"has_hfpef_and_af\"] = database_df[\"Heart\"].apply(dzs_are_in, args=(\"hfpef\", \"afib\",))\n labels_ser[\"has_hfpef_and_af\"] = \"0 = no hfpef or no afib, 1 = has both hfpef and af\"\n\n # HFrEF and AF y/n\n output_df[\"has_hfref_and_af\"] = database_df[\"Heart\"].apply(dzs_are_in, args=(\"hfref\", \"afib\",))\n labels_ser[\"has_hfref_and_af\"] = \"0 = no hfref or no afib, 1 = has both hfref and af\"\n\n # Stroke y/n\n output_df[\"has_cva\"] = database_df[\"CNS\"].apply(dz_is_in, args=(\"cva\",))\n labels_ser[\"has_cva\"] = \"0 = no cva, 1 = has had a cva\"\n\n # Dementia or neurodegen\n\n # output_df[\"has_dementia\"] = database_df[\"CNS\"].apply(dz_is_in, args=(\"dementia\",))\n # labels_ser[\"has_dementia\"] = \"0 = no dementia, 1 = has dementia\"\n # output_df[\"has_neurodegen\"] = database_df[\"CNS\"].apply(dz_is_in, args=(\"neurodegenerative\",))\n # labels_ser[\"has_neurodegen\"] = \"0 = no neurodegenerative disorder, 1 = has neurodegenerative disorder\"\n\n # then add those two together, then round 2 (= has both) back to 1 (= has any)\n output_df[\"has_dem_or_neurodegen\"] = (database_df[\"CNS\"].apply(dz_is_in, args=(\"dementia\",)) + \\\n database_df[\"CNS\"].apply(dz_is_in, args=(\"neurodegenerative\",))).replace(2, 1)\n labels_ser[\"has_dem_or_neurodegen\"] = \"0 = no dementia or neurodegen, 1 = has dementia or neurodegen\"\n\n # Dementia and (stroke or neurodegen)\n\n output_df[\"has_neurodegen_and_cva\"] = database_df[\"CNS\"].apply(dzs_are_in, args=(\"neurodegenerative\", \"cva\",))\n labels_ser[\"has_neurodegen_and_cva\"] = \"0 = no cva or no neurodegen, 1 = has both cva and neurodegen\"\n output_df[\"has_dem_and_cva\"] = database_df[\"CNS\"].apply(dzs_are_in, args=(\"dementia\", \"cva\",))\n labels_ser[\"has_dem_and_cva\"] = \"0 = no cva or no dementia, 1 = has both cva and dementia\"\n\n # then add those two together, then round 2 (= has both) back to 1 (= has any)\n output_df[\"has_dem_and_cva_or_degen\"] = (database_df[\"CNS\"].apply(dzs_are_in, args=(\"neurodegenerative\", \"cva\",)) + \\\n database_df[\"CNS\"].apply(dzs_are_in, args=(\"dementia\", 
\"cva\",))).replace(2,1)\n labels_ser[\"has_dem_and_cva_or_degen\"] = \"1 = has (dem and cva) or (neurodegen and cva), 0 = doesn't have those combos\"\n\n # Final Treatment\n\n # #Similarly do the same for ASV (group 0) vs. CPAP and BPAP together.\n database_df['FinalTx_coll'] = database_df.apply(collapse_final_treatment, axis=1)\n\n LE.fit(database_df['FinalTx_coll'])\n class_for_swap = list(LE.classes_) # swapped the order of the labels to be more intuitive\n class_for_swap[0], class_for_swap[1] = class_for_swap[1], class_for_swap[0]\n labels_ser['FinalTx_coll'] = create_key_string(class_for_swap) # switch labels for PAP to be 0 (not 1) and ASV to be 1 (not 0)\n\n output_df['FinalTx_coll'] = LE.transform(database_df['FinalTx_coll'])\n output_df['FinalTx_coll'] = output_df['FinalTx_coll'].apply(swap_value) #so that the labels are correct order\n\n # Collapse: percOSA/CSA as 0 and 1 (0 being >50% OSA –combine 0 and 1 groups and 1 being >50% CSA –combine the 3 and 4).\n\n database_df['PercOSA'] = database_df.apply(collapse_base_dx, axis=1)\n\n # Category of OSA\n LE.fit(database_df['PercOSA'])\n class_for_swap = list(LE.classes_) # swapped the order of the labels to be more intuitive\n labels_ser['PercOSA'] = create_key_string(class_for_swap[1:] + [class_for_swap[0]]) # Perc(entage) OSA: more descriptive term for BaseDx\n output_df['PercOSA'] = LE.transform(database_df['PercOSA'])\n output_df['PercOSA'] = output_df['PercOSA'].apply(swap_value) #so that the labels are correct order\n\n # Dx Study\n LE.fit(database_df['StudyType'])\n labels_ser['StudyType'] = create_key_string(list(LE.classes_))\n output_df['StudyType'] = LE.transform(database_df['StudyType'])\n\n output_df.to_excel(output_loc)\n labels_ser.to_excel(\"keys_\"+output_loc)\n return", "def parse_dataframes(genome_gtf, sralist):\n\n def gather_strand_by_geneID_dict(genome_gtf):\n \"\"\"\n Returns dictionary with strand orientation as values and geneIDs as Keys/\n e.g.: {'YAL012W': '+',\n 'YAL069W': '+',\n 'YAL068W-A': '+',\n \"\"\"\n strand_by_geneID_dict = {}\n with open(genome_gtf) as f: \n for line in f: \n current_line = line.split('\\t')\n if current_line[2] == \"CDS\":\n current_orf = current_line[8].split(';')[2].split()[1].strip('\\\"')\n current_strand = current_line[6]\n strand_by_geneID_dict[current_orf] = current_strand\n return strand_by_geneID_dict\n\n\n def import_scikit_data(sralist):\n \"\"\"\n Import results from scikit pipeline for all datasets contained in datsets_names.\n \"\"\"\n scikit_data_dict = {}\n for dataset in sralist:\n with open(TMP_DIR+'scikit_'+dataset+'/ALL_genes_profile_dict.json', 'r') as scikit_data:\n scikit_data_dict[dataset] = [json.load(scikit_data)]\n return scikit_data_dict\n\n\n def build_mat_scikit_strandOriented(sralist, scikit_data):\n \"\"\"\n Building of scikit_df based on the output of plot_ribo_density_dict.py script.\n\n C/-/reverse/complementary strand are taken into account and the profile values\n (\"codon_density_profile\", \"codon_triplet\", \"codon_AA\") are reversed. This is\n performed by adding [::-1] to C strands profile ends.\n\n Same profile values are also have their extremities trimmed out of 8 codons.\n (This is because the scikit-ribo pipeline considers 8 extra codons on each end,\n but here we are only interested in the coding sequence). 
This is performed by\n adding [8:-8] to profile lists ends.\n \"\"\"\n\n scikit_mat = {}\n seq_codons = {}\n seq_aa = {}\n\n for geneID in scikit_data[sralist[0]][0].keys():\n for ix, dataset in enumerate(sralist):\n\n if geneID in scikit_data[dataset][0].keys():\n current_profile = scikit_data[dataset][0].get(geneID, np.nan)\n current_ribo = current_profile[0]\n current_ribo = current_ribo[8:-8]\n N = len(sralist)\n M = len(current_ribo)\n print(geneID, M)\n\n if ix == 0:\n current_matrix = np.zeros((N,M)) * np.nan\n\n current_seq_codons = current_profile[1]\n current_seq_codons = current_seq_codons[8:-8]\n\n current_seq_aa = current_profile[2]\n current_seq_aa = current_seq_aa[8:-8]\n\n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n seq_codons[geneID] = current_seq_codons\n seq_aa[geneID] = current_seq_aa\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n seq_codons[geneID] = current_seq_codons[::-1]\n seq_aa[geneID] = current_seq_aa[::-1]\n \n \n if strand_by_geneID_dict.get(geneID, \"NA\") == \"+\":\n current_matrix[ix,:] = current_ribo\n\n elif strand_by_geneID_dict.get(geneID, \"NA\") == \"-\":\n current_matrix[ix,:] = current_ribo[::-1]\n \n if np.sum(current_matrix) > 0: \n scikit_mat[geneID] = current_matrix\n\n# scikit_df = pd.DataFrame(values_list, columns=columns_list)\n\n return scikit_mat, seq_codons, seq_aa\n\n\n def mean_norm(row):\n codon_dens_prof = row.codon_density_profile\n profile_average = np.average(codon_dens_prof)\n\n return [x/profile_average for x in codon_dens_prof]\n \n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df.apply(mean_norm, axis=1)\n #scikit_data_df[\"mean_norm_codon_density_profile\"] = scikit_data_df['mean_norm_codon_density_profile'].apply(lambda x: x[8:-8])\n\n strand_by_geneID_dict = gather_strand_by_geneID_dict(genome_gtf)\n scikit_data_dict = import_scikit_data(sralist)\n scikit_data_mat, seq_codons_dict, seq_aa_dict = build_mat_scikit_strandOriented(sralist, scikit_data_dict)\n\n with open('../data/processed/scikit_mat.pkl', 'wb') as f:\n \tpickle.dump(scikit_data_mat, f)\n\n with open('../data/processed/scikit_codonseq.pkl', 'wb') as f_seq:\n pickle.dump(seq_codons_dict, f_seq)\n \n\n return scikit_data_mat", "def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])", "def Ncen(self, m):\n pass", "def snparamdf(self):\n if self._snparamdf is None:\n self.salt2params.paramSamples.set_index('snid', inplace=True)\n self.catsimpos.galdf.set_index('snid', inplace=True)\n self._snparamdf = self.salt2params.paramSamples.join(self.catsimpos.galdf)\n return self._snparamdf", "def _qsd_l2_cx_count(self, n):\n return 9 / 16 * 4**n - 3 / 2 * 2**n", "def ADM_SM_QCD(nf):\n\n adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],\n [0, 0, 0, 0, 12, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 12],\n [0, 0, 0, 0, 0, 0, 12, 0],\n [0, 8/3, 0, 0, - 19/3, 5, 0, 0],\n [8/3, 0, 0, 0, 5, - 9, 0, 0],\n [0, 0, 0, 8/3, 0, 0, - 23/3, 5],\n [0, 0, 8/3, 0, 0, 0, 5, - 23/3]])\n\n adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4/3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]])\n\n adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 4/3]])\n\n 
adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 4/3],\n [0, 0, 0, 0, 0, 0, 0, 0]])\n\n adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4/3, 0]])\n\n adm_q_q = np.array([[4, 4, 0, - 28/3],\n [0, 0, 0, 44/3],\n [0, 0, 44/9, 0],\n [5/3, 13/3, 0, - 106/9]])\n\n adm_qqp_q = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 4/3],\n [0, 0, 0, 0],\n [0, 0, 4/9, 0],\n [0, 0, 0, 0]])\n\n\n adm_qpq_q = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 4/3],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 4/9, 0]])\n\n adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 8/3, 0],\n [0, 0, 0, 0, 20/9, 0, 0, 0]])\n\n adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 8/3],\n [0, 0, 0, 0, 20/9, 0, 0, 0]])\n\n adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\\\n adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q, adm_qpq_q, np.zeros((8,12))))\n\n adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\\\n adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))\n\n adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\\\n np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)), adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)), adm_qpq_q, np.zeros((8,4))))\n\n adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\\\n np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq, adm_qqp_q, np.zeros((8,12)), adm_qpq_q))\n\n adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)), adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\\\n adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)), np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))\n\n adm_dc = np.hstack((adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\\\n adm_qpq_qppq, np.zeros((8,8)), adm_qpq_qqpp, np.zeros((8,4)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,4))))\n\n adm_db = np.hstack((adm_qqp_qppq, np.zeros((8,16)), adm_qpq_qppq, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\\\n np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq, np.zeros((8,4)), adm_qqp_q, np.zeros((8,8)), adm_qpq_q))\n\n adm_sc = np.hstack((np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)),\\\n adm_qqp_qqp, adm_qqp_qqpp, adm_qpq_qqpp, np.zeros((8,8)), adm_qqp_q, adm_qpq_q, np.zeros((8,4))))\n\n adm_sb = np.hstack((np.zeros((8,8)), adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq, adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\\\n adm_qqp_qqpp, adm_qqp_qqp, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q))\n\n adm_cb = np.hstack((np.zeros((8,16)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\\\n adm_qqp_qppq, adm_qpq_qppq, adm_qqp_qqp, np.zeros((8,12)), adm_qqp_q, adm_qpq_q))\n\n adm_u = np.hstack((adm_q_qqp, adm_q_qqp, adm_q_qqp, adm_q_qqp, np.zeros((4,48)), adm_q_q, np.zeros((4,16))))\n\n adm_d = 
np.hstack((adm_q_qpq, np.zeros((4,24)), adm_q_qqp, adm_q_qqp, adm_q_qqp, np.zeros((4,24)), np.zeros((4,4)), adm_q_q, np.zeros((4,12))))\n\n adm_s = np.hstack((np.zeros((4,8)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)), adm_q_qqp, adm_q_qqp, np.zeros((4,8)),\\\n np.zeros((4,8)), adm_q_q, np.zeros((4,8))))\n\n adm_c = np.hstack((np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,8)), adm_q_qpq, np.zeros((4,8)), adm_q_qqp,\\\n np.zeros((4,12)), adm_q_q, np.zeros((4,4))))\n\n adm_b = np.hstack((np.zeros((4,24)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,8)), adm_q_qpq, adm_q_qpq, np.zeros((4,16)), adm_q_q))\n\n\n adm = np.vstack((adm_ud, adm_us, adm_uc, adm_ub, adm_ds, adm_dc, adm_db, adm_sc, adm_sb, adm_cb, adm_u, adm_d, adm_s, adm_c, adm_b))\n\n if nf == 5:\n return adm\n elif nf == 4:\n return np.delete(np.delete(adm, np.r_[np.s_[24:32], np.s_[48:56], np.s_[64:80], np.s_[96:100]], 0),\\\n np.r_[np.s_[24:32], np.s_[48:56], np.s_[64:80], np.s_[96:100]], 1)\n else:\n raise Exception(\"nf has to be 4 or 5\")", "def csi(self):\n return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0])", "def get_codon_arr(chromosome: Chromosome) -> np.ndarray:\n\n seq_len = len(chromosome.sequence)\n arr = np.zeros((seq_len - 2,), dtype=np.int)\n\n for f in chromosome.features:\n\n if f.type != 'CDS':\n continue\n if f.strand == '-':\n continue\n\n protein_len = (f.end - f.start) // 3\n for aa in range(protein_len):\n pos = f.start + (aa * 3) - 1 # -1 to 0-based\n arr[pos] = 1\n\n return arr", "def from_mypackage(mycosmo):\n # Cosmology provides a nice method \"mapping\", so all that needs to\n # be done here is create a dictionary of the parameters\n mapping = {}\n mapping[\"H0\"] = mycosmo.hubble_parameter\n mapping[\"Om0\"] = mycosmo.Omega_matter_initial\n ... 
# keep building mapping\n\n return Cosmology.from_format(\n mapping, format=\"mapping\", move_to_meta=True\n ) # extra info -> meta", "def constellaqc(denovo_groups, annotated_groups):\n known_feat = np.unique(annotated_groups.loc[:, 'group'])\n pred_group = np.unique(denovo_groups.loc[:, 'group'])\n\n scores = []\n\n for anno in known_feat:\n # anno_bool_index = annotated_groups.loc[:, 'group'] == anno\n anno_group_calls = denovo_groups.loc[annotated_groups.loc[:, 'group'] == anno, 'group'].values\n # print(anno, 'count: ', np.sum(anno_bool_index))\n score_row = []\n for denovo in pred_group:\n score_row.append(np.sum(anno_group_calls == denovo))\n scores.append(score_row)\n\n scores = pd.DataFrame(scores, index=known_feat, columns=pred_group)\n\n if params.debug is not None:\n print('Known Feature-Predicted Group Scoring Matrix:\\n')\n print(scores)\n\n anno_sum = []\n anno_no = []\n anno_error = []\n ni = []\n\n for anno in known_feat:\n anno_sum.append(np.sum(scores.loc[anno, :].values))\n anno_no.append(np.sum(scores.loc[anno, :].values != 0))\n anno_error.append(np.sum(scores.loc[anno, :].values != 0) - 1)\n ni.append(1)\n pred_sum = []\n pred_no = []\n pred_error = []\n nj = []\n\n for denovo in pred_group:\n pred_sum.append(np.sum(scores.loc[:, denovo].values))\n pred_no.append(np.sum(scores.loc[:, denovo].values != 0))\n pred_error.append(np.sum(scores.loc[:, denovo].values != 0) - 1)\n nj.append(1)\n\n anno_valid = np.array(anno_sum) - ni - np.array(anno_error)\n # pred_valid = np.array(pred_sum) - nj - np.array(pred_error)\n\n v_sum = np.sum(anno_valid)\n s_sum = np.sum(anno_error)\n c_sum = np.sum(pred_error)\n total = v_sum + s_sum + c_sum\n\n print('\\n\\nValid Call Rate: ', round(100 * (v_sum / total), 2), '%')\n print('Splitting Call Rate: ', round(100 * (s_sum / total), 2), '%')\n print('Clumping Call Rate: ', round(100 * (c_sum / total), 2), '%')", "def load_ChEMBL_kd():\n affinity = pd.read_csv('./dataset/regression/ChEMBL/Chem_Kd_nM.txt', header=None)\n target = pd.read_csv('./dataset/regression/ChEMBL/ChEMBL_Target_Sequence.txt', header=None)\n drug = pd.read_csv('./dataset/regression/ChEMBL/Chem_SMILES_only.txt', header=None)\n \n SMILES=[]\n Target=[]\n y=[]\n drugcnt=[]\n \n for i in range(len(target)):\n Target.append(target[0][i])\n y.append(affinity[0][i])\n SMILES.append(drug[0][i])\n\n aff=[]\n total=[]\n for i in range(len(target)):\n aff.insert(i, y[i].split(\" \"))\n for i in aff:\n total += i\n for i in range(len(SMILES)):\n drugcnt.insert(i, len(SMILES[i].split()))\n\n smile = []\n for segments in SMILES:\n for x in segments.split():\n smile.extend(x)\n #smile = [x for segments in SMILES for x in segments.split()]\n smiles_res=[]\n y_tmp=[]\n target_res=[]\n tmp=[]\n\n for i in range(len(drugcnt)):\n tmp.extend(repeat(Target[i], drugcnt[i]))\n for i in range(len(total)):\n if total[i] != '-1':\n y_tmp.append(total[i])\n smiles_res.append(smile[i])\n target_res.append(tmp[i])\n\n y_res = [float(i) for i in y_tmp]\n y_res = convert_y_unit(np.array(y_res), 'nM', 'p')\n return np.array(smiles_res), np.array(target_res), np.array(y_res)", "def set_lookup_qn(diagram, p_cm, p_max, gammas, skip=True, verbose=0):\n\n lookup_p = set_lookup_p(p_max, p_cm, diagram, skip)\n lookup_g = set_lookup_g(gammas, diagram)\n\n # TODO: A more elegant solution for combining lookup_p and lookup_g is welcome\n # maybe Multiindex.from_product()\n tmp = it.product(lookup_p, lookup_g)\n lookup_qn = []\n for t in tmp:\n lookup_qn.append(t[0]+t[1])\n lookup_qn = DataFrame(lookup_qn, 
columns=['p_{so}', 'p_{si}', '\\gamma_{so}', '\\gamma_{si}'])\n# lookup_qn['p_{so}'] = qn['p_{so}'].apply(np.array)\n# lookup_qn['p_{si}'] = qn['p_{si}'].apply(np.array)\n \n return lookup_qn", "def get_mod_freq_clf_train_test(df, cols, chr_pos, strains, train_samples, \n clf=KNeighborsClassifier(), method=\"KNN\"):\n results = []\n for cp in chr_pos:\n # train classifier using train sampels: unmod and mod\n _df = df.loc[(df[\"chr_pos\"]==cp)&(df.Strain.isin(train_samples)), cols+[\"Strain\"]]\n X_train = min_max_norm(_df[cols].to_numpy().astype(\"float\"))\n y_train = _df.Strain==train_samples[-1]\n clf.fit(X_train, y_train)\n # min-max normalisation\n _df = df.loc[(df[\"chr_pos\"]==cp)&(df.Strain.isin(strains)), cols+[\"Strain\"]]\n _X = min_max_norm(_df[cols].to_numpy().astype(\"float\"))\n # get fit and clusters\n clusters = clf.predict(_X) # this will return 0 (unmodified) and 1 (modified)\n # get modification freuqency - simply number of 1s over all for each sample\n freqs = [clusters[_df[\"Strain\"]==s].mean() for s in strains]\n results.append((cp, method, *freqs, \", \".join(map(str, strains[1:]))))\n return results", "def base_composition(reads, base):\n assert base.upper() in set(\"ACGT\")\n\n \"\"\" Reports nucelotide frequencies at each position in the\n sam sequences\n \"\"\"\n # DNA_Alphabet=[\"A\",\"C\",\"T\",\"G\",\"N\"]\n all_nucs = []\n for read in reads:\n nucs = {} # Dictionary to store nucleotide data.\n seq = read[9]\n for i in range(0, len(seq)):\n nucs[str(i + 1)] = seq[i]\n all_nucs.append(nucs)\n all_items = []\n counts = []\n for dicts in all_nucs:\n for item in dicts.items():\n all_items.append(item)\n all_items.sort(key=operator.itemgetter(0))\n groups = [map(operator.itemgetter(1), list(group))\n for key, group in itertools.groupby(\n all_items, operator.itemgetter(0))]\n for group in groups:\n counts.append(group.count(base))\n\n pos = range(1, len(seq) + 1)\n\n # Create plot.\n plt.figure(1, figsize=(8, 8))\n plt.axes([0.1, 0.1, 0.8, 0.8])\n plt.bar(pos, counts, facecolor='g')\n plt.xlabel(\"Position\")\n plt.ylabel(\"number of mapped reads\")\n plt.title(base)\n plt.show()", "def seq_gc(seq, mapped_only=True):\n if not isinstance(seq, str):\n raise ValueError(\"reformat input sequence as a str\")\n g = seq.count(\"G\")\n g += seq.count(\"g\")\n c = seq.count(\"C\")\n c += seq.count(\"c\")\n nbases = len(seq)\n if mapped_only:\n n = seq.count(\"N\")\n n += seq.count(\"n\")\n nbases -= n\n return (g + c) / nbases if nbases > 0 else np.nan", "def raw_features_extractor(database='./red_cod.db.pkl', sites=-1, elements = -1, maxatoms= -1,\r\n dictionary='diccionario', features='datosrahm.csv'):\r\n \r\n df=create_collection(database=database,sites=sites, elements=elements, maxatoms=maxatoms, \r\n dictionary=dictionary)\r\n \r\n start=time.time()\r\n \r\n datos=pd.read_csv(features)\r\n datos=datos.fillna(-1)\r\n\r\n dicc=dict(datos[['Symbol','Z']].values)\r\n\r\n dicc['D']=1\r\n dicc['Bk']=97\r\n dicc['Cf']=98\r\n dicc['Es']=99\r\n dicc['Fm']=100\r\n dicc['Md']=101\r\n dicc['No']=102\r\n dicc['Lr']=103\r\n \r\n max_sitios = max(df['sitios'].values)\r\n\r\n df=df[df['sitios'] <= max_sitios].reset_index(drop=True)\r\n \r\n X=np.zeros((len(df),max_sitios,104))\r\n y=np.zeros((len(df),1))\r\n mult=np.zeros((len(df),max_sitios))\r\n wyckmul=np.load('support/WyckoffSG_dict.npy').item()['wyckmul']\r\n \r\n for row in range(len(df)):\r\n \r\n item=df['WyckOcc'][row]\r\n sitios=list(item.values()) \r\n sitocc=np.zeros((len(sitios),104))\r\n spacegroup = 
str(df['sgnum'][row]).zfill(3)\r\n \r\n try:\r\n \r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n sitios] for i in j]\r\n \r\n except:\r\n print('There exists an error concerning with the space group of CIF ', df['cif'][row],'\\n')\r\n print('Please check in www.crystallography.net to provide the correct space group number of that CIF',\r\n '\\n','\\n')\r\n spacegroup=input('Give me the correct spacegroup:'+'\\n'+'\\n')\r\n s=[int(wyckmul[spacegroup][i]) for j in [list(item.keys()) for item in \\\r\n list(df['WyckOcc'][row].values())] for i in j]\r\n \r\n occs=[]\r\n for i in range(len(sitios)):\r\n\r\n for j in list(sitios[i].values()):\r\n \r\n ocupacion=np.array(list(j.values()))\r\n llaves=[llave.replace('+','').replace('-','').replace('1',\r\n '').replace('2','').replace('3','').replace('4',\r\n '') for llave in np.array(list(j.keys()))]\r\n llaves=[llave.replace('.','') for llave in llaves]\r\n llaves=[llave.replace('5','').replace('6','').replace('7',\r\n '').replace('8','').replace('9','').replace('0',\r\n '') for llave in llaves]\r\n vector=np.zeros((1,104))\r\n occs=[sum(ocupacion)]+occs\r\n \r\n try:\r\n \r\n idx=[dicc[k] for k in llaves]\r\n \r\n except:\r\n \r\n print(' ELEMENTO NO IDENTIFICADO EN LA LISTA ',llaves,'\\n',\r\n 'REVISA EL SIGUIENTE CIF PARA HACER LA CORRECCION:','\\t',df['cif'][row])\r\n \r\n former = input('Elemento Incorrecto: ')\r\n current = input('Elemento Correcto: ')\r\n \r\n llaves=[current if x == former else x for x in llaves]\r\n idx=[dicc[k] for k in llaves]\r\n \r\n \r\n for k in idx:\r\n vector[0][k-1] = ocupacion[idx.index(k)]\r\n \r\n \r\n sitocc[i]=vector\r\n \r\n while sitocc.shape[0] != max_sitios:\r\n sitocc=np.concatenate((np.zeros((1,104)),sitocc))\r\n s=[0]+s\r\n \r\n X[row,:,:]=sitocc\r\n y[row]=df['target'][row]\r\n mult[row]=s\r\n \r\n S = np.expand_dims(mult,axis=2)\r\n features=datos.iloc[:,2:].values\r\n x=X[:,:,:96]\r\n \r\n fracsum = np.expand_dims(np.sum(x,axis=2), axis=2)\r\n \r\n x=np.dot(x,features) \r\n\r\n print('Atomic radii and electronegativities for each Wyckoff site extracted in',\r\n round(time.time()-start,2),' s') \r\n \r\n np.save('raw_features', x)\r\n np.save('output_values', y)\r\n np.save('multiplicities', S)\r\n np.save('occupation_fractions', fracsum)\r\n \r\n return x, y, S, fracsum, df", "def galactic_to_MS():\n return MS_MATRIX", "def get_transcript_sgrnas(target_region_seq_df, context_len, pam_start, pam_len,\n sgrna_start, sgrna_len, pams, sg_positions):\n sgrna_df_list = []\n meta_columns = ['object_type', 'strand', 'transcript_id', 'seq_region_name', 'region_id', 'start', 'end']\n for i, row in target_region_seq_df.iterrows():\n seq_start = row['expanded_start']\n seq_end = row['expanded_end']\n sequence = row['seq']\n # Sequences on the positive strand\n pos_sgrna_df = tile.build_sgrna_df(sequence, context_len=context_len, pam_start=pam_start,\n pam_len=pam_len, sgrna_start=sgrna_start,\n sgrna_len=sgrna_len, pams=pams)\n pos_sgrna_df = get_sgrna_global_indices(pos_sgrna_df, seq_start, seq_end, 1, sg_positions)\n # assuming the target_region_seq_df is oriented on the positive sgRNA strand\n pos_sgrna_df['sgrna_strand'] = 1\n # Sequences on the negative strand\n rev_comp_seq = reverse_compliment(sequence)\n neg_sgrna_df = tile.build_sgrna_df(rev_comp_seq, context_len=context_len, pam_start=pam_start,\n pam_len=pam_len, sgrna_start=sgrna_start,\n sgrna_len=sgrna_len, pams=pams)\n neg_sgrna_df = get_sgrna_global_indices(neg_sgrna_df, seq_start, seq_end, -1, sg_positions)\n 
neg_sgrna_df['sgrna_strand'] = -1\n # Combine and filter sgrna_dfs\n sgrna_df = pd.concat([pos_sgrna_df, neg_sgrna_df])\n for col in meta_columns:\n sgrna_df[col] = row[col]\n sgrna_df_list.append(sgrna_df)\n concatenated_sgrna_dfs = (pd.concat(sgrna_df_list)\n .rename({'strand': 'transcript_strand',\n 'start': 'region_start',\n 'end': 'region_end',\n 'seq_region_name': 'chromosome'}, axis=1))\n return concatenated_sgrna_dfs", "def Jsc_integrated(self):\n fname = 'scalar_reduce_genRate.npy'\n base = self.sims[0].conf['General']['results_dir']\n self.log.info('Computing integrated Jsc for group at %s', base)\n path = os.path.join(base, fname)\n try:\n genRate = np.load(path)\n except FileNotFoundError:\n self.scalar_reduce('genRate')\n genRate = np.load(path)\n # Gen rate in cm^-3. Gotta convert lengths here from um to cm\n z_vals = self.sims[0].Z\n x_vals = self.sims[0].X\n y_vals = self.sims[0].Y\n z_integral = intg.trapz(genRate, x=z_vals, axis=0)\n x_integral = intg.trapz(z_integral, x=x_vals, axis=0)\n y_integral = intg.trapz(x_integral, x=y_vals, axis=0)\n # z_integral = intg.simps(genRate, x=z_vals, axis=0)\n # x_integral = intg.simps(z_integral, x=x_vals, axis=0)\n # y_integral = intg.simps(x_integral, x=y_vals, axis=0)\n # Convert period to cm and current to mA\n Jsc = 1000*(consts.e/(self.sims[0].period*1e-4)**2)*y_integral\n outf = os.path.join(base, 'jsc_integrated.dat')\n with open(outf, 'w') as out:\n out.write('%f\\n' % Jsc)\n self.log.info('Jsc_integrated = %f', Jsc)\n return Jsc", "def ceQTL(counts, dos, cov_mat, rsid):\n acov_mat = cov_mat.copy(deep=True)\n acov_mat['soi'] = dos.ix[rsid, acov_mat.index]\n res = sm.OLS(counts, acov_mat).fit()\n return(res)", "def get_predictions_coverage(self):\n\n latest_tag_frequency_file = self.get_latest_output_file_name(configurations.TAG_FREQUENCY_STREAMS, next=False)[1]\n latest_tag_frequency_file_location = os.path.join(configurations.OUTPUT_FILES_DIRECTORY, latest_tag_frequency_file)\n tag_frequency_file_df = pd.read_csv(latest_tag_frequency_file_location, header=0, index_col=0)\n all_streams = tag_frequency_file_df.index.values\n\n\n output_file_name = self.get_latest_output_file_name(configurations.SIMILAR_STREAMS_GENRATED_FILE_NAME, next=False)[1]\n output_file_location = os.path.join(configurations.OUTPUT_FILES_DIRECTORY, output_file_name)\n output_df = pd.read_csv(output_file_location, header=0, index_col=0)\n all_streams_set = set(all_streams)\n\n recommended_stream_counter = Counter()\n for idx, row in output_df.iterrows():\n for val in row:\n if not math.isnan(val):\n val_int = int(val)\n recommended_stream_counter[val_int] += 1\n\n recommended_streams_set = set(recommended_stream_counter.keys())\n\n print(\"Most common {0} recommendations...\".format(str(configurations.NUM_MOST_COMMON_STREAMS)))\n print(recommended_stream_counter.most_common(configurations.NUM_MOST_COMMON_STREAMS))\n coverage = len(recommended_streams_set)/len(all_streams_set)\n print(\"Coverage = {0}\".format(str(coverage)))\n\n non_recommended_streams = all_streams_set - recommended_streams_set\n if non_recommended_streams:\n print(\"These streams were not recommended at all: \")\n print(non_recommended_streams)\n else:\n print(\"All streams were recommended at least once\")", "def run_phaseg(locus_file, gam_file, vg_file, canu_alignments, true_haps):\n\trecombrate=1.26\n\tmax_coverage = 15\n\tall_heterozygous = False\n\tdistrust_genotypes = True\n\twith ExitStack() as stack:\n\t\tnode_seq_list, edge_connections = vg_graph_reader(vg_file)\n\t\tall_reads, 
alleles_per_pos, locus_branch_mapping = vg_reader(locus_file, gam_file, canu_alignments)\n\t\tall_positions = sorted(all_reads.get_positions())\n\t\tall_components = find_components(all_positions, all_reads)\n\t\tblocks = defaultdict(list)\n\t\tprint(\"all_components\")\n\t\tfor position, block_id in all_components.items():\n\t\t\tblocks[block_id].append(locus_branch_mapping[position][0][0][0])\n\t\tfor k,v in blocks.items():\n\t\t\tprint(k,v)\n\t\tprint(\"all_components\")\n\t\t\n\n\t\t#print(all_reads)\n\t\tselected_indices = readselection(all_reads, max_coverage)\n\t\tselected_reads = all_reads.subset(selected_indices)\n\n\t\t#selected_reads = slice_reads(all_reads, max_coverage)\n\t\t#print('positions from all reads')\n\t\t#print(len(all_reads.get_positions()))\n\t\tprint(\"reads after read-selection\")\n\t\tprint(len(selected_reads))\n\t\tprint(\"positions covered by atleast one read after read selection\")\n\t\tprint(len(selected_reads.get_positions()))\n\n\t\taccessible_positions = sorted(selected_reads.get_positions())\n\t\t\n\t\tprint(\"readset after read_selection\")\n\t\t#for read in selected_reads:\n\t\t\t#print(read.name)\n\t\tpedigree = Pedigree(NumericSampleIds())\n\t\t# compute the number of alleles at each position.\n\t\talleles_per_accessible_pos =[]\n\t\tgenotype_likelihoods = []\n\t\tfor pos in accessible_positions:\n\t\t\tif pos in alleles_per_pos:\n\t\t\t\tn_alleles = alleles_per_pos[pos] \n\t\t\t\tpossible_genotypes = n_alleles + ncr(n_alleles, 2)\n\t\t\t\tgenotype_likelihoods.append(None if all_heterozygous else PhredGenotypeLikelihoods([0]* possible_genotypes))\n\t\t# random input of genotypes, since distrust_genotypes is always ON.\n\t\tpedigree.add_individual('individual0', [0]* len(accessible_positions), genotype_likelihoods)\n\t\trecombination_costs = uniform_recombination_map(recombrate, accessible_positions)\n\t\t# Finally, run phasing algorithm\n\t\t#print(selected_reads)\n\t\tdp_table = PedigreeDPTable(selected_reads, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\tsuperreads_list, transmission_vector = dp_table.get_super_reads()\n\n\t\tcost = dp_table.get_optimal_cost()\n\t\tprint(superreads_list[0])\n\t\t#print(cost)\n\t\tread_partitions = dp_table.get_optimal_partitioning()\n\t\t#print(read_partitions)\n\t\t\n\t\t## To generate the connected components and corresponding haplotypes.\n\t\tprint(\"in components\")\n\t\tf = open('whole_genome' + '.predicted_read_partionting.pred', 'w')\n\t\toverall_components = find_components(accessible_positions, selected_reads)\n\t\t\n\t\tread_partitions_dict ={}\n\t\tfor read, haplotype in zip(selected_reads, read_partitions):\n\t\t\tphaseset = overall_components[read[0].position] + 1\n\t\t\tprint(read.name, phaseset, haplotype, file=f)\n\t\t\tread_partitions_dict[read.name] = haplotype\n\t\t#phaset is blockid\n\n\t\tn_phased_blocks = len(set(overall_components.values()))\n\t\tall_phased_blocks = len(set(all_components.values()))\n\t\tprint('No. of phased blocks: %d', n_phased_blocks)\n\t\tlargest_component = find_largest_component(overall_components)\n\t\tprint('No. 
of blocks from all the reads: %d', all_phased_blocks)\n\t\tlargest_component_all_reads = find_largest_component(all_components)\n\t\tif len(largest_component) > 0:\n\t\t\tprint('Largest component contains %d variants',len(largest_component))\n\t\tif len(largest_component_all_reads) > 0:\n\t\t\tprint('Largest component contains %d variants',len(largest_component_all_reads))\n\t\t\n\t\t\n\t\t### To generate contig sequences\n\t\tsample = 0\n\t\tsuperreads, components = dict(), dict()\n\t\tsuperreads[sample] = superreads_list[0]\n\t\tcomponents[sample] = overall_components\n\t\t#generate_hap_contigs_based_on_canu(superreads_list[0], components[sample], node_seq_list, locus_branch_mapping, edge_connections, canu_alignments, vg_file)\n\t\t#generate_hap_contigs(superreads_list[0], overall_components, node_seq_list, locus_branch_mapping, edge_connections)\n\t\t\n\t\tnodes_in_bubbles =[]\n\t\twith stream.open(str(locus_file), \"rb\") as istream:\n\t\t\tfor data in istream:\n\t\t\t\tl = vg_pb2.SnarlTraversal()\n\t\t\t\tl.ParseFromString(data)\n\t\t\t\tfor i in range(0,len(l.visits)):\n\t\t\t\t\tnodes_in_bubbles.append(l.visits[i].node_id)\n\t\t\t\t#nodes_in_bubbles.append(l.snarl.end.node_id)\n\t\t\t\t#nodes_in_bubbles.append(l.snarl.start.node_id)\n\t\tedge_connections_tmp = defaultdict(list)\n\t\twith stream.open(str(vg_file), \"rb\") as istream:\n\t\t\tfor data in istream:\n\t\t\t\tl = vg_pb2.Graph()\n\t\t\t\tl.ParseFromString(data)\n\t\t\t\tfor j in range(len(l.edge)):\n\t\t\t\t\tfrom_edge = getattr(l.edge[j], \"from\")\n\t\t\t\t\t#if from_edge not in nodes_in_bubbles and l.edge[j].to not in nodes_in_bubbles:\n\t\t\t\t\tedge_connections_tmp[str(from_edge)].append(str(l.edge[j].to))\n\t\t\t\t\tedge_connections_tmp[str(l.edge[j].to)].append(str(from_edge))\n\n\n\t\t#generate_hap_contigs_based_on_canu(superreads, components, node_seq_list, locus_branch_mapping, edge_connections, canu_alignments, vg_file)\n\t\t#generate_hap_contigs_avgRL(superreads, components, node_seq_list, locus_branch_mapping, edge_connections, edge_connections_tmp, gam_file, read_partitions_dict, nodes_in_bubbles)\n\t\t\n\t\t# evaluation partition all the reads based on one iteration\n\t\t#print('partition all the reads based on haplotypes from one iteration')\n\t\t# Check here if you wanna do all reads or selected reads only\n\t\t#haplotag(superreads_list[0], selected_reads, overall_components, 1)\n\t\t\n\t\t#compute_read_partitioning_accuracy(\"true_partioning\")\n\n\n\n\t\t##generate_hap_contigs(superreads, components, node_seq_list, locus_branch_mapping, edge_connections)\n\t\t\n\t\t##For phasing accuracy, read true haps and generate corresponding superreads\n\t\t#all_reads_true, alleles_per_pos_true, locus_branch_mapping_true = vg_reader(locus_file, true_haps)\n\t\t# Finally, run phasing algorithm for true haplotypes\n\t\t#dp_table_true = PedigreeDPTable(all_reads_true, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\t#superreads_list_true, transmission_vector_true = dp_table_true.get_super_reads()\n\t\t# to compute the phasing accuracy\n\t\t#true_haps = ReadSet()\n\t\t#for read in all_reads_true:\n\t\t\t#tmp_read = Read(read.name, 0, 0, 0)\n\t\t\t#for variant in read:\n\t\t\t\t#if variant.position in accessible_positions:\n\t\t\t\t\t#tmp_read.add_variant(variant.position, variant.allele, [10])\n\t\t\t#true_haps.add(tmp_read)\n\t\t#compare(superreads_list[0], true_haps, overall_components)\n\t\t## To perform iterative whatshap phasing\n\t\t#remaining_reads =[]\n\t\t#for read in 
all_reads:\n\t\t\t#remaining_reads.append(read.name)\n\t\t#prev_superreads = superreads_list[0]\n\t\t#for read in selected_reads:\n\t\t\t#remaining_reads.remove(read.name)\n\t\t#while len(remaining_reads)>0:\n\t\t\t#print('iteration')\n\t\t\t#iterative_reaset = ReadSet()\n\t\t\t#for read in all_reads:\n\t\t\t\t#if read.name in remaining_reads:\n\t\t\t\t\t#iterative_reaset.add(read)\n\n\t\t\t\t\n\t\t\t#selected_indices = readselection(iterative_reaset, max_coverage)\n\t\t\t#selected_reads = iterative_reaset.subset(selected_indices)\n\t\t\t#for read in prev_superreads:\n\t\t\t\t#selected_reads.add(read)\n\t\t\t\t#remaining_reads.append(read.name)\n\t\t\t#accessible_positions = sorted(selected_reads.get_positions())\n\t\t\t#selected_reads.sort()\n\t\t\t#pedigree = Pedigree(NumericSampleIds())\n\t\t\t## compute the number of alleles at each position.\n\t\t\t#alleles_per_accessible_pos =[]\n\t\t\t#genotype_likelihoods = []\n\t\t\t#for pos in accessible_positions:\n\t\t\t\t#if pos in alleles_per_pos:\n\t\t\t\t\t#n_alleles = alleles_per_pos[pos] \n\t\t\t\t\t#possible_genotypes = n_alleles + ncr(n_alleles, 2)\n\t\t\t\t\t#genotype_likelihoods.append(None if all_heterozygous else PhredGenotypeLikelihoods([0]* possible_genotypes))\n\t\t\t## random input of genotypes, since distrust_genotypes is always ON.\n\t\t\t#pedigree.add_individual('individual0', [0]* len(accessible_positions), genotype_likelihoods)\n\t\t\t#recombination_costs = uniform_recombination_map(recombrate, accessible_positions)\n\t\t\t## Finally, run phasing algorithm\n\t\t\t##print(selected_reads)\n\t\t\t#dp_table = PedigreeDPTable(selected_reads, recombination_costs, pedigree, distrust_genotypes, accessible_positions)\n\t\t\t#superreads_list, transmission_vector = dp_table.get_super_reads()\n\t\t\t#for read in selected_reads:\n\t\t\t\t#remaining_reads.remove(read.name)\n\t\t\t#prev_superreads = superreads_list[0]\n\t\t\t\n\t\t#print('I am final')\n\t\t#accessible_positions = sorted(all_reads.get_positions())\n\t\t#overall_components = find_components(accessible_positions, all_reads)\n\t\t#haplotag(superreads_list[0], all_reads, overall_components, \"all_iter\")\n\t\t#compare(superreads_list[0], superreads_list_true[0], overall_components)\n\t\t#print(superreads_list[0])\n\t\t\n\t\t#iterative whatshap for sparse matrices where we fix the phasing for variants at each iteration that reach max coverage.", "def load_ChEMBL_pkd():\n affinity = pd.read_csv('./dataset/regression/ChEMBL/Chem_Affinity.txt', header=None)\n affinity = affinity.fillna(-1)\n target = pd.read_csv('./dataset/regression/ChEMBL/ChEMBL_Target_Sequence.txt', header=None)\n drug = pd.read_csv('./dataset/regression/ChEMBL/Chem_SMILES_only.txt', header=None)\n \n SMILES=[]\n Target=[]\n y=[]\n drugcnt=[]\n \n for i in range(len(target)):\n Target.append(target[0][i])\n y.append(affinity[0][i])\n SMILES.append(drug[0][i])\n\n aff=[]\n total=[]\n for i in range(len(target)):\n aff.insert(i, y[i].split(\" \"))\n for i in aff:\n total += i\n for i in range(len(SMILES)):\n drugcnt.insert(i, len(SMILES[i].split()))\n \n smile = []\n for segments in SMILES:\n for x in segments.split():\n smile.extend(x)\n #smile = [x for segments in SMILES for x in segments.split()]\n smiles_res=[]\n y_tmp=[]\n target_res=[]\n tmp=[]\n \n for i in range(len(drugcnt)):\n tmp.extend(repeat(Target[i], drugcnt[i]))\n for i in range(len(total)):\n if total[i] != '-1':\n y_tmp.append(total[i])\n smiles_res.append(smile[i])\n target_res.append(tmp[i])\n\n y_res = [float(i) for i in y_tmp]\n return 
np.array(smiles_res), np.array(target_res), np.array(y_res)", "def do_qc(fn, df, year):\n (lon, lat) = fn2lonlat(fn)\n stage4 = compute_stage4(lon, lat, year)\n # Does the frame appear to have all dates?\n if len(df.index) != len(df.resample(\"D\").mean().index):\n print(\"ERROR: Appears to be missing dates!\")\n\n if open(fn).read()[-1] != \"\\n\":\n print(\"ERROR: File does not end with \\\\n\")\n\n print(\"--------- Summary stats from the .cli file\")\n print(\"YEAR | RAIN | MAXRATE | MAXACC | #DAYS | #>1RT | RAD/D\")\n print(\" --- | --- | --- | --- | --- | --- | ---\")\n for _year, gdf in df.groupby(by=df.index.year):\n print(\n (\"%s | %6.2f | %7.2f | %7.2f | %6i | %6i | %6.0f\")\n % (\n _year,\n mm2inch(gdf[\"pcpn\"].sum()),\n mm2inch(gdf[\"maxr\"].max()),\n mm2inch(gdf[\"pcpn\"].max()),\n len(gdf[gdf[\"pcpn\"] > 0].index),\n len(gdf[gdf[\"maxr\"] > 25.4].index),\n gdf[\"rad\"].mean(),\n )\n )\n\n print(\"---- Months with < 0.05 precipitation ----\")\n gdf = df.groupby(by=[df.index.year, df.index.month])[\"pcpn\"].sum()\n print(gdf[gdf < 1.0])\n\n print(\"----- Average high temperature -----\")\n print(\"YEAR | Avg High F | Avg Low F | Days > 100F\")\n print(\" --- | --- | --- | ---\")\n for _year, gdf in df.groupby(by=df.index.year):\n print(\n (\"%s | %6.2f | %6.2f | %3i\")\n % (\n _year,\n c2f(gdf[\"tmax\"].mean()),\n c2f(gdf[\"tmin\"].mean()),\n len(gdf[gdf[\"tmax\"] > 37.7].index),\n )\n )\n\n monthly = df[df.index.year == year][\"pcpn\"].resample(\"M\").sum().copy()\n monthly = pd.DataFrame(\n {\"dep\": mm2inch(monthly.values)}, index=range(1, 13)\n )\n\n # Get prism, for a bulk comparison\n prism = requests.get(\n (\n \"http://mesonet.agron.iastate.edu/json/prism/\"\n \"%.2f/%.2f/%s0101-%s1231\"\n )\n % (lon, lat, year, year)\n ).json()\n rows = []\n for entry in prism[\"data\"]:\n rows.append(\n {\n \"date\": datetime.datetime.strptime(\n entry[\"valid\"][:10], \"%Y-%m-%d\"\n ),\n \"precip\": entry[\"precip_in\"],\n }\n )\n prismdf = pd.DataFrame(rows)\n prismdf = prismdf.set_index(\"date\")\n monthly[\"prism\"] = prismdf[\"precip\"].resample(\"M\").sum().copy().values\n\n # Compare daily values\n iemjson = requests.get(\n (\n \"http://mesonet.agron.iastate.edu/iemre/multiday/\"\n \"%s-01-01/%s-12-31/%s/%s/json\"\n )\n % (year, year, lat, lon)\n ).json()\n rows = []\n for entry in iemjson[\"data\"]:\n rows.append(\n {\n \"date\": datetime.datetime.strptime(entry[\"date\"], \"%Y-%m-%d\"),\n \"precip\": entry[\"daily_precip_in\"],\n }\n )\n iemdf = pd.DataFrame(rows)\n iemdf = iemdf.set_index(\"date\")\n print(\"PRISM %s precip is: %.2f\" % (year, prismdf[\"precip\"].sum()))\n print(\"IEMRE sum precip is: %.2f\" % (iemdf[\"precip\"].sum(),))\n print(\"StageIV sum precip is: %.2f\" % (stage4[\"precip\"].sum(),))\n monthly[\"stage4\"] = stage4[\"precip\"].resample(\"M\").sum().copy().values\n monthly[\"iemre\"] = iemdf[\"precip\"].resample(\"M\").sum().copy().values\n monthly[\"prism-dep\"] = monthly[\"prism\"] - monthly[\"dep\"]\n monthly[\"iemre-dep\"] = monthly[\"iemre\"] - monthly[\"dep\"]\n\n print(\" --------- %s Monthly Totals --------\" % (year,))\n print(monthly)\n df.at[\n slice(datetime.date(year, 1, 1), datetime.date(year, 12, 31)),\n \"stage4_precip\",\n ] = stage4[\"precip\"].values\n df[\"iemre_precip\"] = iemdf[\"precip\"]\n df[\"diff_precip\"] = df[\"pcpn_in\"] - df[\"iemre_precip\"]\n df[\"diff_stage4\"] = df[\"pcpn_in\"] - df[\"stage4_precip\"]\n print(\" --- Top 5 Largest DEP > IEMRE ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n 
\"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_precip\", ascending=False)\n .head()\n )\n print(\" --- Top 5 Largest IEMRE > DEP ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n \"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_precip\", ascending=True)\n .head()\n )\n\n print(\" --- Top 10 Largest Stage4 > DEP ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n \"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_stage4\", ascending=True)\n .head(10)\n )\n print(\" vvv job listing based on the above vvv\")\n for dt in df.sort_values(by=\"diff_stage4\", ascending=True).head(10).index:\n print(\n \"python daily_clifile_editor.py 0 %s %s %s\"\n % (dt.year, dt.month, dt.day)\n )\n df2 = df.loc[slice(datetime.date(year, 1, 1), datetime.date(year, 1, 31))][\n [\"diff_precip\", \"pcpn_in\", \"iemre_precip\", \"stage4_precip\"]\n ].sort_values(by=\"diff_precip\")\n print(\" --- Daily values for month \" \"\")\n print(df2)", "def test_gclda_symmetric(testdata_laird):\n counts_df = annotate.text.generate_counts(\n testdata_laird.texts,\n text_column=\"abstract\",\n tfidf=False,\n min_df=1,\n max_df=1.0,\n )\n\n with pytest.raises(ValueError):\n annotate.gclda.GCLDAModel(\n counts_df,\n testdata_laird.coordinates,\n mask=testdata_laird.masker.mask_img,\n n_regions=3,\n symmetric=True,\n )\n\n model = annotate.gclda.GCLDAModel(\n counts_df,\n testdata_laird.coordinates,\n mask=testdata_laird.masker.mask_img,\n n_regions=2,\n symmetric=True,\n )\n model.fit(n_iters=5, loglikely_freq=5)\n\n # Create ROI to decode\n arr = np.zeros(testdata_laird.masker.mask_img.shape, np.int32)\n arr[40:44, 45:49, 40:44] = 1\n mask_img = nib.Nifti1Image(arr, testdata_laird.masker.mask_img.affine)\n decoded_df, _ = decode.discrete.gclda_decode_roi(model, mask_img)\n assert isinstance(decoded_df, pd.DataFrame)\n\n # Decode the ROI as a continuous map\n decoded_df, _ = decode.continuous.gclda_decode_map(model, mask_img)\n assert isinstance(decoded_df, pd.DataFrame)\n\n # Encode text\n encoded_img, _ = decode.encode.gclda_encode(model, \"fmri activation\")\n assert isinstance(encoded_img, nib.Nifti1Image)", "def test_pcca_1():\n n = 1000 #number of data points\n kk = 3 #number of points where data accumulates\n k = 10 #number of cluster_centers\n factor = 0.1 #how much is the data perturbed\n data = np.zeros((n,1))\n for i in range(0,n):\n data[i] = i % kk + factor * np.random.rand() * math.pow(-1,int(2*np.random.rand()))\n #plt.scatter(data[:,0],np.zeros((n,1)))\n \n clustering = cl.KMeans(data,k)\n cluster_centers = clustering.cluster_centers\n cluster_labels = clustering.cluster_labels\n \n #plt.scatter(cluster_centers[:],np.zeros((k,1)),c='r')\n \n estimator = est.Estimator(cluster_labels, 1, 1)\n matrix = estimator.reversible_transition_matrix\n msm = ana.MarkovStateModel(matrix)\n \n n_pcca_states = 4;\n #fig, ax = plt.subplots(figsize=(6.5, 5))\n pcca_labels = msm.metastable_set_assignments(n_pcca_states)\n #im = ax.scatter(cluster_centers[:, 0], np.zeros((k,1)), c=pcca_labels, s=200)\n #cbar = fig.colorbar(im, ax=ax)\n error = 0;\n for j in range(0,kk):\n for i in range(0,k):\n if (round(cluster_centers[i,0]) == j):\n test = i\n for i in range(0,k):\n if (np.abs(cluster_centers[i,0] - cluster_centers[test,0]) < 2*factor):\n if (not pcca_labels[i] == pcca_labels[test]):\n error = 1\n print(error)\n assert_true(error == 0)", "def 
context_study_stats(frame_path=METRICS_DIR+'/merge.csv'):\n frame = pd.read_csv(frame_path)\n print(frame['LOC_prod'].mean())\n print(frame['LOC_prod'].sum())\n print(frame['LOC_test'].sum())\n print(frame['no_mutations'].sum())\n print(frame.shape[0])\n\n sizes = frame.groupby('project').size()\n prod = frame.groupby('project')['LOC_prod'].sum( )\n test = frame.groupby('project')['LOC_test'].sum()\n mutants = frame.groupby('project')['no_mutations'].sum()\n\n result = pd.DataFrame({'project': list(sizes.index),\n 'size': list(sizes),\n 'prod': list(prod),\n 'test': list(test),\n 'mutants': list(mutants)},\n columns=['project', 'size', 'prod', 'test', 'mutants'])\n print(result.to_latex())", "def runcircos(self):\n pd.read_csv(self.cns, sep=\"\\t\")[\n [\"chromosome\", \"start\", \"end\", \"tcn\"]\n ].rename({\"chromosome\": \"chrm\", \"tcn\": \"cns\"}, axis=1).to_csv(\n self.segs, index=None\n )\n\n passed_svs = [\n sv\n for sv in self.svs.values()\n ]\n circos_sv_file = os.path.join(\n self.out_dir, \"circos_svs.tsv\"\n )\n circos_df = pd.DataFrame(\n [\n (\"chr\" + sv.chr1, sv.pos1, sv.pos1, \"chr\" + sv.chr2, sv.pos2, sv.pos2)\n for sv in passed_svs\n ],\n columns=[\n \"Chromosome\",\n \"chromStart\",\n \"chromEnd\",\n \"Chromosome.1\",\n \"chromStart.1\",\n \"chromEnd.1\",\n ],\n )\n circos_df.to_csv(circos_sv_file, index=None)", "def causDspectra(uxmax, uymax, ax, ay, dso, dsl, dm, m, n, N):\n \n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n dlo = dso - dsl\n coeff = dsl*dlo*re*dm/(2*pi*dso)\n \n rx = np.linspace(xmin - 5., xmax + 5., 500)\n ry = np.linspace(ymin - 5., ymax + 5., 500)\n uvec = np.meshgrid(rx, ry)\n A, B, C, D, E = causticFreqHelp(uvec, ax, ay, m, n)\n upxvec = np.linspace(xmin, xmax, N)\n freqcaus = []\n for upx in upxvec:\n eq1 = A*upx**2 + B*upx + C\n eq2 = D*upx + E\n evcaus = np.array([eq1, eq2])\n roots = polishedRootsBulk(evcaus, causEqFreq, rx, ry, args = (upx, ax, ay, m, n))\n for root in roots:\n ux, uy = root\n arg = coeff*lensg(ux, uy)[0]/(ux - upx)\n # print(arg)\n if arg > 0:\n freq = c*np.sqrt(arg)/(ax*GHz)\n if freq > 0.01:\n freqcaus.append([upx, freq])\n # print(freqcaus)\n freqcaus = np.asarray(freqcaus).T\n # plt.scatter(freqcaus[0], freqcaus[1], marker = '.', color = 'black', s = 3.)\n # plt.xlim(xmin, xmax)\n # plt.ylim(0., max(freqcaus[1]) + 0.5)\n # plt.xlabel(r\"$u'_x$\", fontsize = 16)\n # plt.ylabel(r'$\\nu$ (GHz)', fontsize = 16)\n # plt.grid()\n # plt.show()\n return freqcaus", "def biom_output(self, name='samples.biom'):\n data = self.a.df_abundances\n with open(self.a.out_dir + name, 'w') as handle:\n # Basic #\n sample_ids = data.columns\n sample_md = None\n observation_ids = data.index\n # Observation metadata #\n observation_md = []\n for seq in data.index:\n seq_name = self.a.orig_names_to_renamed[seq]\n counts = self.a.seq_to_counts.get(seq_name)\n if not counts: observation_md.append({})\n else: observation_md.append({'source': counts})\n # Output #\n t = biom.table.Table(data.transpose().as_matrix(), sample_ids, observation_ids, sample_md, observation_md)\n handle.write(t.to_json('seqenv version %s') % seqenv.__version__)", "def cossim(corpus):\n files = os.listdir()\n vectorizer = TfidfVectorizer()\n trsfm = vectorizer.fit_transform(corpus)\n columns = vectorizer.get_feature_names()\n df_tfidf = pd.DataFrame(trsfm.toarray(), columns = columns, index = 
corpus)\n out = cosine_similarity(trsfm)\n df_result = pd.DataFrame(out, columns = files, index = files)\n return df_result", "def _snrenv_to_pc(snrenv, k=None, q=None, sigma_s=None, m=None):\n un = norm.ppf(1.0 - 1.0 / m)\n sn = 1.28255 / un\n un += 0.577 / un\n dp = k * snrenv ** q\n return norm.cdf(dp, un, np.sqrt(sigma_s ** 2 + sn ** 2)) * 100", "def ca_to_coils_second_df(agent_df):", "def idog_conv(sc):\n return math.sqrt(sc*2)", "def CL(self):" ]
[ "0.55647254", "0.49846494", "0.48036072", "0.4783149", "0.47653148", "0.47555912", "0.47205758", "0.47141117", "0.46780866", "0.4677644", "0.46744534", "0.4664888", "0.46561313", "0.4651731", "0.46359685", "0.46011293", "0.4596688", "0.45749894", "0.45679227", "0.45530605", "0.45446667", "0.44948655", "0.44901535", "0.44893095", "0.44892", "0.44848907", "0.4481873", "0.44788462", "0.4470406", "0.4462831", "0.4458076", "0.44571707", "0.4454197", "0.44517976", "0.4438931", "0.4433001", "0.4431192", "0.44251838", "0.44233868", "0.44175956", "0.4416754", "0.4412691", "0.44021207", "0.43999204", "0.43995002", "0.4396869", "0.4393578", "0.43896374", "0.438897", "0.4387073", "0.43855423", "0.4385297", "0.43822673", "0.4369672", "0.4362151", "0.4350303", "0.43490425", "0.43482807", "0.43363294", "0.43304652", "0.43265373", "0.43239537", "0.43226263", "0.43196616", "0.4318778", "0.4315391", "0.43149322", "0.4312836", "0.4310891", "0.43015847", "0.42963052", "0.42957127", "0.42927808", "0.42926088", "0.42910093", "0.4287684", "0.42849046", "0.42797852", "0.42736104", "0.42720097", "0.4263917", "0.4262528", "0.42611164", "0.4259174", "0.42540082", "0.42518172", "0.4246931", "0.42464507", "0.4245288", "0.42425808", "0.42423633", "0.42418763", "0.42389682", "0.42364264", "0.42341292", "0.4232769", "0.42291176", "0.42273268", "0.42260915", "0.42250156", "0.42245346" ]
0.0
-1
get per gene average multimapping score
def compute_mm(mmdata):
    mm_df = pd.DataFrame(columns=['ORF', 'MM'])
    counter = 0
    for gene in mmdata.keys():
        current_matrix = mmdata[gene]
        current_avrg = np.mean(
            np.sum(current_matrix, 1) / current_matrix.shape[1]
        )
        mm_df.loc[counter] = [gene, current_avrg]
        counter += 1
    return mm_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_avg_score(df, score):\n avg_score = (df.groupby(['condition', 'gene_a', 'gene_b'])\n .agg({score: 'mean'})\n .reset_index())\n return avg_score", "def compute_ave_score_w_sample(genes, samples):\n\n scores = np.zeros(len(genes), dtype=np.uint32)\n\n for i, v in enumerate(genes):\n for j in samples:\n score, _ = run_duel(v, j)\n scores[i] += score\n continue\n continue\n\n return scores / len(samples)", "def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])", "def _gsea_score(self, gene_list, correl_vector, gene_set, weighted_score_type=1, \n single=False, scale=False):\n N = len(gene_list)\n tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)\n\n if weighted_score_type == 0 :\n correl_vector = np.repeat(1, N)\n else:\n correl_vector = np.abs(correl_vector)**weighted_score_type\n\n # GSEA Enrichment Score\n Nhint = tag_indicator.sum()\n sum_correl_tag = np.sum(correl_vector*tag_indicator)\n\n no_tag_indicator = 1 - tag_indicator\n Nmiss = N - Nhint\n norm_tag = 1.0/sum_correl_tag\n norm_no_tag = 1.0/Nmiss\n RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag)\n\n if scale: RES = RES / N\n if single: # ssGSEA\n es = RES.sum()\n else:\n max_ES, min_ES = RES.max(), RES.min()\n es = max_ES if np.abs(max_ES) > np.abs(min_ES) else min_ES \n # extract values\n return es", "def average_population_grade(population):\r\n total = 0\r\n for individual in population :\r\n total += get_individual_fitness(individual)\r\n return total/POPULATION_COUNT", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def calculate_score_persona(self, edge_list):\n score_list = []\n if self.proximity_function == \"dot\":\n for src, tag in edge_list:\n src_personas = self.node_to_persona[src]\n tag_personas = self.node_to_persona[tag]\n max_sim = self.aggregate_function(\n [\n np.dot(self.emb[src_persona], self.emb[tag_persona])\n for src_persona in src_personas\n for tag_persona in tag_personas\n ]\n )\n score_list.append(max_sim)\n elif self.proximity_function == \"cos\":\n for src, tag in edge_list:\n src_embs = [self.emb[persona] for persona in self.node_to_persona[src]]\n tag_embs = [self.emb[persona] for persona in self.node_to_persona[tag]]\n max_sim = np.amax(cosine_similarity(src_embs, tag_embs))\n score_list.append(max_sim)\n return score_list\n\n return score_list", "def mean_ensembler(self):\n models_name = self._get_models_name(self.data_dir)\n if type(models_name) != list:\n models_name = [models_name]\n\n get_test_y = self._get_model_val(models_name, self.data_dir, 'test')\n # Calculate the average\n average = pd.DataFrame()\n for add in list(get_test_y.keys()):\n if average.empty:\n average[self.TAGS] = get_test_y[add][self.TAGS]\n else:\n average[self.TAGS] += get_test_y[add][self.TAGS]\n\n average = average/len(list(get_test_y.keys()))\n average.insert(loc=0, column='id', value=self.test_id.values)\n doc_name = self.data_dir + '/' + 'submission_average_ensembler.csv'\n average.to_csv(doc_name, index=False)\n print('submission file saved at {}'.format(doc_name))\n return average", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def calculate_avg_score(state_score,state_count):\n\tfor state in state_score.keys():\n\t\tstate_score[state] = 1.*state_score[state]/state_count[state]\n\treturn state_score", "def get_average(self, s_freq, e_freq):\n s_ind = self.get_bin(s_freq)\n 
e_ind = self.get_bin(e_freq)\n lst = self.mags[s_ind:e_ind+1]\n try:\n avg = sum(lst)/len(lst)\n except:\n print(s_ind, e_ind)\n print('werid stuff')\n avg = 0\n return avg", "def grade(population, target_sum, target_mult):\r\n summed = reduce(add, (fitness(x, target_sum, target_mult) for x in population), 0)\r\n return summed / len(population)", "def get_mean_of_all_genres(df, merged):\n all_genres = get_all_genres_from_df(df)\n mean_genres = {}\n for genres in all_genres:\n mean_genres[genres] = df['rating'][df[genres] == 1].mean()\n\n\n change_nan(mean_genres) # change Nan value\n\n\n for genres in all_genres:\n merged.loc[merged.genre == genres, 'rating'] = merged.loc[merged.genre == genres, 'rating'].map(lambda x : x - mean_genres[genres])\n\n return mean_genres", "def algo(GENE_VALUES_MATRIX):\n\n\tA = GENE_VALUES_MATRIX\n\n\tAA = np.zeros_like(A)\n\n\tI = np.argsort(A,axis=0)\n\n\tAA[I,np.arange(A.shape[1])] = np.mean(A[I,np.arange(A.shape[1])],axis=1)[:,np.newaxis]\n\n\treturn AA", "def record_em_score(record_examples: List[RecordNestedExample]):\n if not record_examples:\n return 0.\n em_scores = []\n for example in record_examples:\n example_ems = []\n for answer in example.answers:\n example_ems.append(string_f1_score(example.prediction, answer))\n if example_ems:\n em_scores.append(max(example_ems))\n return np.mean(em_scores) if em_scores else -1", "def mean_avg_precision(top_k_results, relevance):\n map_score = 0.0\n for j, scores in relevance.items():\n precision, _ = calculate_precision_recall(top_k_results[j - 1], scores)\n relevant = set()\n for x in scores:\n relevant.add(x[0])\n \n precision_score, cnt = 0.0, 0\n for i in range(len(top_k_results[j - 1])):\n if top_k_results[j - 1][i] in relevant:\n precision_score += precision[i]\n cnt += 1\n \n map_score += precision_score if cnt == 0 else precision_score / cnt\n \n map_score /= len(relevance)\n \n return map_score", "def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS", "def genre_average(genre_vectors):\n array = [vector for vector in genre_vectors]\n return np.average(array, axis=0)", "def get_base_score(df, ctl_genes):\n base_score = (df[df.target_gene.isin(ctl_genes)]\n .groupby(['anchor_guide', 'condition'])\n .agg({'lfc': 'median'})\n .reset_index())\n return base_score", "def class_average_withali(images,ptcl_info,xform,ref,averager=(\"mean\",{}),normproc=(\"normalize.edgemean\",{}),setsfref=0,verbose=0):\n\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list\"\n\n\tincl=[]\n\texcl=[]\n#\txforms=[]\n\tavgr=Averagers.get(averager[0], averager[1])\n\tfor i in range(nimg):\n\t\timg=get_image(images,i,normproc)\n\t\tptcl_info[i]=(ptcl_info[i][0],xform*ptcl_info[i][1],ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n#\t\tptcl_info[i]=(ptcl_info[i][0],ptcl_info[i][1]*xform,ptcl_info[i][2])\t\t# apply the new Transform to the existing one\n\t\timg.process_inplace(\"xform\",{\"transform\":ptcl_info[i][1]})\n\t\ttry: use=ptcl_info[i][2]\n\t\texcept: use=1\n\t\tif use :\n\t\t\tavgr.add_image(img)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\tif img.has_attr(\"source_n\") : incl.append(img[\"source_n\"])\n#\t\t\txforms.append(ptcl_info[i][1])\n\t\telif img.has_attr(\"source_n\") : excl.append(img[\"source_n\"])\n\n\tavg=avgr.finish()\n\n\t# normalize to the reference, this should make make3dpar work better as we can 
skip the normalization step\n\tif ref!=None :\n\t\tif setsfref:\n\t\t\tavg.process_inplace(\"filter.matchto\",{\"to\":ref,\"interpolate\":0,\"keephires\":1})\n\t\t\tavg-=avg.get_edge_mean()\n\t\telse : avg.process_inplace(\"normalize.toimage\",{\"to\":ref})\n\n\t\tavg[\"class_qual\"]=avg.cmp(\"ccc\",ref)\n\n\t# set some useful attributes\n\tif len(incl)>0 or len(excl)>0 :\n\t\tif len(incl)>0 : avg[\"class_ptcl_idxs\"]=incl\n\t\tif len(excl)>0 : avg[\"exc_class_ptcl_idxs\"]=excl\n#\t\tif len(xforms)>0: avg[\"class_ptcl_xforms\"]=xforms\n\t\tavg[\"class_ptcl_src\"]=img[\"source_path\"]\n\n\treturn avg", "def average_ndcg(self, r):\n scores = []\n score = []\n for rank_max in range(1, len(r[0]) + 1):\n score = []\n for data in r:\n score.append(self.ndcg_at_k(data[:rank_max], rank_max, method = 0))\n scores.append(reduce(lambda x, y: x + y, score) / len(score))\n return scores", "def average_scores(self, scores, education, count):\n\n for key in scores.keys():\n for k in scores[key].keys():\n scores[key][k] = round(scores[key][k] / count[key][k], 1)\n education[key][k] = round(education[key][k] / count[key][k], 1)\n\n return scores, education", "def average_fitness(individuals):\n fitness_num = 0\n for individual in individuals:\n fitness = individual.get_fitness()\n fitness_num += fitness\n return fitness_num / len(individuals)", "def average(array):\n unique_vals = set(array)\n return sum(unique_vals) / len(unique_vals)\n\n \n # your code goes here", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def sim_avg(sim_mats):\n return np.array(sim_mats).mean(axis=0)", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def get_score(self, summ_tids, gold_list):\n k = len(summ_tids)\n f_list = []\n for gold in gold_list:\n if len(gold) !=k:\n print('gold-k:',len(gold), k)\n assert len(gold)==k # for ESBM\n corr = len([t for t in summ_tids if t in gold])\n precision = corr/k\n recall = corr/len(gold)\n f_score = 2*((precision*recall)/(precision+recall)) if corr!=0 else 0\n f_list.append(f_score)\n favg = np.mean(f_list)\n return favg", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def _mean(self,gp):\r\n return self.gp_link.transf(gp)", "def azmap (scores, compare, dimension=0):\r\n mns = amean(compare,dimension)\r\n sstd = asamplestdev(compare,0)\r\n return (scores - mns) / sstd", "def get_map(self):\n average_precisions = []\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n average_precision = 0\n n_relevant = 0\n for i, candidate in enumerate(answer):\n if candidate in correct_set:\n n_relevant += 1\n average_precision += (n_relevant / (i + 1))\n average_precision /= len(correct_set)\n average_precisions.append(average_precision)\n \n return np.mean(average_precisions)", "def 
class_average(images,ref=None,niter=1,normproc=(\"normalize.edgemean\",{}),prefilt=0,align=(\"rotate_translate_flip\",{}),\n\t\taligncmp=(\"ccc\",{}),ralign=None,raligncmp=None,averager=(\"mean\",{}),scmp=(\"ccc\",{}),keep=1.5,keepsig=1,automask=0,saveali=0,verbose=0,callback=None,center=\"xform.center\"):\n\n\tif verbose>2 : print \"class_average(\",images,ref,niter,normproc,prefilt,align,aligncmp,ralign,raligncmp,averager,scmp,keep,keepsig,automask,verbose,callback,\")\"\n\n\t# nimg is the number of particles we have to align/average\n\tif isinstance(images[0],EMData) : nimg=len(images)\n\telif isinstance(images[0],str) and isinstance(images[1],int) : nimg=len(images)-1\n\telse : raise Exception,\"Bad images list (%s)\"%str(images)\n\n\tif verbose>2 : print \"Average %d images\"%nimg\n\n\t# If one image and no reference, just return it\n\tif nimg==1 and ref==None : return (get_image(images,0,normproc),[(0,Transform(),1)])\n\n\t# If one particle and reference, align and return\n\tif nimg==1:\n\t\tif averager[0]!=\"mean\" : raise Exception,\"Cannot perform correct average of single particle\"\n\t\tali=align_one(get_image(images,0,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\ttry: ali[\"model_id\"]=ref[\"model_id\"]\n\t\texcept: pass\n\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\treturn (ali,[(sim,ali[\"xform.align2d\"],1)])\n\n\t# If we don't have a reference image, we need to make one\n\tif ref==None :\n\t\tif verbose : print \"Generating reference\"\n#\t\tsigs=[(get_image(i)[\"sigma\"],i) for i in range(nimg)]\t\t# sigma for each input image, inefficient\n#\t\tref=get_image(images,max(sigs)[1])\n\t\tref=get_image(images,0,normproc)\t\t\t\t\t\t\t\t\t\t# just start with the first, as EMAN1\n\n\t\t# now align and average the set to the gradually improving average\n\t\tfor i in range(1,nimg):\n\t\t\tif verbose>1 :\n\t\t\t\tprint \".\",\n\t\t\t\tsys.stdout.flush()\n\t\t\tali=align_one(get_image(images,i,normproc),ref,prefilt,align,aligncmp,ralign,raligncmp)\n\t\t\tref.add(ali)\n\n\t\t# A little masking and centering\n\t\ttry:\n\t\t\tgmw=max(5,ref[\"nx\"]/16)\t\t# gaussian mask width\n\t\t\t#ref.process_inplace(\"filter.highpass.gauss\",{\"cutoff_pixels\":min(ref[\"nx\"]/10,5)})\t# highpass to reduce gradient issues\n\t\t\t#ref.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2=ref.process(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\t#ref2.process_inplace(\"filter.lowpass.gauss\",{\"cutoff_abs\":0.07})\t# highpass to reduce gradient issues\n\t\t\t#ref2.process_inplace(\"normalize.circlemean\")\n\t\t\t#ref2.process_inplace(\"threshold.binary\",{\"value\":ref[\"mean\"]+ref[\"sigma\"]*1.5})\n\t\t\t#ref2.process_inplace(\"xform.centerofmass\",{\"threshold\":0.5})\t\t\t\t\t\t# TODO: should probably check how well this works\n\t\t\t#fxf=ref2[\"xform.align2d\"]\n\t\t\t#ref.translate(fxf.get_trans())\n\t\t\t\n\t\t\tif center:\t#jesus\n\t\t\t\tref.process_inplace(center)\n\t\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\tref_orient=None\n\t\texcept:\n\t\t\ttraceback.print_exc()\n\telse:\n\t\ttry: ref_orient=ref[\"xform.projection\"]\n\t\texcept: ref_orient=None\n\n\t\ttry: ref_model=ref[\"model_id\"]\n\t\texcept: ref_model=0\n\n\tif verbose>1 : print \"\"\n\n\tinit_ref=ref.copy()\n\n\t# Iterative 
alignment\n\tptcl_info=[None]*nimg\t\t# empty list of particle info\n\n\t# This is really niter+1 1/2 iterations. It gets terminated 1/2 way through the final loop\n\tfor it in range(niter+2):\n\t\tif verbose : print \"Starting iteration %d\"%it\n\t\tif callback!=None : callback(int(it*100/(niter+2)))\n\n\t\tmean,sigma=0.0,1.0\t\t# defaults for when similarity isn't computed\n\n\t\t# Evaluate quality from last iteration, and set a threshold for keeping particles\n\t\tif it>0:\n\t\t\t# measure statistics of quality values\n\t\t\tmean,sigma=0,0\n\t\t\tfor sim,xf,use in ptcl_info:\n\t\t\t\tmean+=sim\n\t\t\t\tsigma+=sim**2\n\t\t\tmean/=len(ptcl_info)\n\t\t\tsigma=sqrt(sigma/len(ptcl_info)-mean**2)\n\n\t\t\t# set a threshold based on statistics and options\n\t\t\tif keepsig:\t\t\t\t\t# keep a relative fraction based on the standard deviation of the similarity values\n\t\t\t\tthresh=mean+sigma*keep\n\t\t\t\tif verbose>1 : print \"mean = %f\\tsigma = %f\\tthresh=%f\"%(mean,sigma,thresh)\n\t\t\telse:\t\t\t\t\t\t# keep an absolute fraction of the total\n\t\t\t\tl=[i[0] for i in ptcl_info]\n\t\t\t\tl.sort()\n\t\t\t\ttry: thresh=l[int(len(l)*keep)]\n\t\t\t\texcept:\n\t\t\t\t\tif verbose: print \"Keeping all particles\"\n\t\t\t\t\tthresh=l[-1]+1.0\n\n\t\t\tif verbose:\n\t\t\t\tprint \"Threshold = %1.4f Quality: min=%f max=%f mean=%f sigma=%f\"%(thresh,min(ptcl_info)[0],max(ptcl_info)[0],mean,sigma)\n\n\t\t\t# mark the particles to keep and exclude\n\t\t\tnex=0\n\t\t\tfor i,pi in enumerate(ptcl_info):\n\t\t\t\tif pi[0]>thresh :\n\t\t\t\t\tnex+=1\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],0)\n\t\t\t\telif pi[2]==0:\n\t\t\t\t\tptcl_info[i]=(pi[0],pi[1],1)\n\n\t\t\tif verbose : print \"%d/%d particles excluded\"%(nex,len(ptcl_info))\n\n\t\t\t# if all of the particles were thrown out for some reason, we keep the best one\n\t\t\tif nex==len(ptcl_info) :\n\t\t\t\tbest=ptcl_info.index(min(ptcl_info))\n\t\t\t\tptcl_info[best]=(ptcl_info[best][0],ptcl_info[best][1],1)\n\t\t\t\tif verbose : print \"Best particle reinstated\"\n\n\t\tif it==niter+1 : break\t\t# This is where the loop actually terminates. 
This makes sure that inclusion/exclusion is updated at the end\n\n\t\t# Now align and average\n\t\tavgr=Averagers.get(averager[0], averager[1])\n\t\tfor i in range(nimg):\n\t\t\tif callback!=None and nimg%10==9 : callback(int((it+i/float(nimg))*100/(niter+2.0)))\n\t\t\tptcl=get_image(images,i,normproc)\t\t\t\t\t# get the particle to align\n\t\t\tali=align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp) # align to reference\n\t\t\tsim=ali.cmp(scmp[0],ref,scmp[1])\t\t\t# compare similarity to reference (may use a different cmp() than the aligner)\n\t\t\tif saveali and it==niter : ali.write_image(\"aligned.hdf\",-1)\n\n\t\t\ttry: use=ptcl_info[i][2]\n\t\t\texcept: use=1\n\t\t\tif use :\n\t\t\t\tavgr.add_image(ali)\t\t\t\t# only include the particle if we've tagged it as good\n\t\t\t\tif verbose>1 :\n\t\t\t\t\tsys.stdout.write(\".\")\n\t\t\t\t\tsys.stdout.flush()\n\t\t\telif verbose>1:\n\t\t\t\tsys.stdout.write(\"X\")\n\t\t\t\tsys.stdout.flush()\n\t\t\tptcl_info[i]=(sim,ali[\"xform.align2d\"],use)\n\n\t\tif verbose>1 : print \"\"\n\n\t\tref=avgr.finish()\n\t\tref[\"class_ptcl_qual\"]=mean\n\t\tref[\"class_ptcl_qual_sigma\"]=sigma\n\n\t\t# A little masking before the next iteration\n\t\tgmw=max(5,ref[\"nx\"]/12)\t\t# gaussian mask width\n\t\tref.process_inplace(\"normalize.circlemean\",{\"radius\":ref[\"nx\"]/2-gmw})\n\t\tif automask :\n\t\t\tref.process_inplace(\"mask.auto2d\",{\"nmaxseed\":10,\"nshells\":gmw-2,\"nshellsgauss\":gmw,\"sigma\":0.2})\n\t\telse :\n\t\t\tref.process_inplace(\"mask.gaussian\",{\"inner_radius\":ref[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\n\tif ref_orient!=None :\n\t\tref[\"xform.projection\"]=ref_orient\n\t\tref[\"model_id\"]=ref_model\n\treturn [ref,ptcl_info]", "def similarityMetric(Est, GT, options):\n\n if options == None:\n options = {}\n if not 'metric' in options:\n options['metric'] = 'basic'\n\n#########################################################\n## YOU MUST REMOVE THE REST OF THE CODE OF THIS FUNCTION\n## AND CHANGE FOR YOUR OWN CODE\n#########################################################\n comptador = 0\n if options['metric'].lower() == 'basic':\n for i in Est:\n if i in GT[1]:\n comptador = comptador + 1\n return comptador / len(Est)\n\n else:\n return 0", "def enrichment_score(query_genes, term_genes, background_genes):\n n11 = np.intersect1d(query_genes, term_genes).size\n n12 = query_genes.size - n11\n n21 = term_genes.size - n11\n n22 = background_genes.size - (n11 + n12 + n21)\n contmatrix = np.array([[n11, n12], [n21, n22]])\n odds_ratio, p_values = stats.fisher_exact(contmatrix, alternative='greater')\n return odds_ratio, p_values", "def getAverage(self):\n return sum(self.scores) / len(self.scores)", "def average_strain_data(features, metadata, groups_column='gene_name'):\n\n meta_cols = metadata.columns.to_list() \n data = pd.concat([metadata[groups_column], features], axis=1)\n mean_data = data.groupby(groups_column).mean()\n df = metadata.merge(mean_data, how='right', on=groups_column)\n df = df.groupby(groups_column).first().reset_index()\n \n feat = df[[c for c in df.columns if c not in meta_cols]]\n meta = df[meta_cols]\n \n return feat, meta", "def computeOverallScore(self,m):\n \n def _computeOverallScore(scalars):\n \"\"\"Given a netCDF4 group of scalars, blend them into an overall score\"\"\"\n scores = {}\n variables = [v for v in scalars.variables.keys() if \"Score\" in v and \"Overall\" not in v]\n for region in self.regions:\n overall_score = 0.\n sum_of_weights = 0.\n for v in variables:\n if region not in v: 
continue\n score = v.replace(region,\"\").strip()\n weight = 1.\n if self.weight.has_key(score): weight = self.weight[score]\n overall_score += weight*scalars.variables[v][...]\n sum_of_weights += weight\n overall_score /= max(sum_of_weights,1e-12)\n scores[\"Overall Score %s\" % region] = overall_score\n return scores\n\n fname = os.path.join(self.output_path,\"%s_%s.nc\" % (self.name,m.name))\n if not os.path.isfile(fname): return\n with Dataset(fname,mode=\"r+\") as dataset:\n datasets = [dataset.groups[grp] for grp in dataset.groups if \"scalars\" not in grp]\n groups = [grp for grp in dataset.groups if \"scalars\" not in grp]\n datasets.append(dataset)\n groups .append(None)\n for dset,grp in zip(datasets,groups):\n if \"scalars\" in dset.groups:\n scalars = dset.groups[\"scalars\"]\n score = _computeOverallScore(scalars)\n for key in score.keys():\n if key in scalars.variables:\n scalars.variables[key][0] = score[key]\n else:\n Variable(data=score[key],name=key,unit=\"1\").toNetCDF4(dataset,group=grp)", "def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def global_average_scores(self):\n\n return np.mean(self.average_scores_all_subjects(), axis=0)", "def aver_score(datalist):\n scores_per_position = []\n \n for tupl in datalist:\n count = 0\n sum_of_position = 0\n for element in tupl[3]:\n sum_of_position += element\n count +=1\n aver_pos = sum_of_position/ count\n scores_per_position += [aver_pos]\n \n return scores_per_position", "def find_avg(centroids, short_cut=False, sim_scores=None):\n \n total_sim = 0.0\n total_comparisons = 0\n \n if short_cut:\n total_comparisons = len(sim_scores)\n \n for score in sim_scores:\n total_sim += score\n \n return (total_sim / total_comparisons)\n\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n total_sim += similarity(centroids[i], centroids[j])\n total_comparisons += 1\n\n return (total_sim / total_comparisons)", "def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K items\n score_1 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)", "def print_avg():", "def avg_rows(rows):\n if len(rows) == 1:\n return rows[0]\n\n agg_vals = {k: [rows[j][k] for j in range(len(rows))] for k in rows[0].keys()}\n return {k: (reduce(np.add, v) / len(v)) for k, v in agg_vals.items()}", "def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average", "def lof_sig_scores(table, samples, verbose=True):\n mut_probdam = 'Missense:Probably'\n mut_syn = 'Synonymous'\n mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']\n mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']\n mut_all = [mut_probdam, 
mut_syn] + mut_trunc + mut_other\n\n # Calculate the global nonsynonymous:synonymous ratio ---------------------\n # Within each mutation category, sum counts (across all genes)\n tot_count_probdam = sum(table[mut_probdam])\n tot_count_syn = sum(table[mut_syn])\n tot_count_trunc = sum(itertools.chain(*(list(table[col])\n for col in mut_trunc)))\n tot_count_other = sum(itertools.chain(*(list(table[col])\n for col in mut_other)))\n\n # Global mutation count across all categories and genes (= 3504)\n tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,\n tot_count_other))\n if verbose:\n print(\"Counted\", tot_count_all, \"mutations across\", len(table), \"genes\",\n \"and\", len(samples), \"samples\", file=sys.stderr)\n\n # Fraction of global mutations in each category of interest\n tot_frac_probdam = tot_count_probdam / tot_count_all\n tot_frac_syn = tot_count_syn / tot_count_all\n tot_frac_trunc = tot_count_trunc / tot_count_all\n\n # Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)\n tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn\n\n # Calculate each gene's mutation score ------------------------------------\n for _idx, row in table.iterrows():\n gene_count_all = sum([row[col] for col in mut_all])\n if not gene_count_all:\n # Gene is not mutated at all --> zero score\n yield (row['Gene'], 0.0)\n continue\n\n # Initial score is the sum the 'Normalized' values across all samples\n raw_score = sum(row[sid] for sid in samples)\n\n # Adjust for NS:S ratio\n gene_count_syn = row[mut_syn]\n syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,\n 0)\n new_score = raw_score * syn_factor\n\n # Adjust for \"probably damaging\" missense and truncating mutations\n gene_frac_probdam = row[mut_probdam] / gene_count_all\n probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam\n gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all\n trunc_factor = gene_frac_trunc / tot_frac_trunc\n final_score = new_score * probdam_factor * trunc_factor\n yield (row['Gene'], final_score)", "def average(grade1, grade2, grade3):\n return (grade1 + grade2 + grade3) / 3", "def score_game(game_core):\n \n att_counter = [] \n np.random.seed(1) # fix RANDOM SEED so the experiment is reproducible \n random_array = np.random.randint(1,101, size=(1000))\n for number in random_array:\n att_counter.append(game_core(number))\n score = int(np.mean(att_counter))\n print(f\"Your algorithm guesses on average the number in {score} attempts.\")\n return(score)", "def get_metrics(cfg, model, X_anchor, y_anchor, X_gal, y_gal, annoy_index, vec_dim):\n rank10_acc = 0\n rank5_acc = 0\n rank1_acc = 0\n avg_acc = 0\n vote_res = 0\n\n l2 = []\n for anchor in range(0, len(X_anchor)):\n res = get_result(get_image_features(cfg, model, X_anchor[anchor]), annoy_index)\n vote = defaultdict(int)\n # Accuracy\n correct = 0\n for i in res[:10]:\n vote[y_gal[i]] += 1\n\n max_key = max(vote, key=vote.get)\n if max_key == y_anchor[anchor]:\n vote_res += 1\n \n\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1 \n\n avg_acc += correct/len(res)\n\n # Mean Average Precision\n l1 = []\n for recomm in res[:10]:\n if y_gal[recomm] == y_anchor[anchor]:\n correct += 1\n l1.append(1)\n else:\n l1.append(0)\n l2.append(l1) \n\n # Rank10 Accuracy\n for each_val in res[:10]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank10_acc += 1\n break\n \n # Rank5 Accuracy\n for each_val in res[:5]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank5_acc += 1\n break\n\n # Rank1 
Accuracy\n for each_val in res[:1]:\n if y_gal[each_val] == y_anchor[anchor]:\n rank1_acc += 1\n break\n\n print(\"Avg acc is :: {avg_acc}\".format(avg_acc = avg_acc/len(X_anchor)))\n print(\"Rank 10 acc is :: {rank10_acc}\".format(rank10_acc = rank10_acc/len(X_anchor)))\n print(\"Rank 5 acc is :: {rank5_acc}\".format(rank5_acc = rank5_acc/len(X_anchor)))\n print(\"Rank 1 acc is :: {rank1_acc}\".format(rank1_acc = rank1_acc/len(X_anchor)))\n print(\"Mean Avg Precision is :: {mAP}\".format(mAP=mean_average_precision(l2)))\n print(\"Vote res :: \", vote_res/len(X_anchor))\n\n return rank1_acc/len(X_anchor), mean_average_precision(l2)", "def average_match(e, l, ignore_self=False):\n # filter out ourself as this will give us a nice 1.0 value\n # the problem with this is if we are a list of 1, anything will have a better value!\n if ignore_self:\n l = [x for x in l if x != e]\n if len(l) == 0:\n return 0\n values = [pairwise[e][x] for x in l]\n return 1.0*sum(values)/len(values)", "def summaries(e_dict, m_dict):\n for key, value in m_dict.items():\n e_dict[key].append(np.mean(value))\n return e_dict", "def average_grade(lst):\r\n res = []\r\n for stdnt in lst:\r\n name, avg = stdnt[0], mean(conv_to_num(stdnt[1:]))\r\n res.append([name, avg])\r\n\r\n\r\n return(res)", "def calculate_metric(self, distance_matrix):\n ap_scores = []\n for node_id in range(len(distance_matrix)):\n sorted_nodes = np.argsort(distance_matrix[node_id]).tolist()\n neighs = self.neighbors[node_id]\n n_correct = 0.0\n precisions = []\n for i in range(1, len(sorted_nodes)):\n if sorted_nodes[i] in neighs:\n n_correct += 1\n precisions.append(n_correct / i)\n if n_correct == len(neighs):\n break\n\n ap_scores.append(np.mean(precisions))\n\n return np.mean(ap_scores)", "def average_kappa_for_group(db, groupId):\n documents = db.documents.find({'groupId': groupId})\n kappas = []\n for document in documents:\n if document_has_annotations(db, document['_id']) and document_has_numbers(db, document['_id']):\n kappas.append(get_kappa_for_document(db, document['_id']))\n return sum(kappas)/float(len(kappas))", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. 
/ self.pair.data['avg']", "def score_all_genes(self):\n scores = pd.DataFrame(self.clf.predict_proba(self.dataset), index=self.dataset.index)[1]\n scores = pd.DataFrame(scores).sort_values(1, ascending=False)\n scores['known'] = [int(g in list(self.M.befree_genes + self.M.curated_genes + self.M.sven_genes)) for g in scores.index]\n scores.columns = ['score', 'known']\n scores.to_csv(self.save_path + '/all_gene_scores.csv')\n\n predictions = pd.DataFrame(self.clf.predict(self.dataset), index=self.dataset.index)\n predictions['known'] = [int(g in list(self.M.befree_genes + self.M.curated_genes + self.M.sven_genes)) for g in predictions.index]\n predictions.to_csv(self.save_path + '/predictions.csv')", "def get_gpa(scores):\n subjects_gpas = []\n for score in scores:\n subjects_gpas.append(calculate_gpa(score))\n gpa = get_average(subjects_gpas)\n return gpa", "def get_averages(self):\t\n\t\t\n\t\taverages = {}\n\t\tfor subject in self.grades.iterkeys():\n\t\t\taverages[subject] = float(sum(self.grades[subject])) / len(self.grades[subject])\n\t\treturn averages", "def average_grades(grades):\r\n\r\n\tfor key, value in grades.items(): # iterate through the dictionary for key and value\r\n\t\tgrades[key] = sum(value)/len(value) # average of the value\r\n\r\n\treturn (grades) #return grades\r", "def avg_by_elem(p):\n p, num = p\n avg = map(lambda x:x/num, p)\n return tuple(avg)", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def average(X, ebunch):\n edge_embeds = np.zeros((len(ebunch), len(X[list(X.keys())[0]])))\n i = 0\n for edge in ebunch:\n edge_embeds[i] = (X[str(edge[0])] + X[str(edge[1])]) / 2.0\n i += 1\n return edge_embeds", "def compute_evidence_weighted_aggregated_veracity_score(\n gold: Dict[Tuple[int, str], Dict],\n pred: Dict[Tuple[int, str], Dict],\n elementwise_evidence_f1: Dict[Tuple[int, str], float],\n elementwise_evidence_f1_corrected: Dict[Tuple[int, str], float],\n) -> Dict:\n\n accuracies_passages: List[float] = []\n f1_scores_evidence: List[float] = []\n f1_scores_corrected_evidence: List[float] = []\n\n keys: List[Any] = list(gold.keys())\n\n for key in keys:\n gold_sample: Dict = gold[key]\n pred_sample: Dict = pred[key]\n\n gold_passage_label: str = gold_sample['labels']['passage']\n predicted_passage_label: str = pred_sample['predicted']\n\n accuracies_passages.append(get_instance_accuracy(gold_passage_label, predicted_passage_label))\n f1_scores_evidence.append(elementwise_evidence_f1[key])\n f1_scores_corrected_evidence.append(elementwise_evidence_f1_corrected[key])\n\n return {\n 'ev_weighted_accuracy': np.mean(np.array(accuracies_passages) * np.array(f1_scores_evidence)),\n 'ev_weighted_accuracy_corrected': np.mean(\n np.array(accuracies_passages) * np.array(f1_scores_corrected_evidence)\n )\n }", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n 
rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def grade(population, targetSum, targetProduct):\n summed = reduce (add,(fitness(x, targetSum, targetProduct) for x in population), 0 )\n return summed / len(population)", "def geo_mean(num_list):\n np_array = np.array(num_list)\n return np_array.prod() ** (1.0 / len(np_array))", "def mean_average_position():\n pass", "def _score_for_model(meta):\n mean_acc = list()\n pipes = meta[\"pipeline\"]\n acc = meta[\"accuracy\"]\n if \"tagger\" in pipes:\n mean_acc.append(acc[\"tags_acc\"])\n if \"morphologizer\" in pipes:\n mean_acc.append((acc[\"morphs_acc\"] + acc[\"pos_acc\"]) / 2)\n if \"parser\" in pipes:\n mean_acc.append((acc[\"uas\"] + acc[\"las\"]) / 2)\n if \"ner\" in pipes:\n mean_acc.append((acc[\"ents_p\"] + acc[\"ents_r\"] + acc[\"ents_f\"]) / 3)\n if \"textcat\" in pipes:\n mean_acc.append(acc[\"textcat_score\"])\n if \"senter\" in pipes:\n mean_acc.append((acc[\"sent_p\"] + acc[\"sent_r\"] + acc[\"sent_f\"]) / 3)\n return sum(mean_acc) / len(mean_acc)", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def calculate_score(self, edge_list):\n embs = np.array(\n [[self.emb[source], self.emb[target]] for source, target in edge_list]\n )\n\n if self.proximity_function == \"dot\":\n score_list = [\n np.dot(source_emb, target_emb) for source_emb, target_emb in embs\n ]\n elif self.proximity_function == \"cos\":\n score_list = cosine_similarity(embs[:, 0], embs[:, 1])\n\n return score_list", "def calculate_transformation_score(self, aggregated, measure):\n original_data_count = self.data_dict[measure][\"distinct_enum\"]\n aggregated_data_count = len(aggregated)\n score = 1 - aggregated_data_count/original_data_count\n return score", "def get_score(location, grid, shape):", "def compute_each_score(word_embeddings, each_id_pair): # without weighting scheme\n emb1 = word_embeddings[each_id_pair[0], :]\n emb2 = word_embeddings[each_id_pair[1], :]\n inn = np.inner(emb1, emb2)\n # print('inner product is {}'.format(inn))\n emb1norm = np.sqrt(np.inner(emb1, emb1))\n # print('emb1norm is {}'.format(emb1norm))\n emb2norm = np.sqrt(np.inner(emb2, emb2))\n # print('emb2norm is {}'.format(emb2norm))\n each_pair_score = inn / emb1norm / emb2norm\n # print('each score is {}\\n'.format(each_pair_score))\n return each_pair_score", "def compute_ensemble(fopen_list, var_list, range_list, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n rlzn_ensmbl = compute_rlzn_ensemble(fopen_list, var_list, range_list)\n\n # average is over 2nd dimension (if it exists), since first is time\n #ensmbl = compute_cluster_ensemble(rlzn_ensmbl, indicesOnCluster, maxIndices, indicesToParticle)\n\n ensmbl = rlzn_ensmbl\n\n return ensmbl #}}}", "def compute_average_separability_score(self) -> Dict:\n avg_sep_score = {}\n for class_pair_key, class_pair_val in self.separability_scores.items():\n avg_sep_score[class_pair_key] = np.mean(np.array([val for _, val in class_pair_val.items()]))\n avg_sep_score['agg_with_risk'] = sum(\n np.array([val for _, val in avg_sep_score.items()]) *\n RISK\n ) \n avg_sep_score['agg'] = sum([val for key, val in avg_sep_score.items() if type(key)==int]) \n return avg_sep_score", "def load_average(self):\n return _favg(self.load_samples)", "def store_overall_means(src_file: H5File) -> None:\n perp_sum = 0\n par_sum = 0\n ref_sum = 0\n counts = 0\n for path in 
rawnav.pump_group_paths(src_file):\n perp_path = path + '/perp'\n par_path = path + '/par'\n ref_path = path + '/ref'\n perp_sum += src_file[perp_path].attrs['mean']\n par_sum += src_file[par_path].attrs['mean']\n ref_sum += src_file[ref_path].attrs['mean']\n counts += 1\n src_file.attrs['perp_mean'] = perp_sum / counts\n src_file.attrs['par_mean'] = par_sum / counts\n src_file.attrs['ref_mean'] = ref_sum / counts\n return", "def _rouge_score_compute(sentence_results: Dict[str, List[Tensor]]) ->Dict[str, Tensor]:\n results: Dict[str, Tensor] = {}\n if sentence_results == {}:\n return results\n for rouge_key, scores in sentence_results.items():\n results[rouge_key] = torch.tensor(scores).mean()\n return results", "def average(data):\n return np.average(data)", "def top_1_avg(model_topic_labels):\n blog_model_sum = 0\n book_model_sum = 0\n news_model_sum = 0\n pubmed_model_sum = 0\n blog_gold_sum = 0\n book_gold_sum = 0\n news_gold_sum = 0\n pubmed_gold_sum = 0\n\n for topic_id, labels in model_topic_labels.items():\n top_model_label = labels[0]\n model_score = gold_topic_labels[topic_id][top_model_label]\n gold_score = max(gold_topic_labels[topic_id].values())\n if topic_id < N_blogs:\n blog_model_sum += model_score\n blog_gold_sum += gold_score\n elif topic_id < N_blogs + N_books:\n book_model_sum += model_score\n book_gold_sum += gold_score\n elif topic_id < N_blogs + N_books + N_news:\n news_model_sum += model_score\n news_gold_sum += gold_score\n else:\n pubmed_model_sum += model_score\n pubmed_gold_sum += gold_score\n\n top1avg_blogs, upper_bound_blogs = blog_model_sum / N_blogs, blog_gold_sum / N_blogs\n top1avg_books, upper_bound_books = book_model_sum / N_books, book_gold_sum / N_books\n top1avg_news, upper_bound_news = news_model_sum / N_news, news_gold_sum / N_news\n top1avg_pubmed, upper_bound_pubmed = pubmed_model_sum / N_pubmed, pubmed_gold_sum / N_pubmed\n\n return (top1avg_blogs, upper_bound_blogs, top1avg_books, upper_bound_books, \\\n top1avg_news, upper_bound_news, top1avg_pubmed, upper_bound_pubmed)", "def residue_pair_energy(self, res1, res2, pose, sf, emap):\n\t\tpose = pose\n\t\temv = EMapVector()\n\t\tsf_pair.eval_ci_2b(res1,res2,pose,emv)\n\t\tweighted_score = -1*(emv[pair]*sec_struct_weight[pose.secstruct()[res1.seqpos()-1]]*sec_struct_weight[pose.secstruct()[res2.seqpos()-1]])\n\t\temap.set(self.scoreType, weighted_score)", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def calculate_all_cycle_gan_metrics(nat_a, nat_b, gen_a, gen_b, cyc_a, cyc_b, combined_results=True):\n score1 = calculate_single_cycle_gan_metrics(gen_a, nat_a) # space A: generated vs. true data\n score2 = calculate_single_cycle_gan_metrics(gen_b, nat_b) # space B: generated vs. true data\n score3 = calculate_single_cycle_gan_metrics(cyc_a, nat_a) # space A: cyclic generated vs. true data\n score4 = calculate_single_cycle_gan_metrics(cyc_b, nat_b) # space B: cyclic generated vs. 
true data\n if not combined_results:\n print(score1)\n print(score2)\n print(score3)\n print(score4)\n return [score1, score2, score3, score4]\n # mean_score = [(w + x + y + z) / 4 for w, x, y, z in zip(score1, score2, score3, score4)]\n mean_score = [(x + y) / 2 for x, y in zip(score1, score2)]\n return mean_score", "def average(self):\n return self.summation() / self.count()", "def mape(x, y):\n return statistics.mean(ape(x, y))", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def avgmu(self):\n if self._dataframe is DataframeEnum.SkimmedNtuple:\n return self._event.averageIntPerXing\n elif self._dataframe is DataframeEnum.PhysVal:\n return self._event.avgmu\n else:\n self._logger.warning(\"Impossible to retrieve the value of avgmu. Unknow dataframe.\")", "def mean(xs):\n ave = 0\n for xs_split in xs:\n num = float(xs_split)\n print(xs_split)\n ave = ave+num\n average = ave/len(xs)\n return average", "def aver_and_var(self):\n # assert not self.is_empty\n\n for axis in range(3):\n c1, c2 = self.bounds[axis]\n w = self.n_pix_partial[axis]\n aver = np.average(np.arange(c1, c2), weights=w)\n var = np.average(np.arange(c1, c2)**2, weights=w) - aver ** 2 # D = E(X^2) - (EX)^2\n yield aver, var", "def ensembleMethod(tool_list, score_dict, dir, args):\n\n chr_list = score_dict[score_dict.keys()[0]].keys()\n peaks = []\n scores = []\n\n print len(tool_list)\n for chr in chr_list:\n print dir\n print args\n print chr_list\n print score_dict\n #print tool\n add_peak, add_score = stacking_peaks(dir, args, tool_list, score_dict, chr=chr)\n\n for peak in add_peak:\n peaks.append(peak)\n\n for score in add_score:\n scores.append(score)\n ensemble.ensembler(peaks, scores, len(tool_list))\n peaks = []\n score = []\n print peaks, scores\n\n print len(peaks), len(scores)", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def get_average_grade_of_students(students):\n total_grade = 0\n for row in students:\n total_grade += int(row[5])\n return total_grade/len(students)", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def _compute_mean(index, M, R, rake):\r\n mean = (a1[index] + _compute_linear_magnitude_term(index, M) + _compute_quadratic_magnitude_term(index, M) +\r\n _compute_logarithmic_distance_term(index, M, R) + _compute_faulting_style_term(index, rake))\r\n\r\n return mean", "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def 
_compute_mean(self, C, mag, rjb, rake):\n mean = (C['a1'] +\n self._compute_linear_magnitude_term(C, mag) +\n self._compute_quadratic_magnitude_term(C, mag) +\n self._compute_logarithmic_distance_term(C, mag, rjb) +\n self._compute_faulting_style_term(C, rake))\n\n return mean", "def score_samples(self, X):\n ..." ]
[ "0.6643462", "0.6452144", "0.62165403", "0.6171676", "0.60874385", "0.60116637", "0.5967243", "0.59489244", "0.5940046", "0.5897672", "0.5886396", "0.5863131", "0.584357", "0.58260745", "0.58122647", "0.58121586", "0.5747593", "0.5738639", "0.5724953", "0.5721815", "0.57199776", "0.570864", "0.5702405", "0.56995976", "0.56905186", "0.56877804", "0.56819993", "0.56817514", "0.56653893", "0.56653893", "0.56653893", "0.56653893", "0.56653893", "0.56632483", "0.5654815", "0.5646385", "0.5640165", "0.5632765", "0.5623213", "0.5607792", "0.5601835", "0.5597576", "0.55950534", "0.5590095", "0.55856407", "0.5585531", "0.557714", "0.55765927", "0.55750054", "0.55656004", "0.55582917", "0.55564654", "0.55472535", "0.554379", "0.55346465", "0.5533818", "0.55287427", "0.55086046", "0.5504831", "0.55041414", "0.5499812", "0.5495047", "0.54948795", "0.54927826", "0.5485351", "0.54783756", "0.54725546", "0.54702413", "0.5467311", "0.5457988", "0.5443377", "0.543837", "0.54346925", "0.54273576", "0.54258513", "0.5425638", "0.5413414", "0.5409738", "0.5400351", "0.53924626", "0.5391411", "0.53878754", "0.53818554", "0.5381808", "0.53751934", "0.53705645", "0.53643835", "0.53643507", "0.5361814", "0.5357494", "0.5355651", "0.5355132", "0.53474903", "0.5345478", "0.53439474", "0.5343529", "0.5337021", "0.5337021", "0.5335716", "0.5328702", "0.53286165" ]
0.0
-1
Determine relevant entries in crkeng.xml and build a smaller xml file for testing.
def build_test_xml():
    crkeng_file_path = find_latest_xml_file(shared_res_dir / "dictionaries")
    print(f"Building test dictionary files using {crkeng_file_path.name}")

    crkeng_root = ET.parse(str(crkeng_file_path)).getroot()

    # relevant entries in crkeng.xml file we want to determine
    relevant_xml_ls: Set[str] = set()

    xml_ls: Set[str] = set()
    crkeng_entries = crkeng_root.findall(".//e")
    for element in crkeng_entries:
        xml_l = extract_l_str(element)
        xml_ls.add(xml_l)

    test_words = get_test_words()

    print(f"Analyzing xml l elements and test words")
    word_to_analyses = morphodict.analysis.relaxed_analyzer().bulk_lookup(
        xml_ls | test_words
    )
    print("Analysis done")

    test_word_lemmas: Set[str] = set()

    for test_word in test_words:
        for analysis in word_to_analyses[test_word]:
            lemma = fst_analysis_parser.extract_lemma(analysis)
            if lemma is None:
                logger.warn(
                    "Skipping test word: %s. "
                    "Could not extract lemma from its analysis: %s",
                    test_word,
                    analysis,
                )
                continue
            test_word_lemmas.add(lemma)

    for xml_l in tqdm(xml_ls, desc="screening relevant entries in crkeng.xml"):
        if xml_l in test_words:
            relevant_xml_ls.add(xml_l)
            continue
        for xml_l_analysis in word_to_analyses[xml_l]:
            xml_lemma = partition_analysis(xml_l_analysis)[1]
            for test_word_lemma in test_word_lemmas:
                if test_word_lemma == xml_lemma:
                    relevant_xml_ls.add(xml_l)
                    break

    relevant_crkeng_entries = []

    for element in crkeng_entries:
        xml_l = extract_l_str(element)
        if xml_l in relevant_xml_ls:
            relevant_crkeng_entries.append(element)

    crkeng_xml_utils.write_xml_from_elements(
        list(crkeng_root.findall(".//source")) + relevant_crkeng_entries,
        shared_res_dir / "test_dictionaries" / "crkeng.xml",
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" 
<parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin 
de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += 
spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n 
o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()", "def makexmlfunc(healpix,ra,dec,week1,week2,distance):\n\t\n\tif week1!=week2:\n\t\tidentity=\"%06d_%d_%d_w%03d_w%03d\" %(healpix,ra,dec,week1,week2)\n\t\tltcube=\"%s/lat_ltcube_weekly_w%03d_w%03d_p203_v001.fits\" %(cfg.home,week1,week2)\n\t\tspacecraft=\"%s/w%03d_w%03d_newspacecraft.fits\" %(cfg.ispace,week1,week2)\n\telse:\n\t\tidentity=\"%06d_%d_%d_w%03d\" %(healpix,ra,dec,week1)\n\t\tltcube=\"%s/lat_spacecraft_weekly_w%03d_p203_v001_ltcube.fits\" %(cfg.home,week1)\n\t\tspacecraft=\"%s/lat_spacecraft_weekly_w%03d_p202_v001.fits \" %(cfg.ispace,week1)\n\n\tregion_filtered=\"%s_region_filtered_gti.fits\" %(identity)\n\tfermisources=\"%s_fermisources_model.xml\" %(identity)\n\tinputmodel=\"%s_input_model.xml\" %(identity)\n\tfermis=\"%s_fermis.xml\" %identity\n\tresponse=\"P7REP_SOURCE_V15\"\n\tmakexmllog=\"%s_output_makexml.log\" %identity\n\tglobal extendedsource\n\tglobal numberofextendedsources\n\textendedlog=\"%s_number_of_extendedsources.log\" %identity\n\tExtendedList=\"ExtendedList.txt\"\n\tOthersList=\"OthersList.txt\"\n\n\t\n\twith open (makexmllog,'r') as outputFile: #opens the makexmllog file from makesyfunc. This document contains info about the extended sources.\n\t\t\n\t\tfor line in outputFile:\n\t\t\t\n\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\tif line.startswith('Added')==True:\n\t\t\t\t\ta,b=line.split('and ')\t\n\t\t\t\t\tb1,b2,b3=b.split(' ')\n\t\t\t\t\n\t\t\t\t\tnumberofextendedsources=int(b1) #b1 is the number of extended sources\n\toutputFile.close()\n\toutputFile=open(inputmodel, 'w')\n\tprint numberofextendedsources\n\n\tif numberofextendedsources==1: #if there is an extended source\n\t\twith open (makexmllog,'r') as outputFile:\n\t\t\n\t\t\tfor line in outputFile:\n\t\t\t\n\t\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\t\tif line.startswith('Extended')==True:\n\t\t\t\t\t\tprint line\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tc,d=line.split(' in')\n\t\t\t\t\t\n\t\t\t\t\t\tc1,c2,c3,c4=c.split(' ')\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\textendedsource=str(c3) #extracts the name of the extended source from makexmllog\n\t\n\n\t\t\n\n\n\t\toutputFile.close()\t\n\n\n\t\n\n\t\twith open(\"%s\" %fermisources) as thefile: #opens the xml file that was created from makesyfunc\n\t\t\tfor line in thefile:\n\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==True:\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspecial=str.replace(line,'%s.fits'%extendedsource,'%s/%s.fits' %(cfg.homesy,extendedsource)) \n\t\t\t\t\tprint special #replace with the correct path to the extendedsource(Templates folder)\n\t\t\t\n\t\t\t\t\tspecial1=str.replace(special,'type=\"SpatialMap\"','type=\"SpatialMap\" map_based_integral=\"true\"')\n\t\t\t\t\tprint special1 #instruction from fermi tutorial, you must add map_based...\n\t\t\t\t\toutputFile=open(fermis, 'w') #write to fermis, the original xml with the right path to the extended source\n\t\t\t\t\twith open(\"%s\" %fermisources,'r') as infile:\n\t\t\t\t\t\tfor line in 
infile:\n\t\t\t\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==False:\n\t\t\t\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputFile.write(special1)\n\t\t\t\t\toutputFile.close()\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\n\t\toutputFile=open(inputmodel, 'w') #final xml file. contains the right path and the source info of \"your\" source.\n\t\twith open(fermis,'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\t\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\t\t\t\n\n\t\toutputFile.close()\n\t\n\t\twith open(\"%s_diffrsp.log\" % (identity), 'w') as outsyputFile: #run diffrsp if you have an extended source.\n\t\t\tsubprocess.call(['%s' %(cfg.pythoncommand),'gtdiffrsp.py', '%s' %(region_filtered),'%s' %(spacecraft), '%s' %inputmodel, '%s' %(response),'%s' %identity ],stdout=outsyputFile)\n\t\t\t\n\t\twith open(ExtendedList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\t\t\t\t\n\tif numberofextendedsources==0: #if there is no extended source\n\t\toutputFile=open('%s' %(inputmodel), 'w') #write to inputmodel, \"your\" source\n\t\twith open('%s' %(fermisources),'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\n\t\t\t\n\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\toutputFile.close()\n\tif numberofextendedsources>1:\n\t\twith open(OthersList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\n\tif numberofextendedsources==1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\\n\\\n 
\t%s\"%(numberofextendedsources,extendedsource))\n\t\toutsyputFile.close()\n\n\tif numberofextendedsources !=1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\" %(numberofextendedsources))\n\t\toutsyputFile.close()", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))", "def generate_xml(self, locations):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n kernel = Kerneladapter()\n\n for locname in locations:\n xml_location = ET.SubElement(xmlroot, 'location')\n location = kernel.location_info(locname)\n ET.SubElement(xml_location, \"location\").text = unicode(locname)\n ET.SubElement(xml_location, \"height\").text = unicode(location['height'])\n ET.SubElement(xml_location, \"attributes\").text = unicode(location['attributes'])\n ET.SubElement(xml_location, \"floorlevel\").text = unicode(location['floorlevel'])\n ET.SubElement(xml_location, \"preference\").text = unicode(location['preference'])\n ET.SubElement(xml_location, \"info\").text = unicode(location['info'])\n ET.SubElement(xml_location, \"reserved_for\").text = unicode(location['reserved_for'])\n\n for mui in location['allocated_by']:\n unit = kernel.unit_info(mui)\n xml_unit = ET.SubElement(xml_location, \"unit\")\n ET.SubElement(xml_unit, \"mui\").text = unicode(unit['mui'])\n ET.SubElement(xml_unit, \"quantity\").text = unicode(unit['quantity'])\n ET.SubElement(xml_unit, \"artnr\").text = unicode(unit['product'])\n ET.SubElement(xml_unit, \"height\").text = unicode(unit['height'])\n ET.SubElement(xml_unit, \"pick_quantity\").text = unicode(unit['pick_quantity'])\n ET.SubElement(xml_unit, 'created_at').text = unit['created_at'].strftime('%Y-%m-%d %H:%M:%S')\n ET.SubElement(xml_unit, \"movements\").text = unicode(unit['movements'])\n ET.SubElement(xml_unit, \"picks\").text = unicode(unit['picks'])\n ET.SubElement(xml_unit, \"attributes\").text = unicode(unit['attributes'])\n try:\n product = produktpass.models.Product.objects.get(artnr=unit['product'])\n ET.SubElement(xml_unit, \"product_name\").text = unicode(product.name)\n except produktpass.models.Product.DoesNotExist:\n ET.SubElement(xml_unit, \"product_name\").text = '???'\n\n return xmlroot", "def createXML(config, ccdpars, userpars):\n\n # identify the template\n appLab = ccdpars.appLab.value()\n if config.debug:\n print('DEBUG: createXML: application = ' + appLab)\n print('DEBUG: createXML: application vals = ' + str(config.templates[appLab]))\n\n if config.template_from_server:\n # get template from server\n url = config.http_camera_server + config.http_path_get + '?' 
+ \\\n config.http_search_attr_name + '=' + config.templates[appLab]\n if config.debug:\n print ('DEBUG: url = ' + url)\n sxml = urllib2.urlopen(url).read()\n txml = ET.fromstring(sxml)\n else:\n # get template from local file\n if config.debug:\n print ('DEBUG: directory = ' + config.template_directory)\n lfile = os.path.join(config.template_directory, config.templates[appLab]['app'])\n if config.debug:\n print ('DEBUG: local file = ' + lfile)\n tree = ET.parse(lfile)\n txml = tree.getroot()\n\n # Find all CCD parameters\n cconfig = txml.find('configure_camera')\n pdict = {}\n for param in cconfig.findall('set_parameter'):\n pdict[param.attrib['ref']] = param.attrib\n\n # Set them. This is designed so that missing \n # parameters will cause exceptions to be raised.\n\n # X-binning factor\n pdict['X_BIN']['value'] = ccdpars.xbin.get()\n\n # Y-binning factor\n pdict['X_BIN']['value'] = ccdpars.ybin.get()\n\n # Number of exposures\n pdict['NUM_EXPS']['value'] = '-1' if ccdpars.number.value() == 0 else ccdpars.number.get()\n\n # LED level\n pdict['LED_FLSH']['value'] = ccdpars.led.get()\n\n # Avalanche or normal\n pdict['OUTPUT']['value'] = str(ccdpars.avalanche())\n\n # Avalanche gain\n pdict['HV_GAIN']['value'] = ccdpars.avgain.get()\n\n # Clear or not\n pdict['EN_CLR']['value'] = str(ccdpars.clear())\n\n # Dwell\n pdict['DWELL']['value'] = ccdpars.expose.get()\n\n # Readout speed\n pdict['SPEED']['value'] = '0' if ccdpars.readout == 'Slow' else '1' \\\n if ccdpars.readout == 'Medium' else '2'\n\n # Number of windows -- needed to set output parameters correctly\n nwin = ccdpars.nwin.value()\n\n # Load up enabled windows, null disabled windows\n for nw, win in ccdpars.wframe.wins:\n if nw < nwin:\n pdict['X' + str(nw+1) + '_START']['value'] = win.xstart.get()\n pdict['Y' + str(nw+1) + '_START']['value'] = win.ystart.get()\n pdict['X' + str(nw+1) + '_SIZE']['value'] = win.nx.get()\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = win.ny.get()\n else:\n pdict['X' + str(nw+1) + '_START']['value'] = '1'\n pdict['Y' + str(nw+1) + '_START']['value'] = '1'\n pdict['X' + str(nw+1) + '_SIZE']['value'] = '0'\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = '0'\n\n # Load the user parameters\n uconfig = txml.find('user')\n uconfig.set('target', userpars.target.get())\n uconfig.set('comment', userpars.comment.get())\n uconfig.set('ID', userpars.progid.get())\n uconfig.set('PI', userpars.pi.get())\n uconfig.set('Observers', userpars.observers.get())\n \n return txml", "def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n 
<DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT", "def test_pep8_conformance_pygccxml(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../pygccxml/\"\n\n self.run_check(path)", "def wrez2xml(self,newdoc,newroot):\n\t\twrez = newdoc.createElement('wrez')\n\t\twrez.setAttribute('hasChanged', str(self.hasChanged))\n\t\tnewroot.appendChild(wrez)\n\n\t\tpath = newdoc.createElement('path')\n\t\tpath.setAttribute('value', self.path)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('init_str')\n\t\tpath.setAttribute('value', 
self.init_str)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('hash_sha512')\n\t\tpath.setAttribute('value', self.hash_sha512)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('src_rip')\n\t\tpath.setAttribute('value', self.src_rip)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('quality')\n\t\tpath.setAttribute('value', self.quality)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('codec')\n\t\tpath.setAttribute('value', self.codec)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('language')\n\t\tpath.setAttribute('value', self.language)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('audio')\n\t\tpath.setAttribute('value', self.audio)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('encoder')\n\t\tpath.setAttribute('value', self.encoder)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('version')\n\t\tpath.setAttribute('value', self.version)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('extension')\n\t\tpath.setAttribute('value', self.extension)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('release_year')\n\t\tpath.setAttribute('value', self.release_year)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('title')\n\t\tpath.setAttribute('value', self.title)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('size')\n\t\tpath.setAttribute('value', str(self.size))\n\t\twrez.appendChild(path)\n\t\treturn wrez", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')", "def evaluate(self, xml_gold_path, xml_output_path):\n\n # Go through all files in xml_gold_path directory\n for file in os.listdir(xml_gold_path):\n\n # Set path to file\n file = xml_gold_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open xml files\n chapter_input_gold = open(file, 'r', encoding='utf8')\n chapter_input_test = open(xml_output_path+os.path.split(file)[-1], 'r', encoding='utf8')\n\n # Check if filenams are the same\n chapter_input_gold_name = os.path.split(chapter_input_gold.name)[-1]\n chapter_input_test_name = os.path.split(chapter_input_test.name)[-1]\n\n if chapter_input_gold_name == chapter_input_test_name:\n\n # Console log\n chapter_input_gold_name = chapter_input_gold.name\n chapter_input_test_name = chapter_input_test.name\n #print('Calculating score for: ' + chapter_input_gold_name + ' and: ' + chapter_input_test_name)\n\n # Process xml input file with BeautifulSoup\n chapter_input_gold = BeautifulSoup(chapter_input_gold, 'xml')\n chapter_input_test = BeautifulSoup(chapter_input_test, 'xml')\n\n # Empty variables for collecting Target scores\n target_precision_scores = 0\n target_recall_scores = 0\n target_f1_scores = 0\n target_jaccard_scores = 0\n\n # Empty variables for collecting Focus scores\n focus_precision_scores = 0\n focus_recall_scores = 0\n focus_f1_scores = 0\n focus_jaccard_scores = 0\n\n # Empty variables for collecting Negated scores\n negated_precision_scores = 0\n negated_recall_scores = 0\n negated_f1_scores = 0\n negated_jaccard_scores = 0\n\n # Empty variables for collecting Scope scores\n scope_precision_scores = 0\n scope_recall_scores = 0\n scope_f1_scores = 0\n scope_jaccard_scores = 0\n\n # 
Count sentences and frames\n sentence_count = 0\n gold_frames_count = 0\n test_frames_count = 0\n\n scope_gold_frames_count = 0\n #scope_test_frames_count = 0\n\n # Find all Gold and Test Sentences\n sentences_gold = chapter_input_gold.find_all('s')\n sentences_test = chapter_input_test.find_all('s')\n\n #targets_gold = chapter_input_gold.find_all('target')\n #targets_test = chapter_input_test.find_all('target')\n\n scope_gold_frames = chapter_input_gold.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_frames_count = len(scope_gold_frames)\n\n scope_test_frames = chapter_input_test.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_frames_count = len(scope_test_frames)\n\n # Exit if number of sentences != between Gold and Test files\n if len(sentences_gold) != len(sentences_test):\n raise SystemExit(print('Number of sentences between Gold and Test files does not match.\\nGold:',\n len(sentences_gold), 'Test:', len(sentences_test)))\n\n # Zip Gold and Test Sentences\n for s_gold, s_test in zip(sentences_gold, sentences_test):\n\n sentence_count = sentence_count + 1\n\n gold_frames = s_gold.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n test_frames = s_test.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n\n gold_frames_count = gold_frames_count + len(gold_frames)\n test_frames_count = test_frames_count + len(test_frames)\n\n for item in zip(gold_frames, test_frames):\n\n #print('\\n=========')\n #print('\\nFrame:', item[0].get('id'))\n\n target_gold_list = []\n target_test_list = []\n\n focus_gold_list = []\n focus_test_list = []\n\n negated_gold_list = []\n negated_test_list = []\n\n scope_gold_list = []\n scope_test_list = []\n\n # Flatten a nested list of fenodes\n def flatten(nested_list):\n \"\"\" Flatten a nested list of fenodes \"\"\"\n t_l = []\n for i in nested_list:\n if not isinstance(i, list):\n t_l.append(i)\n else:\n t_l.extend(flatten(i))\n return t_l\n\n # Target\n if item[0].find('target'):\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n\n try:\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n except:\n target_test_word = ''\n\n elif item[1].find('target'):\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n\n try:\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n except:\n target_gold_word = ''\n\n target_gold_list.append(target_gold_word)\n target_test_list.append(target_test_word)\n\n # Sort lists\n sorted_target_gold_list = sorted(flatten(target_gold_list))\n sorted_target_test_list = sorted(flatten(target_test_list))\n\n #print('\\nTarget [Gold]:', sorted_target_gold_list)\n #print('Target [Test]:', sorted_target_test_list)\n\n\n # Focus\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except:\n focus_gold_word = ''\n if item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', 
{'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word = s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n else:\n focus_test_word = ''\n\n elif item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word = s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n try:\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except AttributeError:\n focus_gold_word = ''\n else:\n focus_gold_word = ''\n\n focus_gold_list.append(focus_gold_word)\n focus_test_list.append(focus_test_word)\n\n # Sort lists\n sorted_focus_gold_list = sorted(flatten(focus_gold_list))\n sorted_focus_test_list = sorted(flatten(focus_test_list))\n\n #print('\\nFocus [Gold]:', sorted_focus_gold_list)\n #print('Focus [Test]:', sorted_focus_test_list)\n\n\n # Negated\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n if item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n else:\n negated_test_word = ''\n\n elif item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n else:\n negated_gold_word = ''\n else:\n negated_test_word = ''\n negated_gold_word = ''\n\n negated_gold_list.append(negated_gold_word)\n negated_test_list.append(negated_test_word)\n\n # Sort lists\n sorted_negated_gold_list = sorted(flatten(negated_gold_list))\n sorted_negated_test_list = sorted(flatten(negated_test_list))\n\n #print('\\nNegated [Gold]:', sorted_negated_gold_list)\n #print('Negated [Test]:', sorted_negated_test_list)\n\n\n # Resolve Terminals if Scope on a complex graph\n def resolve_non_terminals(idref):\n \"\"\" This function resolves a complex gold graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_gold.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_gold.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_gold.find(id=e_id).get('word').lower()\n edge_words.append(edge_word)\n except:\n pass\n if s_gold.find(id=e_id).get('word') is None:\n 
edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n def resolve_non_terminals_test(idref):\n \"\"\" This function resolves a complex test graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_test.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_test.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_test.find(id=e_id).get('word').lower()\n edge_words.append(edge_word)\n except:\n pass\n if s_test.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n # Scope\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[0].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n\n if item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n elif s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n scope_test_list.append('')\n\n elif item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n if s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n pass\n\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n else:\n scope_gold_list.append('')\n\n # Sort lists\n sorted_scope_gold_list = sorted(flatten(scope_gold_list))\n sorted_scope_test_list = sorted(flatten(scope_test_list))\n\n #print('\\nScope [Gold]:', sorted_scope_gold_list)\n #print('Scope [Test]:', sorted_scope_test_list)\n\n # If lists are same length, check if items are same\n if len(sorted_scope_gold_list) == len(sorted_scope_test_list):\n sorted_scope_test_list_intersection = set(sorted_scope_gold_list).intersection(sorted_scope_test_list)\n sorted_scope_test_list_intersection = list(sorted_scope_test_list_intersection)\n if len(sorted_scope_test_list_intersection) < len(sorted_scope_test_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_test_list_intersection)\n empty_element = 0\n\n while empty_element < difference:\n 
sorted_scope_test_list_intersection.append('')\n empty_element = empty_element + 1\n \n sorted_scope_test_list = sorted_scope_test_list_intersection\n\n # If lists are different lengths, add empty elements\n elif len(sorted_scope_gold_list) > len(sorted_scope_test_list):\n difference = len(sorted_scope_gold_list) - len(sorted_scope_test_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_test_list.append('')\n empty_element = empty_element + 1\n\n elif len(sorted_scope_test_list) > len(sorted_scope_gold_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_gold_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_gold_list.append('')\n empty_element = empty_element + 1\n\n\n # Align items in the lists for sklearn, set 1 for matched items, else set 0\n sorted_target_gold_list_normalized = [1 if element in sorted_target_gold_list and not element == \"\" else 0 for element in sorted_target_gold_list]\n sorted_target_test_list_normalized = [1 if element in sorted_target_gold_list else 0 for element in sorted_target_test_list]\n\n sorted_focus_gold_list_normalized = [1 if element in sorted_focus_gold_list and not element == \"\" else 0 for element in sorted_focus_gold_list]\n sorted_focus_test_list_normalized = [1 if element in sorted_focus_gold_list else 0 for element in sorted_focus_test_list]\n\n sorted_negated_gold_list_normalized = [1 if element in sorted_negated_gold_list and not element == \"\" else 0 for element in sorted_negated_gold_list]\n sorted_negated_test_list_normalized = [1 if element in sorted_negated_gold_list else 0 for element in sorted_negated_test_list]\n\n sorted_scope_gold_list_normalized = [1 if element in sorted_scope_gold_list and not element == \"\" else 0 for element in sorted_scope_gold_list]\n sorted_scope_test_list_normalized = [1 if element in sorted_scope_gold_list else 1 if not element == \"\" else 0 for element in sorted_scope_test_list]\n\n #print(sorted_scope_gold_list_normalized)\n #print(sorted_scope_test_list_normalized)\n\n\n # Sklearn calculations\n #target_precision_scores = target_precision_scores + precision_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_recall_scores = target_recall_scores + recall_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n target_f1_scores = target_f1_scores + f1_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_jaccard_scores = target_jaccard_scores + jaccard_similarity_score(sorted_target_gold_list, sorted_target_test_list)\n\n #focus_precision_scores = focus_precision_scores + precision_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_recall_scores = focus_recall_scores + recall_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n focus_f1_scores = focus_f1_scores + f1_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_jaccard_scores = focus_jaccard_scores + jaccard_similarity_score(sorted_focus_gold_list, sorted_focus_test_list)\n\n #negated_precision_scores = negated_precision_scores + precision_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_recall_scores = negated_recall_scores + recall_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, 
average='weighted')\n negated_f1_scores = negated_f1_scores + f1_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_jaccard_scores = negated_jaccard_scores + jaccard_similarity_score(sorted_negated_gold_list, sorted_negated_test_list)\n\n scope_precision_scores = scope_precision_scores + precision_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_recall_scores = scope_recall_scores + recall_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_f1_scores = scope_f1_scores + f1_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_jaccard_scores = scope_jaccard_scores + jaccard_similarity_score(sorted_scope_gold_list, sorted_scope_test_list)\n\n\n print('\\n=============================')\n print('====== EVALUATION for:', chapter_input_test_name, '======')\n print('Total Sentences:', sentence_count,\n '\\nNegation Gold frames:', gold_frames_count,\n '\\nNegation Test frames:', test_frames_count, '\\n')\n\n print('----- CUEWORDS -----')\n #print('Precision:\\t', target_precision_scores / gold_frames_count)\n #print('Recall:\\t', target_recall_scores / gold_frames_count)\n print('F1 score:\\t', target_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', target_jaccard_scores / gold_frames_count)\n\n print('\\n----- FOCUS -----')\n #print('Precision:\\t', focus_precision_scores / gold_frames_count)\n #print('Recall:\\t', focus_recall_scores / gold_frames_count)\n print('F1 score:\\t', focus_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', focus_jaccard_scores / gold_frames_count)\n\n print('\\n----- NEGATED -----')\n #print('Precision:\\t', negated_precision_scores / gold_frames_count)\n #print('Recall:\\t', negated_recall_scores / gold_frames_count)\n print('F1 score:\\t', negated_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', negated_jaccard_scores / gold_frames_count)\n\n print('\\n----- SCOPE -----\\nScope Gold frames:', scope_gold_frames_count, '\\nScope Test frames:', scope_test_frames_count, '\\n')\n print('Precision:\\t', scope_precision_scores / scope_test_frames_count)\n print('Recall:\\t', scope_recall_scores / scope_test_frames_count)\n print('F1 score:\\t', scope_f1_scores / scope_test_frames_count)\n print('Jaccard similarity:\\t', scope_jaccard_scores / scope_test_frames_count)\n\n print('Done!')", "def build(filename=\"JMdict_e.gz\", output_filename=DATABASE_FILENAME):\n # NOTE: The JMdict XML file contains XML entities, that are expanded when\n # parsed using Python's stdlib xml.etree.ElementTree like so:\n # ElementTree.parse(f). That is undesired behavior for our use-case. Oshi\n # needs to parse the short entity string, for example &adj-i; should be\n # \"adj-i\" instead of \"adjective (keiyoushi)\". 
That's why it uses an external\n # xml parser: lxml that allows you to specify whether to expand entites.\n extension = path.splitext(filename)[1].lower()\n parser = etree.XMLParser(resolve_entities=False)\n if extension == \".gz\":\n with gzip.open(filename) as f:\n tree = etree.parse(f, parser)\n elif extension == \".xml\":\n tree = etree.parse(filename, parser)\n else:\n raise ValueError(\"File extension not supported: \" + extension)\n\n entries = []\n # variables starting with x contain xml element(s)\n for xentry in tree.getroot():\n entry = {}\n entry[\"writings\"] = [x.find('keb').text for x in xentry.findall('k_ele')]\n entry[\"readings\"] = [x.find('reb').text for x in xentry.findall('r_ele')]\n xsenses = xentry.findall('sense')\n senses = []\n # last_tags will contain a reference to previously found tags (JMdict\n # specifies that when pos is empty, the previous one should be used)\n last_tags = []\n for xsense in xsenses:\n tags = []\n xtags = xsense.findall('pos') # + xsense.findall('misc')\n for xtag in xtags:\n match = re.search(r'&([\\w-]+?);', etree.tostring(xtag, encoding=\"utf-8\").decode('utf-8') or \"\")\n if match: tags.append(match.group(1))\n glosses = [x.text for x in xsense.findall('gloss')]\n senses.append({\"glosses\": glosses, \"tags\": tags or last_tags})\n last_tags = tags or last_tags\n entry[\"senses\"] = senses\n entries.append(entry)\n with open(output_filename, 'w', encoding='utf-8') as f:\n json.dump(entries, f, ensure_ascii=False)", "def creation_srcmdl(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,outputfile,emin,emax):\n\tf_liste_sour=\"a.txt\"\n\n\tlect_ca(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,f_liste_sour,name)\n\tXML_EC_PL(name, f_liste_sour, outputfile, emin,emax)\n\tos.system(\"rm -rf a.txt\")", "def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)", "def test_xml_from_file(self):\n j2k = Jp2k(self.j2kfile)\n\n self.jp2h.box = [self.ihdr, self.colr]\n\n xmlb = glymur.jp2box.XMLBox(filename=self.xmlfile)\n boxes = [self.jp2b, self.ftyp, self.jp2h, xmlb, self.jp2c]\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as tfile:\n j2k.wrap(tfile.name, boxes=boxes)\n jp2 = Jp2k(tfile.name)\n\n 
output_boxes = [box.box_id for box in jp2.box]\n self.assertEqual(output_boxes, ['jP ', 'ftyp', 'jp2h', 'xml ',\n 'jp2c'])\n\n elts = jp2.box[3].xml.findall('country')\n self.assertEqual(len(elts), 3)\n\n neighbor = elts[1].find('neighbor')\n self.assertEqual(neighbor.attrib['name'], 'Malaysia')\n self.assertEqual(neighbor.attrib['direction'], 'N')", "def test_non_regression(self):\n main(\"Source_mobile.xml\", [[\"engine\", \"A320.xml\", \"A320.csv\"]], \"Resultat.xml\", gui=False)\n compare_xml_results(\"Resultat.xml\", \"Reference.xml\", self)", "def configureGeoData(data,resultDir):\n xmlfiles = findfiles(['*.xml'],where=data[\"folder\"])\n xmlurls=[]\n fgdclist=[]\n #xmlselect=[]\n for xml in xmlfiles:\n shutil.copy(os.path.join(data['folder'],xml),resultDir)\n xmlurls.append(os.path.join(resulturl,resultDir.split('/')[-1],xml))\n #import xmltodict\n localfilename=os.path.join(data['folder'],xml)\n #xmlselect.append({\"file\":localfilename,\"url\":os.path.join(resulturl,resultDir.split('/')[-1],xml)})\n with open(os.path.join(data['folder'],xml)) as fd:\n stringxml = fd.read()\n #if 'FGDC' in stringxml.upper():\n fgdc={}\n fgdc['url']=os.path.join(resulturl,resultDir.split('/')[-1],xml)\n doc = xmltodict.parse(stringxml,cdata_key='text',attr_prefix='',dict_constructor=dict)\n fgdc['data']=doc\n fgdc['file']=localfilename\n fgdclist.append(fgdc)\n data['xmlurls']=xmlurls\n data['xml']={\"urls\":xmlurls,\"fgdc\":fgdclist,\"files\":xmlfiles}\n return data", "def test_01_Xml(self):\n l_xml = self.m_xml.light_sect[1]\n # print(PrettyFormatAny.form(l_xml, 'C4-01-A - XML'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_LIGHT_NAME_1)\n self.assertEqual(l_xml.find('DeviceFamily').text, TESTING_DEVICE_FAMILY_UPB)", "def main(*args):\r\n print(START_MESSAGE)\r\n print(\"Script Location:\", location)\r\n print(\"Arguments Passed:\", args)\r\n\r\n root = ET.parse(xmlfile).getroot()\r\n keys = []\r\n out = \"\"\r\n\r\n for child in root[1]:\r\n out += child.attrib['Name'] + \";\" + child[0].text + \"\\n\"\r\n\r\n with open(outputfile, 'w') as f:\r\n f.write(out)", "def prepare_xml(original_xml, mangled_xml):\n in_handle = open(original_xml)\n footer = \" </BlastOutput_iterations>\\n</BlastOutput>\\n\"\n header = \"\"\n while True:\n line = in_handle.readline()\n if not line:\n #No hits?\n stop_err(\"Problem with XML file?\")\n if line.strip() == \"<Iteration>\":\n break\n header += line\n\n if \"<BlastOutput_program>blastx</BlastOutput_program>\" in header:\n print \"BLASTX output identified\"\n elif \"<BlastOutput_program>blastp</BlastOutput_program>\" in header:\n print \"BLASTP output identified\"\n else:\n in_handle.close()\n stop_err(\"Expect BLASTP or BLASTX output\")\n\n out_handle = open(mangled_xml, \"w\")\n out_handle.write(header)\n out_handle.write(line)\n count = 1\n while True:\n line = in_handle.readline()\n if not line:\n break\n elif line.strip() == \"<Iteration>\":\n #Insert footer/header\n out_handle.write(footer)\n out_handle.write(header)\n count += 1\n out_handle.write(line)\n\n out_handle.close()\n in_handle.close()\n print \"Input has %i queries\" % count", "def parse_CRAFT(kb_data):\n\n print(\"Parsing CRAFT corpus...\")\n corpus_dir = str()\n \n if kb_data.kb == \"chebi\":\n corpus_dir = \"./retrieved_data/corpora/CRAFT-4.0.1/concept-annotation/CHEBI/CHEBI/knowtator/\"\n \n elif kb_data.kb == \"go_bp\":\n corpus_dir = \"./retrieved_data/corpora/CRAFT-4.0.1/concept-annotation/GO_BP/GO_BP/knowtator/\"\n\n output_CRAFT = dict()\n \n for document in os.listdir(corpus_dir): 
\n root = ET.parse(corpus_dir + document)\n file_id = document.strip('.txt.knowtator.xml')\n annotations = dict()\n\n for annotation in root.iter(\"annotation\"):\n annotation_id = annotation.find('mention').attrib['id']\n annotation_text = annotation.find('spannedText').text\n start_pos, end_pos = annotation.find('span').attrib['start'], annotation.find('span').attrib['end']\n annotations[annotation_id] = [annotation_text, start_pos, end_pos] \n \n for classMention in root.iter(\"classMention\"):\n classMention_id = classMention.attrib['id']\n annotation_values = annotations[classMention_id]\n kb_id = classMention.find('mentionClass').attrib['id']\n \n if kb_id in kb_data.child_to_parent.keys(): # Consider only KB concepts with ONE direct ancestor\n direct_ancestor = kb_data.child_to_parent[kb_id]\n annotation = (annotation_values[0], annotation_values[1], \n annotation_values[2], kb_id, direct_ancestor) \n output_CRAFT = add_annotation_to_output_dict(file_id, annotation, output_CRAFT)\n \n print(\"...Done!\")\n return output_CRAFT", "def build_corpus_questions(criteria_incl_question=True, criteria_incl_snip=False, criteria_incl_long=False, level=0):\r\n\r\n\tprint('\\nbuilding questions and answers')\r\n\r\n\tif load_corpus_questions():\r\n\t\treturn\r\n\r\n\timport xml.etree.ElementTree as ET\r\n\r\n\tquestion_count = 0\r\n\tno_abstract_tag = 0\r\n\tno_abstract_file = 0\r\n\tlong_count = 0\r\n\t\r\n\tglobal search_criteria_dict, solution_dict, linked_abstracts_dict\r\n\t\r\n\tsearch_criteria_dict = collections.defaultdict(list)\r\n\tsolution_dict = collections.defaultdict(list)\r\n\tlinked_abstracts_dict = collections.defaultdict(list)\r\n\tcommon_map_dict = collections.defaultdict(list)\r\n\t\r\n\ttree = ET.parse(paths.path_data_questions)\r\n\troot = tree.getroot()\r\n\tfor record in root.findall('record'):\r\n\t\trecord_id = record.get('id')\r\n\t\tquestion_text = preprocess_document(record.find('question').text,True)\r\n\r\n\t\tif level == 0:\r\n\t\t\tkey = record_id # key\r\n\t\t\r\n\t\tanswer = record.find('answer')\r\n\t\tif answer is not None:\r\n\t\t\tfor s in answer.findall('snip'):\r\n\t\t\t\tif s is not None:\r\n\t\t\t\t\tsnip_id = s.get('id')\r\n\t\t\t\t\tsnip_text = preprocess_document(s.find('sniptext').text,True)\r\n\t\t\t\t\t\r\n\t\t\t\t\tif level == 1:\r\n\t\t\t\t\t\tkey = record_id + '_' + snip_id # key\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor i,l in enumerate(s.findall('long')):\r\n\t\t\t\t\t\tif l is not None:\r\n\t\t\t\t\t\t\tlong_id = l.get('id')\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif level == 2:\r\n\t\t\t\t\t\t\t\tkey = record_id + '_' + snip_id + '_' + long_id # key\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif criteria_incl_question:\r\n\t\t\t\t\t\t\t\tfor x in question_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # question\r\n\t\t\t\t\t\t\tif criteria_incl_snip:\r\n\t\t\t\t\t\t\t\tfor x in snip_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # snip\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tlong_text = l.find('longtext')\r\n\t\t\t\t\t\t\tif long_text is not None:\r\n\t\t\t\t\t\t\t\tlong_text = preprocess_document(long_text.text,True)\r\n\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\tsolution_dict[key].append(x) # long - answer\r\n\t\t\t\t\t\t\t\tif criteria_incl_long:\r\n\t\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # long - search\r\n\r\n\t\t\t\t\t\t\tif key not in search_criteria_dict.keys():\r\n\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append('')\r\n\r\n\t\t\t\t\t\t\tlong_refs = 
l.findall('ref')\r\n\t\t\t\t\t\t\tfor long_ref in long_refs:\r\n\t\t\t\t\t\t\t\tabstract = long_ref.get('abstract')[10:]\r\n\t\t\t\t\t\t\t\tabstract_path = paths.path_data_abstracts + '/' + abstract\r\n\t\t\t\t\t\t\t\tabstract_sentences = abstracts_dict[abstract]\r\n\t\t\t\t\t\t\t\tlinked_abstracts_dict[key].append(abstract) # linked abstracts\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tlong_count += 1\r\n\t\t\t\t\t\t\t\t\r\n\t\tquestion_count += 1\r\n\t\t# print(str(question_count) + ' : ' + str(question_text) + ' : ' + str(no_abstract_file) + ' : ' + str(no_abstract_tag) + ' : ' + str(long_count))\r\n\r\n\tpickle.dump(search_criteria_dict,open(paths.path_data_questions_pickle,\"wb\"))\r\n\tpickle.dump(solution_dict,open(paths.path_data_answers_pickle,\"wb\"))\r\n\tpickle.dump(linked_abstracts_dict,open(paths.path_data_linkedabstracts_pickle,\"wb\"))\r\n\t\r\n\tprint(len(search_criteria_dict))\r\n\tprint(len(solution_dict))\r\n\tprint(len(linked_abstracts_dict))\r\n\t\r\n\tprint('\\ncorpus build complete')", "def test_02_Xml1(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C1-02-A - XML'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_LIGHT_NAME_1)\n self.assertEqual(l_xml.find('DeviceFamily').text, TESTING_DEVICE_FAMILY_UPB)", "def test_load_quakeML():\n # Check one cmt file\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n # Cmtfile path\n cmtfile = os.path.join(DATA_DIR, \"testCMT\")\n\n # create new directory\n new_xml_path = os.path.join(tmp_dir, \"tests.xml\")\n xml = read_events(cmtfile)\n xml.write(new_xml_path, format=\"QUAKEML\")\n\n assert(os.path.exists(new_xml_path)\n and os.path.isfile(new_xml_path))\n\n print(\"QuakeML\\n\", CMTSource.from_quakeml_file(new_xml_path))\n print(\"CMT\\n\", CMTSource.from_CMTSOLUTION_file(cmtfile))\n assertDictAlmostEqual(CMTSource.from_quakeml_file(new_xml_path),\n CMTSource.from_CMTSOLUTION_file(cmtfile))", "def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it", "def __createXMLFileForClear():\r\n #description\r\n #Root\r\n clear_root = Element('clear-users-request', {'xmlns':SYMPLECTIC_XMLNS_URI,} )\r\n #Feed\r\n SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID\r\n #Convert to ElementTree and write xml version to file\r\n xml_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_USER_FOLDER + 
SYMPLECTIC_LOCAL_USER_CLEARFILE\r\n ElementTree(clear_root).write(xml_filename)\r\n #Return xml filename\r\n return xml_filename", "def setUpClass(cls):\n import os\n for root in cls.prod_s2_ssc:\n os.makedirs(root)\n metadata = root.split(\".\")[0] + \".HDR\"\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_mus:\n os.makedirs(root)\n metadata = os.path.join(root, root + \"_MTD_ALL.xml\")\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_nat:\n os.makedirs(root)\n metadata = os.path.join(root, \"MTD_MSIL1C.xml\")\n TestFunctions.touch(metadata)", "def xml_parser_dielectrics(request, tmpdir_factory):\n testdir = os.path.dirname(__file__)\n xmlfile = testdir + \"/dielectrics.xml\"\n tmpfile = str(tmpdir_factory.mktemp('data').join('basic_trunc.xml'))\n xml_truncate(request.param, xmlfile, tmpfile)\n xml = vasprun.Xml(tmpfile, event = False)\n \n return xml", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)", "def generateXMLmodel(quickLogger,\n base,\n galactic_file=\"gal_2yearp7v6_v0.fits\",\n isotropic_file=\"iso_p7v6source.txt\",\n catalog_file=\"gll_psc_v07.fit\"):\n\n\n try:\n checkForFiles(quickLogger,[base+\"_model.xml\"])\n quickLogger.info(base+\"_model.xml exists, won't create a new one.\")\n except(FileNotFound):\n quickLogger.info(base+\"_model.xml doesn't exist, will create a new one.\") \n try:\n checkForFiles(quickLogger,[base+\"_filtered_gti.fits\",galactic_file,isotropic_file,catalog_file])\n import make2FGLxml\n mymodel = make2FGLxml.srcList(catalog_file,base+\"_filtered_gti.fits\",base+\"_model.xml\")\n mymodel.makeModel(galactic_file, 'gal_2yearp7v6_v0', isotropic_file, 'iso_p7v6source')\n quickLogger.info(\"NOTE: if there are extended sources in your ROI, make sure the \"\\\n +\"correspoinding diffuse template is in the working directory.\")\n except(FileNotFound):\n raise FileNotFound", "def buildnemxml(self):\n for n in sorted(self._objs.keys()):\n emanenode = self._objs[n]\n emanenode.buildnemxmlfiles(self)", "def 
process_xml(self):\n self.process_gpx_file(str(self.filename))", "def hlsp_to_xml(config):\n\n # Check the user-provided config file path.\n config = cp.check_existing_file(config)\n if config is None:\n return\n\n # Try to read in the yaml config file.\n try:\n parameters = read_yaml(config)\n except (FileNotFoundError, TypeError) as err:\n print(err)\n return\n\n # Make sure the config file has all the expected sections\n for section in EXPECTED_CONFIGS:\n if section not in parameters:\n print(\"{0} does not define '{1}'!\".format(config, section))\n return\n\n # Make sure all necessary filepaths have been provided\n for path in EXPECTED_PATHS:\n if path not in parameters[\"filepaths\"]:\n print(\"{0} is missing an '{1}' filepath!\".format(config, path))\n return\n\n # Config parameters have been checked, now read into variables\n paths = parameters[\"filepaths\"]\n hlsppath = paths[\"hlsppath\"]\n output = paths[\"output\"]\n overwrite = paths[\"overwrite\"]\n\n extensions = parameters[\"file_types\"]\n header_type = parameters[\"header_type\"]\n data_type = parameters[\"data_type\"]\n keyword_updates = parameters[\"keyword_updates\"]\n uniques = parameters[\"unique_parameters\"]\n\n # Set up logging\n outdir = os.path.dirname(output)\n logfile = os.path.join(outdir, LOG)\n logfile = cp.check_new_file(logfile)\n if logfile is None:\n return\n logging.basicConfig(filename=logfile,\n format='***%(levelname)s from %(module)s: %(message)s',\n level=logging.DEBUG, filemode='w')\n logging.info(\"Logging started at {0}\".format(\n datetime.datetime.now().isoformat()))\n\n # Prepare the output file\n output = cp.check_new_file(output)\n if output is None:\n return\n print(\"Opening {0}\".format(output))\n if overwrite or not os.path.isfile(output):\n with open(output, 'w') as xmlfile:\n xmlfile.close()\n else:\n err = \"{0} already exists. 
Set overwrite=True to proceed.\".format(\n output)\n logging.error(err)\n print(err)\n return\n\n # Begin the lxml tree and add the main subelements\n composite = etree.Element(\"CompositeObservation\")\n xmltree = etree.ElementTree(composite)\n metadata = etree.SubElement(composite, \"metadataList\")\n provenance = etree.SubElement(composite, \"provenance\")\n products = etree.SubElement(composite, \"productList\")\n\n # Create the CAOMxmlList we will save CAOMxml objects into\n caomlist = CAOMxmlList()\n\n # Read the static CAOM values from the yaml file\n print(\"Creating standard HLSP entries...\")\n statics = cp.check_existing_file(STATICS)\n if statics is None:\n return\n try:\n static_values = read_yaml(statics)\n except (FileNotFoundError, TypeError) as err:\n logging.error(err)\n print(err)\n return\n\n # Add standard entries\n caomlist = add_value_caomxml(caomlist, static_values[\"hlsp\"])\n\n # Add some conditional entries\n if data_type == \"timeseries\":\n caomlist = add_value_caomxml(caomlist, static_values[\"timeseries\"])\n if header_type == \"kepler\":\n caomlist = add_value_caomxml(caomlist, static_values[\"kepler\"])\n print(\"...done!\")\n\n # Add information from the header keywords table.\n print(\"Adding entries from fits headers...\")\n caomlist = add_header_entries(caomlist, KEYWORD_TABLE, header_type)\n print(\"...done!\")\n\n # Add CAOMxml entries for HLSP-specifiic CAOM parameters.\n print(\"Adding unique entries for this HLSP...\")\n if uniques is None:\n logging.warning(\"No unique parameters provided in the yaml config.\")\n else:\n caomlist = add_value_caomxml(caomlist, uniques)\n print(\"...done!\")\n\n # Add product entries to the list of CAOMxml objects\n print(\"Generating the productList...\")\n caomlist = add_product_caomxml(caomlist, hlsppath, extensions, data_type)\n print(\"...done!\")\n\n # Make final tweaks to caomlist\n print(\"Making final adjustments...\")\n caomlist = adjust_defaults(caomlist, header_type, keyword_updates)\n print(\"...done!\")\n\n # Create the head string to write to doctype\n head_strings = []\n head_strings.append(\"<!-- Process HLSP for CAOM ingestion -->\")\n head_strings.append(\"\")\n head = \"\\n\".join(head_strings)\n\n # Add CAOMxml elements to xmltree\n print(\"Writing everything to XML...\")\n for entry in sorted(caomlist):\n\n # Skip extra top-level entries caused by recursion in add_value_caomxml\n if xmltree.find(entry.label) is None:\n entry.send_to_lxml(xmltree)\n\n # Write the xml tree to the OUTPUT file\n # (doctype not a valid argument for python 2.x)\n xmltree.write(output,\n encoding=\"utf-8\",\n xml_declaration=True,\n #doctype=head,\n pretty_print=True)\n print(\"...XML file generated!\")\n\n # Print out log stats before finishing\n check_log(logfile)\n logging.info(\"Logging finished at {0}\".format(\n datetime.datetime.now().isoformat()))", "def citation_validation(self):\n for file in filter(lambda x: x.startswith('citation'), listdir(bs_directory)):\n print \"Processing\", file\n bs = BeautifulSoup(open(file), 'xml')\n # find ground_truth file\n ground_truth = BeautifulSoup(open(self.ground_truth_directory + file.split(\".\")[0] + '.xml'), 'xml')\n self.__citation_correction(ground_truth, bs)\n file = open(self.citation_output + file, \"wb\")\n file.write(bs.prettify().encode('utf-8'))", "def test_XML(browser, filename, languages='en'):\n wait = browser.waiter()\n\n source_files = glob(os.path.abspath(os.path.join('test-data', filename)))\n languages = languages.split(',')\n\n if len(source_files) 
== 0:\n print('Error: No such file')\n\n for n, source_path in enumerate(source_files):\n\n if not os.path.isfile(source_path):\n print(\"File not found: %s\" % source_path)\n return\n\n local_content = open(source_path, 'rb').read()\n try:\n ElementTree.fromstring(local_content)\n except ElementTree.ParseError as e:\n print('%sError: The file \"%s\" contains invalid XML:%s' % (Fore.RED, os.path.basename(source_path), Style.RESET_ALL))\n print(Fore.RED + str(e) + Style.RESET_ALL)\n return\n\n source_path_root, source_path_ext = os.path.splitext(source_path)\n\n for m, lang in enumerate(languages):\n\n png_path = '%s_%s.png' % (source_path_root, lang)\n html_path = '%s_%s.html' % (source_path_root, lang)\n\n tmp = tempfile.NamedTemporaryFile('w+b')\n with open(source_path, 'rb') as op:\n tmp.write(re.sub('<preferred_language>[a-z]+</preferred_language>',\n '<preferred_language>%s</preferred_language>' % lang,\n op.read().decode('utf-8')).encode('utf-8'))\n tmp.flush()\n\n try:\n element = browser.driver.find_element_by_id('cbuttonupload')\n except NoSuchElementException:\n browser.get('/mng/action/home.do')\n\n # Open Alma configuration\n browser.wait_for(By.XPATH, '//*[@aria-label=\"Open Alma configuration\"]')\n browser.click(By.XPATH, '//*[@aria-label=\"Open Alma configuration\"]')\n browser.click(By.XPATH, '//*[@href=\"#CONF_MENU5\"]')\n browser.click(By.XPATH, '//*[text() = \"Notification Template\"]')\n\n browser.wait_for(By.ID, 'cbuttonupload')\n\n # Set language\n element = browser.driver.find_element_by_id('pageBeanuserPreferredLanguage')\n element.click()\n element = browser.driver.find_element_by_id('pageBeanuserPreferredLanguage_hiddenSelect')\n select = Select(element)\n opts = {el.get_attribute('value'): el.get_attribute('innerText') for el in select.options}\n longLangName = opts[lang]\n\n cur = n * len(languages) + m + 1\n tot = len(languages) * len(source_files)\n print('[%d/%d] Testing \"%s\" using language \"%s\"' % (cur, tot,\n os.path.basename(source_path),\n longLangName))\n\n element = wait.until(EC.element_to_be_clickable(\n (By.XPATH, '//ul[@id=\"pageBeanuserPreferredLanguage_hiddenSelect_list\"]/li[@title=\"%s\"]/a' % longLangName)\n ))\n element.click()\n\n\n # Upload the XML\n file_field = browser.driver.find_element_by_id('pageBeannewFormFile')\n file_field.send_keys(tmp.name)\n\n upload_btn = browser.driver.find_element_by_id('cbuttonupload')\n upload_btn.click()\n\n browser.wait_for(By.CSS_SELECTOR, '.infoErrorMessages')\n\n run_btn = wait.until(\n EC.element_to_be_clickable((By.ID, 'PAGE_BUTTONS_admconfigure_notification_templaterun_xsl'))\n )\n\n cwh = browser.driver.current_window_handle\n\n run_btn.click()\n time.sleep(1)\n\n # Take a screenshot\n found_win = False\n for handle in browser.driver.window_handles:\n browser.driver.switch_to_window(handle)\n if 'beanContentParam=htmlContent' in browser.driver.current_url:\n browser.driver.set_window_size(browser.config.get('screenshot', 'width'), 600)\n with open(html_path, 'w+b') as html_file:\n html_file.write(browser.driver.page_source.encode('utf-8'))\n print('Saved output: %s' % html_path)\n if browser.driver.save_screenshot(png_path):\n print('Saved screenshot: %s' % png_path)\n else:\n print('Failed to save screenshot')\n found_win = True\n break\n\n if not found_win:\n print(Fore.RED + 'ERROR: Failed to produce output!' 
+ Fore.RESET)\n browser.driver.switch_to_window(cwh)\n tmp.close()", "def before_running(res, src_iso_path, dest_path, project, XML_FILE):\n path_list = [\"common_setting/Generic\",\"common_setting/DVD\"]\n if os.name == 'nt':\n if project.upper() == 'DVDFAB 8' or project.upper() == 'DVDFAB8':\n for path in path_list:\n tree, nodes = windows_xml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\n else:\n for path in path_list:\n tree, nodes = windows_xml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BDBurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder') \n else:\n if project.upper() == 'DVDFAB 8' or project.upper() == 'DVDFAB8':\n for path in path_list:\n tree, nodes = myxml.read_xml(XML_FILE, path, xml_temp)\n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\t\n else:\n for path in path_list:\n tree, nodes = myxml.read_xml(XML_FILE, path, xml_temp) \n fab_logpath = get_xml_value(nodes[0], 'LogFolder')\n burn_engine_type = get_xml_value(nodes[0], 'BDBurnEngineType')\n tempfolder_path = get_xml_value(nodes[0], 'TempFolder')\t\t\t\t\n \n dest_path = tempfolder_path if '.ISO' == os.path.splitext(res[6].upper())[1] else dest_path\n initlog('before running, dest_path is: %s' % dest_path) \n tempfolder_path = ''.join((tempfolder_path, 'ReportCrash')).replace(\"_nbsp;\",\" \")\n fab_logpath = fab_logpath.replace(\"_nbsp;\",\" \")\n initlog(\"fab_logpath is: %s; tempfolder_path is: %s\" %(fab_logpath, tempfolder_path))\n logpath = (fab_logpath, tempfolder_path) \n remove_fab_logfile(fab_logpath)\n return dest_path, logpath, burn_engine_type", "def buildtransportxml(self):\n try:\n subprocess.check_call([\"emanegentransportxml\", \"platform.xml\"], cwd=self.session.session_dir)\n except subprocess.CalledProcessError:\n logger.exception(\"error running emanegentransportxml\")", "def get_match_criteria(self):\n #-- factory attributes ----\n print(\"\"\"\nWhat glidein/factory attributres are you using in the match expression?\nI have computed my best estimate for your match string,\nplease verify and correct if needed.\n\"\"\")\n default_factory_attributes = string.join(self.extract_factory_attrs(), ',')\n factory_attributes = raw_input(\"Factory attributes: [%s] \"%default_factory_attributes)\n if factory_attributes == \"\":\n factory_attributes = default_factory_attributes\n if factory_attributes == \"\":\n factory_attributes = []\n else:\n factory_attributes = string.split(factory_attributes, ',')\n\n #--- job_attributes --\n print(\"\"\"\nWhat job attributes are you using in the match expression?\nI have computed my best estimate for your match string,\nplease verify and correct if needed.\n\"\"\")\n default_job_attributes = string.join(self.extract_job_attrs(), ',')\n job_attributes = raw_input(\"Job attributes: [%s] \" % default_job_attributes)\n if job_attributes == \"\":\n job_attributes = default_job_attributes\n if job_attributes == \"\":\n job_attributes = []\n else:\n job_attributes = string.split(job_attributes, ',')\n\n #--- create xml ----\n data = \"\"\"\n%(indent2)s<group name=\"%(group_name)s\" enabled=\"True\">\n%(indent3)s<match match_expr=%(match_string)s 
start_expr=\"True\">\n%(factory_attributes)s\n%(job_attributes)s\n%(indent3)s</match>\n%(indent2)s</group>\n\"\"\" % \\\n{ \"indent2\": common.indent(2),\n \"indent3\": common.indent(3),\n \"indent4\": common.indent(4),\n \"group_name\": self.group_name(),\n \"match_string\": glideinwms.lib.xmlFormat.xml_quoteattr(self.match_string()),\n \"factory_attributes\": self.factory_data(factory_attributes),\n \"job_attributes\": self.job_data(job_attributes),\n}\n return data", "def makeCourse( xmlFile, genPath, importPaths, commonFiles, rendererContent=True):\n\ttry:\n\n\t\t# parse the command line\n\t\tConfig.add_option('--verbose', help='Set verbosity to maximum', dest='verbosity', default=0, action='store_const', const=2)\n\t\tConfig.add_option('-v', '--verbosity', help='Set the verbosity level (0: quiet, 1: display the command lines, 2: display command lines and their outputs', dest='verbosity', default=0, type=int)\n\t\tConfig.add_option('-d', '--debug', help='Create the files in the debug/ folder, instead of in a temporary one', dest='debug', action='store_true', default=False)\n\t\tConfig.add_option('-f', '--force', help='Force the generation of the documents, even if nothing changes from last run', dest='force', action='store_true', default=False)\n\t\tConfig.add_option('-q', '--quick', help='Quick pdf generation (do not compile twice the latex, do not produce handout, etc.)', dest='quick', action='store_true', default=False)\n\t\tConfig.add_option('-w', '--wordpress', help='Publish to wordpress', dest='wordpress', default=False, action='store_true')\n\t\tConfig.add_option('-c', '--HTMLcorrection', help='Display an HTML correction', dest='HTMLcorrection', default=False, action='store_true')\n\t\tConfig.add_option('-s', '--shared', help='Copy the required files to the <shared> path (via ssh)', default=False,\taction='store_true')\n\t\tConfig.parse()\n\t\targs = Config.args\n\t\toptions = Config.options\n\t\tConfig.importPaths = importPaths \n\t\tConfig.commonFiles = commonFiles\n\t\tConfig.allSessions = { x.__name__:x for x in Session.__subclasses__()}\t# list of the created session classes\n\t\tConfig.rendererContent = rendererContent\n\t\t\n\t\t# clean the debug directory in debug mode\n\t\tbasePath = os.path.abspath('.')+'/'\t\t\t# base path (from where the script is run, because the path are relative)\n\t\tif options.debug:\n\t\t\tif os.path.exists('debug/'):\n\t\t\t\trunCommand(['rm','-rf','debug/'])\n\n\t\t# open and parse the course file\n\t\twith codecs.open(xmlFile, encoding='utf-8') as f:\n\t\t\tbs = BeautifulSoup(f, features=\"xml\")\n\n\n\t\t# build the recursively the sessions\n\t\ttop = createTagSession( bs, father=None )\t\t# bs.contents[0]\n\t\tsessionsToBuild = Session.sessionsToBuild\t\t# get the list of the sessions object\n\t\t\n\n\t\t\"\"\"\n\t\timportFiles( bs.contents[0], importPaths)\n\n\t\t# get the list of sessions we can build (with a 'make' method)\n\t\tbuildableSessions = { x.__name__:x for x in Session.__subclasses__() if 'make' in x.__dict__ }\n\n\t\t#This set the PATH for PyDev only...\n\t\tos.environ['PATH'] = os.environ['PATH']+':'+os.getenv('PATH')\n\n\n\t\t# build the list of Sessions to build\n\t\tsessionsToBuild = []\n\t\tfor name,session in buildableSessions.items():\n\t\t\tsessionsToBuild.extend( session(tag, commonFiles) for tag in bs(name) )\n\t\t\"\"\"\n\t\t\n\t\t\n\n\t\t# if possible, load the previous xml file, and look for the differences\n\t\tdirName,baseName = split(xmlFile) \n\t\ttry:\n\t\t\twith open(dirName+\"/.\"+baseName+\".makeCourse\", 
\"rb\") as f:\n\t\t\t\tdata = load( f )\n\t\t\t\tfor s in sessionsToBuild:\n\t\t\t\t\tif s.name in data:\n\t\t\t\t\t\ts.checkDifferences( data[s.name] )\n\t\texcept IOError:\n\t\t\tpass\n\n\n\t\t# build every argument in the command line arguments\n\t\tsomethingHasBeDone = False\n\t\tfor s in sessionsToBuild:\n\t\t\tif (not args) or (\"all\" in args) or (s.name in args) or (s.type in args):\n\t\t\t\t\n\t\t\t\tcd( basePath)\n\t\t\t\t\n\t\t\t\t# check if something has to be done\n\t\t\t\tif s.shouldBeMake(basePath+'/'+genPath, options) or options.force:\n\t\t\t\t\tsomethingHasBeDone = True\n\n\t\t\t\t\t#Make one build (TP, course, etc.)\n\t\t\t\t\tprint ( Fore.BLUE+\"*) Make \"+Style.BRIGHT+s.name+Fore.RESET+Style.NORMAL)\n\n\t\t\t\t\t# make temp directory and copy all the file in resources dir\n\t\t\t\t\tif options.debug:\n\t\t\t\t\t\ttmp = \"debug/\"+s.name+'/'\n\t\t\t\t\t\tcreateDirectory(tmp)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp = mkdtemp()\n\n\t\t\t\t\ts.prepareResources(tmp )\n\t\t\t\t\tcd( tmp)\n\n\t\t\t\t\t# call the custom function associated with the type, to produce the documents\n\t\t\t\t\ts.make(options)\n\n\t\t\t\t\t# then move the files in the right place\n\t\t\t\t\tfor f in s.files(options):\n\t\t\t\t\t\tcreateDirectory( basePath+'/'+genPath.format( **s.dict ) )\n\t\t\t\t\t\tnewFile = basePath+'/'+genPath.format( **s.dict )+f\n\t\t\t\t\t\tif not os.path.exists(f):\n\t\t\t\t\t\t\tprint( Fore.YELLOW+'The file '+f+' has not been created by '+s.type+' function !'+Fore.RESET)\n\t\t\t\t\t\trunCommand( ['cp', f, newFile])\n\n\t\t\t\t\t# del the temporary directory or clean debug directory\n\t\t\t\t\tif not options.debug:\n\t\t\t\t\t\trunCommand( ['rm', '-rf', tmp])\n\t\t\t\telse:\n\t\t\t\t\tif options.verbosity>0:\n\t\t\t\t\t\tprint( Fore.BLUE + \"*) Nothing changed for \"+Style.BRIGHT+s.name+Style.NORMAL+\", skipped\"+Fore.RESET)\n\n\n\n\t\tif not somethingHasBeDone:\n\t\t\tprint( Fore.BLUE + \"Nothing has changed, nothing to do, so nothing has been done...\" + Fore.RESET)\n\n\n\t\t# save the data file\n\t\tdata = {L.name: {key:md5(str(val).encode('utf-8')).hexdigest() for key,val in L.dict.items()} for L in sessionsToBuild }\n\t\tcd( basePath)\n\t\twith open(dirName+\"/.\"+baseName+\".makeCourse\", 'wb') as f:\n\t\t\tdump( data, f)\n\n\n\n\n\n\n\n\texcept mkcException as err:\n\t\tprint( err )", "def test_xml_to_attributes_and_back(self):\n # etree_to_dict(e) and dict_to_etree(d)\n e = open(EXAMPLE_SECTION_FILENAME, 'r').read()\n e = ET.XML(e)\n d = etree_to_dict(e)\n all_attributes = read_section_dict(d)\n d = make_section_dict(*all_attributes)\n e = dict_to_xml_str(d)\n e = prettify(e)\n\n if SHOW_RESULTS:\n print(e)\n\n xml_file = StringIO(unicode(e))\n self.assertEqual(verify_files(xml_file, open(SECTION_DTD_FILENAME, 'r')), True)", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def create_entity_claim_input_file_doc_ret():\n claim_doc = open(r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\rawClaim_SW.txt\").read().strip()\n \"remove the stop words from the claims\"\n SW_doc = r\"C:\\study\\technion\\MSc\\Thesis\\Y!\\stopWords.xml\"\n stopWords_list = []\n claims_no_SW_dict = {}\n with open(SW_doc, 'r') as f:\n line = f.readline()\n while line !=\"\":\n if \"<word>\" in line:\n 
stopWords_list.append(line.split(\"<word>\")[1].split(\"</word>\")[0])\n line = f.readline()\n \n for i,line in enumerate(claim_doc.split(\"\\n\")):\n clmLMdocLM_doc_ret_query_file = open(\"LMdocLM_doc_ret_query_file_clm_\"+str(i+1),\"wb\")\n clmLMdocLM_doc_ret_query_file.write(\"<parameters>\\n\")\n curr_claim_words = line.split(\"|\")[1].lower().split()\n curr_entity_words = line.split(\"|\")[0].lower().split()\n noSW_claim = \"\"\n noSW_entity = \"\"\n for word in curr_claim_words:\n if word not in stopWords_list: \n noSW_claim += word+\" \"\n for word in curr_entity_words:\n if word not in stopWords_list: \n noSW_entity += word+\" \"\n# clmLMdocLM_doc_ret_query_file.write(\"<query><number>\"+str(i+1)+\"</number><text>\"+noSW_entity+\"|\"+noSW_claim+\"</text></query>\\n\")\n# clmLMdocLM_doc_ret_query_file.write(\"</parameters>\")\n# clmLMdocLM_doc_ret_query_file.close()\n claims_no_SW_dict[str(i+1)] = (noSW_entity,noSW_claim)\n save_pickle(\"claims_no_SW_dict\", claims_no_SW_dict)", "def write_and_clean(urn, lang, parsed, citations,target):\n\n os.makedirs(\"cache\", exist_ok=True)\n\n\n if \"grc\" not in urn and \"lat\" not in urn:\n type_text = \"translation\"\n else:\n type_text = \"edition\"\n\n \"\"\"\n Change TEI.2 tag to TEI \n \"\"\"\n # We change the main tag\n TEI = parsed.getroot()\n # We change the root tag to TEI\n TEI.tag = \"TEI\"\n # We change the main tag\n TEI = parsed.getroot()\n\n \"\"\"\n Moving every children of //body into a new div with a @n attribute\n \"\"\"\n body = parsed.xpath(\"//body\")[0]\n # Get its children\n child_body = body.getchildren()\n\n # For each child of body, remove it from body\n for child in child_body:\n body.remove(child)\n\n # Create a new div with the informations\n div = etree.Element(\n \"div\",\n attrib = { \n \"type\":type_text,\n \"n\": urn,\n \"{http://www.w3.org/XML/1998/namespace}lang\" : lang\n }\n )\n\n # Add the full list of children of body to the newly created div\n div.extend(child_body)\n # Add this new div in body\n body.append(div)\n\n # Add them to the current encodingDesc\n refsDecl = \"\"\"<tei:refsDecl n=\"CTS\" xmlns:tei=\"http://www.tei-c.org/ns/1.0\">\\n\"\"\" + \"\\n\".join([str(citation) for citation in citations]) + \"\"\"\\n</tei:refsDecl>\"\"\"\n # Parse it\n refsDecl = etree.fromstring(refsDecl)\n # Find encodingDesc\n encodingDesc = parsed.xpath(\"//encodingDesc\")[0]\n encodingDesc.append(refsDecl)\n\n \"\"\"\n Search for old //encodingDesc/refsDecl and refsDecl/state and correct them\n \"\"\"\n refsDecls = parsed.xpath(\"//encodingDesc/refsDecl[@doctype]\")\n for refsDecl in refsDecls:\n refsDecl.set(\"n\", refsDecl.get(\"doctype\"))\n del refsDecl.attrib[\"doctype\"]\n\n states = parsed.xpath(\"//encodingDesc/refsDecl/state\")\n for state in states:\n state.tag = \"refState\"\n\n \"\"\"\n Change language@id to ident\n \"\"\"\n languages = parsed.xpath(\"//langUsage/language[@id]\") + parsed.xpath(\"//langUsage/lang[@id]\")\n for lang in languages:\n lang.set(\"ident\", lang.attrib[\"id\"])\n del lang.attrib[\"id\"]\n\n \"\"\"\n Change pb@id to pb@n\n \"\"\"\n pbs = parsed.xpath(\"//pb[@id]\")\n for pb in pbs:\n pb.set(\"n\", pb.attrib[\"id\"])\n del pb.attrib[\"id\"]\n\n \"\"\"\n Clean keyboarding/p\n \"\"\"\n ps = parsed.xpath(\"//sourceDesc/p\")\n for p in ps:\n p.getparent().remove(p)\n\n \"\"\"\n Clear attributes of text and body\n \"\"\"\n body_text = parsed.xpath(\"//body\") + parsed.xpath(\"//text\")\n for tag in body_text:\n for key in tag.attrib:\n del tag.attrib[key]\n\n\n \"\"\"\n Clear 
refsDecl/step\n \"\"\"\n refsdecls_step = parsed.xpath(\"//refsDecl/step/parent::refsDecl\")\n for step_parent in refsdecls_step:\n step_parent.getparent().remove(step_parent)\n\n \"\"\"\n Clear refsDecl/step\n \"\"\"\n refsdecls_step = parsed.xpath(\"//refsDecl/step/parent::refsDecl\")\n for step_parent in refsdecls_step:\n step_parent.getparent().remove(step_parent)\n\n \"\"\"\n Fix anchored\n \"\"\"\n anchoreds = parsed.xpath(\"//*[@anchored='yes']\")\n for anchored in anchoreds:\n anchored.set(\"anchored\", \"true\")\n\n # Convert to xml\n \"\"\" \n Create a new document so we can have tei namespace \n \"\"\"\n # And now some other CTS Magic\n New_Root = etree.Element(\n \"{http://www.tei-c.org/ns/1.0}TEI\",\n nsmap = { None : \"http://www.tei-c.org/ns/1.0\" } # Creating a new element allows us to use a default namespace\n )\n New_Root.text = \"\\n\"\n # Add children of old root to New_Root\n New_Root.extend(TEI.getchildren())\n\n # We create a new document\n New_Doc = etree.ElementTree(New_Root)\n # And now some other CTS Magic\n \n New_Doc = P4P5(New_Doc)\n\n # save xml\n os.makedirs(os.path.dirname(target), exist_ok=True)\n with open (target, \"w\") as xmlfile:\n xmlfile.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\"\"\"+etree.tostring(New_Doc, encoding=str))\n\n # And now we write cts informations\n try:\n cts.cts_metadata(urn)\n except Exception as E:\n print(E)", "def write_to_xml(dictData, metadata, xmlfile):\n\tfout = codecs.open(xmlfile, 'w', 'utf-8')\n\tfout.write('<?xml version = \"1.0\" encoding = \"UTF-8\" standalone = \"no\" ?>\\n')\n\tfout.write('<?xml-stylesheet type=\"text/xsl\" href=\"maketable.xsl\"?>\\n')\n\tfout.write('<root>\\n')\n\tfout.write('<meta>\\n')\n\tfor key, value in metadata.items():\n\t\tfout.write('<' + key + '>' + value + '</' + key + '>\\n')\n\tfout.write('</meta>\\n')\n\tfout.write('<content>\\n')\n\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dictData:\n\t\txmlline = ''\n\t\txmlline += '<word><headword>' + hw + '</headword><meanings>'\n\t\tfor meaning in meanings:\n\t\t\txmlline += '<m>' + meaning + '</m>'\n\t\txmlline += '</meanings>'\n\t\txmlline += '<verse>'\n\t\tlines = verse.split('<BR>')\n\t\tfor line in lines:\n\t\t\txmlline += '<line>' + line + '</line>'\n\t\txmlline += '</verse>'\n\t\txmlline += '<verseNumber>' + verseNumDetails + '</verseNumber>'\n\t\txmlline += '<pageNumber>' + pageNumDetails + '</pageNumber></word>'\n\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\tfout.write(xmlline + '\\n')\n\t\txmlline = ''\n\tfout.write('</content>\\n</root>')\n\tfout.close()\n\n\t# Give some summary to the user\n\tprint('XML file generated. 
Success!')\n\tprint('{} metadata lines and {} content lines written to XML file.'.format(len(metadata), len(dictData)))", "def create_conf_xml(self):\n path = os.path.join(\n self.buildout['buildout']['parts-directory'],\n self.name)\n if not os.path.isdir(path):\n os.makedirs(path)\n\n xml_path = os.path.join(path, 'uwsgi.xml')\n\n conf = \"\"\n for key, value in self.conf.items():\n if value.lower() in ('true', 'on', 'yes'):\n conf += \"<%s/>\\n\" % key\n elif value and value.lower() not in ('false', 'off', 'yes'):\n conf += \"<%s>%s</%s>\\n\" % (key, value, key)\n\n\n requirements, ws = self.egg.working_set()\n eggs_paths = [dist.location for dist in ws]\n eggs_paths.extend(self.get_extra_paths())\n # order preserving unique\n unique_egg_paths = []\n for p in eggs_paths:\n if p not in unique_egg_paths:\n unique_egg_paths.append(p)\n\n for path in map(realpath, unique_egg_paths):\n conf += \"<pythonpath>%s</pythonpath>\\n\" % path\n\n f = open(xml_path, 'w')\n f.write(\"<uwsgi>\\n%s</uwsgi>\" % conf)\n f.close()\n return xml_path", "def test_01_FindXml(self):", "def xml_to_conll(self, xml_file_path):\n\n if not os.path.exists(CONLL_PATH):\n self.create_directories(CONLL_PATH)\n\n\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Output Folder\n chapter_output = open(CONLL_PATH+os.path.split(file)[-1]+'.conll', 'w', encoding='utf8')\n\n print('Converting: ' + chapter_input.name + ' to Conll09 file: ' + chapter_output.name)\n\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n for sentence in chapter_input.find_all('s'):\n line_id = 0\n for terminal in sentence.find_all('t'):\n line_id, terminal_id, form, lemma, plemma = line_id+1, terminal.get('id'), terminal.get('word'), terminal.get('lemma'), terminal.get('lemma')\n pos, ppos = terminal.get('pos'), terminal.get('pos')\n feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1 = \"_\" * 9 # <3 Python!\n chapter_output.write(\"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\"\n \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\t\" \"%s\" \"\\n\"\n % (str(line_id)+\"-\"+terminal_id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apred1))\n chapter_output.write(\"\\n\")\n\n chapter_output.close()\n\n print(\"Done!\")", "def test_01_Xml0(self):\n l_xml = self.m_xml.light_sect[0]\n print(PrettyFormatAny.form(l_xml, 'C1-01-A - XML'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_LIGHT_NAME_0)\n self.assertEqual(l_xml.find('DeviceFamily').text, TESTING_DEVICE_FAMILY_INSTEON)", "def test_xml_safety_flag(self):\r\n\r\n self._setstaff_login()\r\n response = self._add_edx4edx()\r\n self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)\r\n\r\n def_ms = modulestore()\r\n course = def_ms.courses.get('{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR)), None)\r\n self.assertIsNone(course)", "def test_build(self):\n version = \"1.2.3\"\n input1, output1 = self.getArbitraryLoreInputAndOutput(version)\n input2, output2 = self.getArbitraryLoreInputAndOutput(version)\n\n self.howtoDir.child(\"one.xhtml\").setContent(input1)\n self.howtoDir.child(\"two.xhtml\").setContent(input2)\n\n self.builder.build(version, self.howtoDir, 
self.howtoDir,\n self.templateFile)\n out1 = self.howtoDir.child('one.html')\n out2 = self.howtoDir.child('two.html')\n self.assertXMLEqual(out1.getContent(), output1)\n self.assertXMLEqual(out2.getContent(), output2)", "def test_basic_xml(self):\n j2k = Jp2k(self.j2kfile)\n\n self.jp2h.box = [self.ihdr, self.colr]\n\n doc = ET.parse(BytesIO(b'<?xml version=\"1.0\"?><data>0</data>'))\n xmlb = glymur.jp2box.XMLBox(xml=doc)\n self.assertEqual(ET.tostring(xmlb.xml.getroot()),\n b'<data>0</data>')\n\n boxes = [self.jp2b, self.ftyp, self.jp2h, xmlb, self.jp2c]\n\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as tfile:\n j2k.wrap(tfile.name, boxes=boxes)\n jp2 = Jp2k(tfile.name)\n self.assertEqual(jp2.box[3].box_id, 'xml ')\n self.assertEqual(ET.tostring(jp2.box[3].xml.getroot()),\n b'<data>0</data>')", "def parseXML(xml_file, xml_file2):\r\n #tree = ET.ElementTree(file=xml_file)\r\n tree = ET.ElementTree(file=xml_file)\r\n #print (tree.getroot())\r\n root = tree.getroot()\r\n\r\n #tree2 = ET.ElementTree(file=xml_file2)\r\n tree2 = ET.ElementTree(file=xml_file2)\r\n root2 = tree2.getroot()\r\n\r\n #print (\"tag=%s, attrib=%s\" % (root.tag, root.attrib))\r\n from prettytable import PrettyTable\r\n t = PrettyTable(['N','Component', env, env2])\r\n count=1\r\n for child in root:\r\n for child2 in root2: \r\n if child.get('name') == child2.get('name'): \r\n if child.get('version') != child2.get('version'):\r\n if stg_filter == 1: \r\n if child.get('name')[:7].find(\"STAGING\") != 0:\r\n #print(child.get('name')[:7].find(\"STAGING\"))\r\n #print (\"---------STABLE-------\", child.get('name'), \"-->\" , child.get('version'), \"---------PROD-------\",child2.get('name'), \"-->\" , child2.get('version'))\r\n #print (child2.get('name'), \"------->\" , child2.get('version'))\r\n #print(\"hola\")\r\n #t.add_row([child.get('name'), child.get('version'), child2.get('version')])\r\n t.add_row([count,child.get('name'), child.get('version'), child2.get('version')])\r\n #t.add_row(['Bob', 19])\r\n count=count+1\r\n else:\r\n #print (\"---------STABLE-------\", child.get('name'), \"-->\" , child.get('version'), \"---------PROD-------\",child2.get('name'), \"-->\" , child2.get('version'))\r\n #print (child2.get('name'), \"------->\" , child2.get('version'))\r\n #print(\"hola\")\r\n #t.add_row([child.get('name'), child.get('version'), child2.get('version')])\r\n t.add_row([count,child.get('name'), child.get('version'), child2.get('version')])\r\n #t.add_row(['Bob', 19])\r\n count=count+1 \r\n print (t)", "def setUp(self):\r\n super(TestImport, self).setUp()\r\n self.content_dir = path(tempfile.mkdtemp())\r\n self.addCleanup(shutil.rmtree, self.content_dir)\r\n\r\n # Create good course xml\r\n self.good_dir = self.create_course_xml(self.content_dir, self.BASE_COURSE_KEY)\r\n\r\n # Create run changed course xml\r\n self.dupe_dir = self.create_course_xml(self.content_dir, self.DIFF_KEY)\r\n\r\n # Create course XML where TRUNCATED_COURSE.org == BASE_COURSE_ID.org\r\n # and BASE_COURSE_ID.startswith(TRUNCATED_COURSE.course)\r\n self.course_dir = self.create_course_xml(self.content_dir, self.TRUNCATED_KEY)", "def xml_parser_specific(request, tmpdir_factory):\n testdir = os.path.dirname(__file__)\n xmlfile = testdir + \"/specific.xml\"\n tmpfile = str(tmpdir_factory.mktemp('data').join('basic_trunc.xml'))\n xml_truncate(request.param, xmlfile, tmpfile)\n xml = vasprun.Xml(tmpfile, event = False)\n return xml", "def buildGPXDocument(gpxFileName, someGPXDocument):\n # obtains the DOM representation of the GPX file\n # 
(the content of the file instantiated in a structure of xml.dom.minidom)\n dom = parse(gpxFileName)\n for trk in dom.getElementsByTagName(\"trk\"):\n someGPXDocument.setTrack(buildTrack(trk))\n for rte in dom.getElementsByTagName(\"rte\"):\n someGPXDocument.setRoute(buildRoute(rte)) \n someGPXDocument.setWayPoints(buildWayPointList(dom))", "def example_xml_file40():\n return load_xml('datacite-v4.0-full-example.xml')", "def sample_xml(opts,file):\r\n with open(file, opts) as xml:\r\n return xml.read()", "def testBuildElement(self):\n self.assertEqual(\n b'<ColorCorrectionRef ref=\"uniqueId\"/>\\n',\n self.ccr.xml\n )", "def thredds_catalog(self, tmpdir_factory):\n input_dir = os.path.abspath(\"esacci_esgf/test_input_catalogs\")\n output_dir = str(tmpdir_factory.mktemp(\"output\", numbered=True))\n # Process all catalogs in input dir and create aggregations with WMS\n test_files = glob(\"{}/*.xml\".format(input_dir))\n assert test_files, \"No test catalogs found\"\n\n pb = ProcessBatch([\"-aw\", \"-o\", output_dir] + test_files)\n pb.do_all()\n tree = ET.ElementTree()\n tree.parse(os.path.join(output_dir, os.listdir(input_dir)[0]))\n return tree.getroot()", "def test_02_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'Xml'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')\n self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')\n self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')\n self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')\n self.assertEqual(self.m_xml.button.tag, 'Button')\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')\n self.assertEqual(self.m_xml.controller.tag, 'Controller')\n self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')\n self.assertEqual(self.m_xml.light.tag, 'Light')", "def test_xml_from_dict(self):\n d = make_section_dict() # using default values\n e = dict_to_xml_str(d)\n e = prettify(e)\n\n if SHOW_RESULTS:\n print(e)\n\n xml_file = StringIO(unicode(e))\n self.assertTrue(verify_files(xml_file, open(SECTION_DTD_FILENAME, 'r')))", "def xml_bdsc(alignment, data, outdir, yaml, yaml_dir, yaml_glob, clock, mcmc, length, hot, intervals, prefix, sample_prior, model_prior, tag):\n\n if yaml_dir is not None:\n yaml_files = {f.stem: f for f in yaml_dir.glob(f\"{yaml_glob}\")}\n else:\n yaml_files = {prefix: yaml}\n\n outdir.mkdir(parents=True, exist_ok=True)\n\n for prefix, y in yaml_files.items():\n\n bdsc = BirthDeathSkylineContemporary(\n alignment=alignment,\n data=data,\n clock_model=clock,\n chain_type=mcmc,\n chain_length=length,\n chain_number=hot+1,\n prefix=prefix,\n sample_prior=sample_prior\n )\n\n bdsc.print_configuration()\n bdsc.check_configuration()\n\n config = bdsc.read_config(file=yaml)\n\n # Set model prior configuration\n model_priors = config.get('priors').get('model')\n # Modify the model prior configs if settings are passed\n if model_prior:\n model_priors = modify_model_priors(model_priors, model_prior, tag, prefix)\n\n bdsc.set_model_priors(prior_config=model_priors, distribution=True)\n\n # Set clock prior configuration\n clock_priors = config.get('priors').get('clock')\n bdsc.set_clock(prior_config=clock_priors)\n\n if intervals:\n # Set slice configurations and overwrite associated priors\n slice_config = config.get('priors').get('intervals')\n bdsc.set_slices(slice_config=slice_config)\n\n bdsc.construct_template(\n xml=outdir / f'{prefix}.xml'\n )", "def main():\n #short GPS Test\n filename = 
'KML_short_test.kml'\n gps_filename = 'gps_short_test.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()\n\n #Repeat test\n filename = 'KML_repeat_test1.kml'\n gps_filename = 'gps_1.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()\n\n filename = 'KML_repeat_test2.kml'\n gps_filename = 'gps_1.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()", "def main(self):\n root = etree.Element(\"OpenSCENARIO\")\n self.get_header(root)\n self.get_parameter_declarations(root)\n etree.SubElement(root, \"CatalogLocations\")\n self.get_road_network(root)\n self.get_entities(root)\n storyboard = etree.SubElement(root, \"Storyboard\")\n self.get_init(storyboard)\n story = etree.SubElement(storyboard, \"Story\")\n story.set(\"name\", \"OSC Generated Story\")\n act = etree.SubElement(story, \"Act\")\n act.set(\"name\", \"OSC Generated Act\")\n self.get_maneuvers(act)\n self.get_story_start_trigger(act)\n self.get_story_stop_trigger(act)\n self.get_end_eval_criteria(storyboard)\n\n generated_xml = etree.tostring(root)\n self.write_xosc(generated_xml)", "def example_xml_file41():\n return load_xml('datacite-v4.1-full-example.xml')", "def test_valid_xml(self):\r\n self.build_problem()\r\n self.assertTrue(True)", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def collect_data(self, output_dir=os.path.join(ROOT_DIR, \"output\", \"license_check.out\")):\n # read rules from RULE_XML\n _dom = xml.dom.minidom.parse(RULE_XML)\n _root = _dom.documentElement\n if _root:\n items_node = _root.getElementsByTagName(\"items\")\n if items_node:\n # load rule.xml into item_list\n item_list = items_node[0].getElementsByTagName(\"item\")\n license_check_result_dict = {}\n # iterate to check every item\n for _item in item_list:\n product = _item.getAttribute('product')\n # the product is exit if there are one or more conditions fit\n if product in license_check_result_dict.keys():\n license_check_result_dict[product] = \\\n license_check_result_dict[product] or self.check_product(_item)\n else:\n license_check_result_dict[product] = self.check_product(_item)\n # write dict to json file\n 
ProcessJson.output_json_file(license_check_result_dict, output_dir)", "def _construct_data_xml(self, xml_file_list):\n award_dict = {}\n award_list = []\n for xml_file in xml_file_list:\n xml_file.seek(0)\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n for response in root:\n temp_dict = {}\n for award in response:\n if award.tag == 'entry':\n continue\n try:\n # temp_dict[award.tag].append(award.text)\n temp_dict[award.tag] = award.text\n except KeyError:\n print(\"KeyError\")\n # temp_dict[award.tag] = [award.text]\n\n # if 'entry' in temp_dict.keys():\n # del temp_dict['entry']\n if len(temp_dict) > 0:\n award_list.append(temp_dict)\n\n return award_list", "def generate_expected_file(self, expected_file, xml_name):\n\t\tlogging.info('Gerando arquivo de documentos esperados')\n\t\tcontent = self.read_xml(xml_name)\n\n\t\twith open(expected_file, 'w', newline='') as csvfile:\n\t\t\tfieldnames = ['QueryNumber', 'DocNumber', 'DocVotes']\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t\t\twriter.writeheader()\n\t\t\tfor index in range(0, len(content['QueryNumber'])):\n\t\t\t\tcount_results = 0\n\t\t\t\tlogging.info('Escrevendo documentos da consulta '+str(index+1)+'/'+str(len(content['QueryNumber'])))\n\t\t\t\tfor result in content['Records'][index]:\n\t\t\t\t\twriter.writerow({'QueryNumber': content['QueryNumber'][index], 'DocNumber': result[0], \n\t\t\t\t\t\t\t\t\t 'DocVotes': result[1]})\n\t\t\t\t\tcount_results += 1\n\t\t\t\t\tif count_results == int(content['Results'][index]): break", "def test_XmlDumpAllRevs(self):\n pages = get_entries('article-pear.xml', allrevisions=True)\n self.assertLength(pages, 4)\n self.assertEqual('Automated conversion', pages[0].comment)\n self.assertEqual('Pear', pages[0].title)\n self.assertEqual('24278', pages[0].id)\n self.assertTrue(pages[0].text.startswith('Pears are [[tree]]s of'))\n self.assertEqual('Quercusrobur', pages[1].username)\n self.assertEqual('Pear', pages[0].title)", "def _get_all_xml(self):\n\n # add all xml files to list\n for file in os.listdir(self._hero_folder):\n if file.endswith(\".xml\"):\n self._xml_list.append(file)\n\n # read all attr, skill, spell, fight_talent entries of hero file\n for _, value in enumerate(self._xml_list):\n name = value.replace(\".xml\", '')\n hero_root = self._parse_xml(value)\n attrs = self._read_attributes(hero_root)\n skills = self._read_skills(hero_root)\n spells = self._read_spells(hero_root)\n fight_talents = self._read_fight_talents(hero_root)\n advantages = self._read_advantages(hero_root)\n special_skills = self._read_special_skills(hero_root)\n\n # some entries are both skill and special skill, e.g.\n # \"Ritualkenntnis: Hexe\", special skill does not need to be tested\n # so it is removed\n for skill in skills:\n for index, special_skill in enumerate(special_skills):\n if skill.name == special_skill.name:\n special_skills.pop(index)\n\n self._heroes.update({name: Hero(name,\n hero_root,\n attrs,\n skills,\n spells,\n fight_talents,\n advantages,\n special_skills)})", "def sensibility_conformity_to_xml(xml_path):\n\tprint('ADDING SENSBIL and CFM to:', xml_path)\n\ttree = ET.parse(xml_path)\n\troot = tree.getroot()\n\n\tmetrics_dic = create_dict_from_xml(xml_path)\n\n\tvalsensibility = calculate_sensibility(metrics_dic)\n\tvalconformity = calculate_conformity(metrics_dic)\n\n\tsensibility_attributes = {\"name\": \"sensibility\", \"value\": str(valsensibility), \"symbol\": \"SENSBIL\",\n\t\t\t\t\t\t\t \"type\": \"similarity\", \"unit\": \"voxel\"}\n\tSENSBIL = 
ET.Element(\"SENSBIL\", attrib=sensibility_attributes)\n\tconformity_attributes = {\"name\": \"conformity\", \"value\": str(valconformity), \"symbol\": \"CFM\", \"type\": \"similarity\",\n\t\t\t\t\t\t\t \"unit\": \"voxel\"}\n\tCFM = ET.Element(\"CFM\", attrib=conformity_attributes)\n\n\troot[2].insert(2, SENSBIL)\n\troot[2].insert(3, CFM)\n\ttree.write(xml_path)", "def parse2016(filename, qdict, cdict):\n \n tree = ET.parse(filename)\n root = tree.getroot()\n\n for child in root:\n # Each child represents a new (original question, related question) pair\n orgq_id = child.attrib[\"ORGQ_ID\"]\n relq_id = child[2].attrib[\"THREAD_SEQUENCE\"]\n orgq_comment = []\n relq_comment = []\n # get orgq_comment, relq_comment\n orgq_subject = child[0].text if child[0].text != None else \"\"\n orgq_body = child[1].text if child[1].text != None else \"\"\n DUPLICATE = True if \"SubtaskA_Skip_Because_Same_As_RelQuestion_ID\" in child[2].attrib else False \n for rel in child[2]:\n if rel.tag == \"RelQuestion\":\n relq_subject = rel[0].text if rel[0].text != None else \"\"\n relq_body = rel[1].text if rel[1].text != None else \"\"\n elif rel.tag == \"RelComment\":\n c_text = rel[0].text\n orgq_c_label = rel.attrib[\"RELC_RELEVANCE2ORGQ\"]\n orgq_comment.append((c_text, orgq_c_label))\n relq_c_label = rel.attrib[\"RELC_RELEVANCE2RELQ\"]\n relq_comment.append((c_text, relq_c_label))\n\n if DUPLICATE is False:\n qdict[relq_id] = (relq_subject, relq_body)\n cdict[relq_id] = relq_comment\n \n if (orgq_id in qdict) != (orgq_id in cdict):\n print(\"WARNING qdict inconsistent with cdict\")\n elif orgq_id not in qdict:\n qdict[orgq_id] = (orgq_subject, orgq_body)\n cdict[orgq_id] = relq_comment\n else:\n cdict[orgq_id] = cdict[orgq_id] + orgq_comment\n \n return qdict, cdict", "def complete_xml_parsing(self):\n for item in self.entities:\n item.severity = self.parsed_severity\n item.cwes.extend(self.parsed_cwes)\n item.advisory_id = self.parsed_advisory_id\n item.attack_vector = self.parsed_attack_vector\n if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):\n cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)\n if self.parsed_cvss_temporal != '' \\\n and is_correct_score(self.parsed_cvss_temporal):\n cvss_v3.temporal_sc = self.parsed_cvss_temporal\n item.cvss_v3 = cvss_v3\n item.cvss_base_sc_v3 = self.parsed_cvss_base\n item.cvss_temporal_score_v3 = self.parsed_cvss_temporal\n item.published = self.parsed_date", "def cueword_statistics(self, xml_file_path):\n\n print('Extracting cueword statistics from:', xml_file_path, 'to:', CUEWORDS_STATS_PATH)\n\n if not os.path.exists(CUEWORDS_STATS_PATH):\n self.create_directories(CUEWORDS_STATS_PATH)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # For each file, open, parseXML\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n chapter_input = open(file, 'r', encoding='utf8')\n chapter_output = open(CUEWORDS_STATS_PATH+os.path.split(file)[-1]+'_stats.txt',\n 'w', encoding='utf8')\n\n # Try html.parser for ignoring lower and UPPER Tag and attr names\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n\n for sentence in chapter_input.find_all('s'):\n # Terminals and Semantics\n #terminals = sentence.find_all('t')\n semantics = sentence.find('sem')\n\n # If splitwords exist\n if semantics.find('splitwords'):\n splitwords = semantics.find('splitwords')\n splitword = splitwords.find_all('splitword')\n\n # For each 
splitword\n for s_w in splitword:\n\n # Get reference id\n # <splitword idref=\"x\">\n splitword_idref = s_w.get('idref')\n\n # Get corresponding terminal and its POS tag\n # <t id=\"x\" pos=\"ADJA\" word=\"unerschütterlichen\"/>\n terminal = sentence.find(id=splitword_idref).get('word')\n pos = sentence.find(id=splitword_idref).get('pos')\n\n #print(splitword_idref,'\\t',terminal,'\\t',pos)\n chapter_output.write('\\n' '=SPLITWORDS=' '\\n')\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n' %\n (splitword_idref, terminal, pos))\n\n # Find parts of splitword\n parts = s_w.find_all('part')\n part1 = parts[0].get('id')\n part2 = parts[1].get('id')\n\n for part in parts:\n part_word = part.get('word')\n part_id = part.get('id')\n #print(part_id,'\\t',part_word)\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (part_id, part_word))\n\n # Find corresponding frames\n frames = semantics.find('frames')\n frame = frames.find_all('frame')\n\n for frame_tag in frame:\n\n # skip first letter in case of n|Negation\n if frame_tag['name'] == NEGATION_FRAME_NAME:\n\n # Find target\n target = frame_tag.find('target')\n fenode = target.find('fenode')\n fenode_id = fenode.get('idref')\n\n # Check part ID if == target ID\n if part1 == fenode_id or part2 == fenode_id or splitword_idref == fenode_id:\n\n part_word = sentence.find(id=fenode_id).get('word')\n #print(fenode_id,'\\t','target')\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (fenode_id, 'TARGET'))\n\n\n # try and except blocks because of parser lowerUPPER errors\n\n #Find Negated\n try:\n negated = frame_tag.find('fe', {'name' : NEGATED_TAG_NAME})\n negated_fenode_idref = negated.find('fenode').get('idref')\n except AttributeError:\n negated = ''\n negated_fenode_idref = ''\n #print(negated_fenode_idref,'\\t',negated['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (negated_fenode_idref, negated['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #Find Scope\n try:\n scope = frame_tag.find('fe', {'name' : SCOPE_TAG_NAME})\n scope_fenode_idref = scope.find('fenode').get('idref')\n except AttributeError:\n scope = ''\n scope_fenode_idref = ''\n #print(scope_fenode_idref,'\\t',scope['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (scope_fenode_idref, scope['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #Find Focus\n try:\n focus = frame_tag.find('fe', {'name' : FOCUS_TAG_NAME})\n focus_fenode_idref = focus.find('fenode').get('idref')\n except AttributeError:\n focus = ''\n focus_fenode_idref = ''\n\n #print(focus_fenode_idref,'\\t',focus['name'].lower())\n try:\n chapter_output.write('%s' '\\t' '%s' '\\n'\n % (focus_fenode_idref, focus['name'].upper()))\n except TypeError:\n chapter_output.write('')\n\n #end if splitwords\n\n else:\n\n # If Frames exist\n if semantics.find('frames'):\n\n frames = semantics.find('frames')\n frame = frames.find_all('frame')\n\n chapter_output.write('\\n' '=SCOPE/FOCUS=' '\\n')\n\n for frame_tag in frame:\n\n # skip first letter in case of n|Negation\n if frame_tag['name'] == NEGATION_FRAME_NAME:\n\n #scope_list = []\n\n # Find target\n target = frame_tag.find('target')\n fenode = target.find('fenode')\n fenode_id = fenode.get('idref')\n\n word = sentence.find(id=fenode_id).get('word')\n pos = sentence.find(id=fenode_id).get('pos')\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n' % (fenode_id, word, pos))\n chapter_output.write('%s' '\\t' '%s' '\\n' % (fenode_id, 'TARGET'))\n\n #Find Negated\n if frame_tag.find('fe', 
{'name' : NEGATED_TAG_NAME}):\n try:\n negated = frame_tag.find('fe', {'name' : NEGATED_TAG_NAME})\n negated_fenode_idref = negated.find('fenode').get('idref')\n negated_word = sentence.find(id=negated_fenode_idref).get('word')\n negated_pos = sentence.find(id=negated_fenode_idref).get('pos')\n except AttributeError:\n negated = ''\n negated_fenode_idref = ''\n negated_word = ''\n negated_pos = ''\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (negated_fenode_idref, negated['name'].upper(), negated_word, negated_pos))\n\n\n # Resolve Terminals if Scope on a complex graph\n def resolve_non_terminals(idref):\n \"\"\" This function resolves a complex graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = sentence.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if sentence.find(id=e_id).get('word') is not None:\n try:\n edge_word = sentence.find(id=e_id).get('word')\n edge_words.append(edge_word)\n except:\n pass\n if sentence.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n scopelist = []\n\n if frame_tag.find('fe', {'name' : SCOPE_TAG_NAME}):\n scope = frame_tag.find('fe', {'name' : SCOPE_TAG_NAME})\n scope_fenode = scope.find_all('fenode')\n for s_f in scope_fenode:\n s_id = s_f.get('idref')\n if sentence.find(id=s_id).get('word') is not None:\n try:\n scope_word = sentence.find(id=s_id).get('word')\n #scope_pos = scope_word.get('pos')\n scopelist.append(scope_word)\n except:\n pass\n if sentence.find(id=s_id).get('word') is None:\n pass\n else:\n pass\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (s_id, scope['name'].upper(), resolve_non_terminals(s_id)))\n\n focuslist = []\n\n\n #chapter_output.write(str(scope_list))\n #Find Focus\n if frame_tag.find('fe', {'name' : FOCUS_TAG_NAME}):\n focus = frame_tag.find('fe', {'name' : FOCUS_TAG_NAME})\n focus_fenode = focus.find_all('fenode')\n for f_f in focus_fenode:\n f_id = f_f.get('idref')\n if sentence.find(id=f_id).get('word') is not None:\n try:\n focus_word = sentence.find(id=f_id).get('word')\n focus_pos = sentence.find(id=f_id).get('pos')\n focuslist.append(focus_word)\n except:\n pass\n if sentence.find(id=f_id).get('word') is None:\n pass\n else:\n pass\n\n chapter_output.write('%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\t' '%s' '\\n'\n % (f_id, focus['name'].upper(), focus_pos, focus_word, resolve_non_terminals(f_id)))\n\n\n chapter_output.close()\n\n print('Cuewords statistics extracted to:', chapter_output.name)", "def gexfFormat(profile, recom, filename):\n # ! 
ids are raw -> strings\n clickedItems = set(map(lambda x: str(x[0]), profile)) # set of clicked items\n recomItems = set() # set of recommended items\n \n with open(filename, 'w') as f:\n # write header\n f.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n\"\"\")\n f.write(\"\"\"<gexf xmlns:viz=\"http:///www.gexf.net/1.2/viz\" version=\"1.2\" xmlns=\"http://www.gexf.net/1.2\">\\n\"\"\")\n f.write(\"\"\"<graph defaultedgetype=\"undirected\" idtype=\"string\" type=\"static\">\\n\"\"\")\n \n # write edges\n f.write(\"\"\"<edges>\\n\"\"\")\n id = 0\n for click in range(0, len(profile)): # for all the clicks\n print(\"Number of processed clicks: \", click)\n for rec in recom[click]: # for the topN recommendations\n f.write(\"<edge id=\\\"\" + str(id) + \"\\\" source=\\\"\" + str(rec[0]) + \"\\\" target=\\\"\" + str(profile[click][0]) + \"\\\" weight=\\\"\" + str(rec[1]) + \"\\\"/>\\n\")\n recomItems.add(str(rec[0]))\n id += 1\n \n f.write(\"\"\"</edges>\\n\"\"\")\n \n f.write(\"\"\"<nodes>\\n\"\"\")\n # write clicked item-nodes in an outter ring\n angleStep = 2*np.pi / float(len(clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 1000 # outter\n for item in clickedItems: # for all the clicks\n target = str(item)\n f.write(\"<node id=\\\"\" + target + \"\\\">\\n\")\n f.write(\"\\t\")\n f.write(\"\"\"<viz:color r=\"255\" g=\"0\" b=\"0\"></viz:color>\\n\"\"\") # red color\n f.write(\"<viz:position x=\\\"\" + str(R * np.cos(angle)) + \"\\\" y=\\\"\" + str(R * np.sin(angle)) + \"\\\" z=\\\"0.0\\\"/>\") # ring position\n f.write(\"</node>\\n\")\n angle += angleStep\n \n # write the rest item-nodes in an inner ring\n angleStep = 2*np.pi / float(len(recomItems - clickedItems)) # polar coordinates angle step\n angle = 0 # polar coordinates angle [0, 2pi]\n R = 600 # outter\n for item in recomItems - clickedItems: # for the rest of the items\n target = str(item)\n f.write(\"<node id=\\\"\" + target + \"\\\">\\n\")\n f.write(\"\\t\")\n f.write(\"<viz:position x=\\\"\" + str(R * np.cos(angle)) + \"\\\" y=\\\"\" + str(R * np.sin(angle)) + \"\\\" z=\\\"0.0\\\"/>\") # ring position\n f.write(\"</node>\\n\")\n angle += angleStep\n \n f.write(\"\"\"</nodes>\\n\"\"\")\n f.write(\"\"\"</graph>\\n</gexf>\"\"\")", "def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))", "def buildNecessaryData():\n emaildata = loadEmailData()\n emailids = list(emaildata.keys())\n if os.stat('res/dictionary.txt').st_size == 0:\n print(\"Creating word list...\")\n createWordList(emailids, emaildata)\n print(\"Counting all words...\")\n englishwords = importDictionary()\n countAllWords(emaildata, englishwords)", "def get_results(self):\n d = {}\n# r = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-records':\n for record in child:\n attributes = record.attrib\n sample = attributes['sampleId']\n assay_id = attributes['assayId']\n genotype = attributes['genotypeId']\n quality = attributes['description'].split('.')[0]\n if re.match(r'rs\\d+', assay_id):\n if sample in d:\n if assay_id in d[sample]:\n for allele in list(genotype):\n if allele not in d[sample][assay_id]['genotype']:\n d[sample][assay_id]['genotype'] += allele\n if quality not in d[sample][assay_id]['quality']:\n d[sample][assay_id]['quality'].append(quality)\n else:\n d[sample][assay_id] = {'genotype': 
genotype, 'quality': [quality]}\n else:\n d[sample] = {assay_id: {'genotype': genotype, 'quality': [quality]}}\n# if sample in r:\n# if assay_id in r[sample]:\n# for allele in list(genotype):\n# if allele not in r[sample][assay_id]:\n# r[sample][assay_id] += allele\n# else:\n# r[sample][assay_id] = genotype\n# else:\n# r[sample] = {assay_id: genotype}\n# for k, v in r.items():\n# for k1, v1, in v.items():\n# if len(v1) == 1:\n# v[k1] += v1\n# pprint.pprint(r)\n# df = pd.DataFrame.from_dict(r).transpose()\n# print(df)\n# df.to_excel('snpcheck.xlsx')\n return d", "def makedocs(projectfolder):\n featuremodel_path = path.join(projectfolder, \"productline\", \"model.xml\")\n configs_path = path.join(projectfolder, \"productline\", \"configs\")\n bddfeatures_path = path.join(projectfolder, \"bddfeatures\")\n testreports_path = path.join(projectfolder, \"testreports\")\n\n fmparser = parsers.FeatureModelParser()\n resultsparser = parsers.TestResultsParser()\n feature_tree_renderer = ftrenderer.FeatureTreeRenderer()\n\n docs_dir = path.join(projectfolder, \"docs/generated\")\n if path.exists(docs_dir):\n shutil.rmtree(docs_dir)\n makedirs(docs_dir)\n\n lektor_templates_path = \"doc_templates\"\n utilities.sed_inplace(\n path.join(lektor_templates_path, \"aplet.lektorproject\"),\n r'<<PROJECT>>',\n CONFIG[\"project_name\"])\n\n products = {}\n product_names = get_product_names_from_configs_path(configs_path)\n for product_name in product_names:\n productconfig_filepath = path.join(projectfolder, \"productline/configs\", product_name + \".config\")\n product_html_report_name = \"report{0}.html\".format(product_name)\n product_html_results_src = path.join(testreports_path, product_html_report_name)\n product_xml_report_name = \"report{0}.xml\".format(product_name)\n product_xml_results_src = path.join(testreports_path, product_xml_report_name)\n\n with open(productconfig_filepath, \"r\") as productconfig_file:\n products[product_name] = {}\n products[product_name]['features'] = [feature.strip() for feature in productconfig_file.readlines()]\n\n current_product_lektor_dir = path.join(lektor_templates_path, \"content/products\", product_name)\n if not path.exists(current_product_lektor_dir):\n makedirs(current_product_lektor_dir)\n\n product_filepath = path.join(current_product_lektor_dir,\"contents.lr\")\n shutil.copyfile(path.join(lektor_templates_path, \"helpers/product_contents.lr\"), product_filepath)\n\n feature_model = fmparser.parse_from_file(featuremodel_path)\n gherkin_pieces = ftrenderer.gherkin_pieces_grouped_by_featurename(bddfeatures_path)\n gherkin_piece_test_statuses = resultsparser.get_gherkin_piece_test_statuses_for_product_from_file(product_xml_results_src)\n configparser = parsers.ProductConfigParser(feature_model.root_feature.name)\n product_features = configparser.parse_config(productconfig_filepath)\n feature_model.trim_based_on_config(product_features)\n feature_model.add_gherkin_pieces(gherkin_pieces)\n feature_model.calculate_test_statuses(gherkin_piece_test_statuses)\n\n feature_tree_renderer.build_graphviz_graph(feature_model.root_feature)\n feature_tree_renderer.render_as_svg(current_product_lektor_dir, \"feature_model\")\n\n utilities.sed_inplace(product_filepath, r'<<PRODUCT>>', product_name)\n product_test_status = feature_model.root_feature.test_status\n utilities.sed_inplace(product_filepath, \"<<TEST_STATUS>>\", product_test_status.name)\n\n # Copy test run html report to generated docs\n if path.exists(product_html_results_src):\n 
shutil.copyfile(product_html_results_src, path.join(current_product_lektor_dir, product_html_report_name))\n\n click.echo(\"- Generating feature model SVG...\")\n click.echo(featuremodel_path)\n\n feature_model = fmparser.parse_from_file(featuremodel_path)\n gherkin_pieces = ftrenderer.gherkin_pieces_grouped_by_featurename(bddfeatures_path)\n gherkin_piece_test_statuses = resultsparser.get_gherkin_piece_test_statuses_for_dir(testreports_path)\n feature_model.add_gherkin_pieces(gherkin_pieces)\n feature_model.calculate_test_statuses(gherkin_piece_test_statuses)\n\n feature_tree_renderer.build_graphviz_graph(feature_model.root_feature)\n feature_tree_renderer.render_as_svg(path.join(lektor_templates_path, \"content/\"), \"feature_model\")\n\n click.echo(\"- Building site\")\n lektor_cmd = [\"lektor\", \"--project\", lektor_templates_path, \"build\", \"-O\", path.abspath(docs_dir)]\n click.echo(\"Running: \" + subprocess.list2cmdline(lektor_cmd))\n subprocess.call(lektor_cmd)\n\n product_map_renderer = mapbuilder.ProductMapRenderer()\n productline_generated_filepath = path.join(docs_dir, \"index.html\")\n html = product_map_renderer.get_productmap_html(feature_model, products)\n utilities.sed_inplace(productline_generated_filepath, r'<<PRODUCTMAP>>', html)", "def create_xml_regression(lfiles, lsbj, foxml):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n e = doc.createElement('subject')\n e.setAttribute('id', 'case')\n\n for i, fn in enumerate(lfiles):\n v = doc.createElement('visit')\n v.setAttribute('id', \"subj{}\".format(i))\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', \"face\")\n t = doc.createTextNode(fn)\n f.appendChild(t)\n\n a = doc.createElement('age')\n x = doc.createTextNode(str(lsbj[i][\"age\"]))\n a.appendChild(x)\n\n\n v.appendChild(f)\n v.appendChild(a)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def load_xml_files_erisk(local_dir, token_position=0):\n users = {}\n prep = Preprocessor()\n c = 0\n for dir_path, dir_names, filenames in os.walk(local_dir):\n for name in filenames:\n tok = name.split(\"_\")\n if token_position > 0:\n key = tok[0] + tok[token_position]\n else:\n key = tok[token_position]\n key = key.strip(\".xml\")\n full_file = os.path.abspath(os.path.join(dir_path, name))\n dom = ET.parse(full_file, parser=ET.XMLParser(encoding=\"utf-8\"))\n writing = dom.findall('WRITING')\n for w in writing:\n title = w.find('TITLE').text\n text = w.find('TEXT').text\n post = title + \" \" + text\n # preprocess text\n new_text = prep.tokenize_reddit(post)\n\n if key in users.keys():\n users[key] += new_text + ' end_ '\n else:\n users[key] = new_text + ' end_ '\n\n c += 1\n print(\"Preprocessed chunk: \", c)\n\n return users", "def test_xml_files_with_missing_info():\n\n # Test when k is missing from constant type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/k_const.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from modified Arrhenius type reaction\n with 
pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when b is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/b_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_mod_arr.xml\"\n parser = XMLParser(xml_filename)", "def example_xml_file43():\n return load_xml('datacite-v4.3-full-example.xml')", "def buildxml2(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml2()\")\n # on master, control network bridge added earlier in startup()\n ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)\n self.buildplatformxml2(ctrlnet)\n self.buildnemxml()\n self.buildeventservicexml()", "def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))", "def make_inert_xml(mech, species, out_file):\n file_type = '.xml'\n _check_input_filetype(mech, file_type)\n out_file = _check_output_filetype(out_file, file_type)\n species = _enforce_species_list(species)\n mech_dir, in_loc, out_loc = _get_file_locations(mech, out_file)\n\n with open(in_loc, 'r') as f:\n data_in = f.read()\n\n indicator = '<reactionData id=\"reaction_data\">'\n start_loc = data_in.find(indicator) + len(indicator) + 1\n header = data_in[:start_loc]\n to_scan = data_in[start_loc:]\n rxn_indicator = '<!-- reaction'\n scanned = to_scan.split(rxn_indicator)[1:]\n new_rxns = ''\n\n for loc, rxn in enumerate(scanned):\n # if any of the desired species are in the current reaction, delete\n # it from the mechanism\n rxn = rxn_indicator + rxn\n eqn_indicators = ['<equation>', '</equation>']\n eqn_start = rxn.find(eqn_indicators[0]) + len(eqn_indicators[0])\n eqn_end = rxn.find(eqn_indicators[1])\n eqn = rxn[eqn_start:eqn_end]\n\n if not any([_find_specie_in_str(s, eqn) for s in species]):\n new_rxns += rxn\n\n new_mech = header + '\\n\\n' + 
new_rxns\n with open(out_loc, 'w') as f:\n f.writelines(new_mech)\n f.flush()", "def print_xml_config(config_dictionary,**kwargs):\n \n #Check if we have passed a filename\n #If not, pass a default filename\n if 'config_file' in kwargs:\n config_file = kwargs['config_file']\n else:\n config_file = 'ebtel_config.xml'\n \n #Open the file\n f = open(config_file,'w')\n \n #Print necessary header info\n f.write('<?xml version=\"1.0\" ?>\\n')\n f.write('<input>\\n')\n\n #Loop through dictionary and print to xml file\n for key in config_dictionary:\n #Print tab delimiter, brackets and keyword\n f.write('\\t<')\n f.write(key)\n f.write('>')\n #Check if entry is a list\n #If so print it as a list\n if isinstance(config_dictionary[key],list) or type(config_dictionary[key]).__name__ == 'ndarray':\n #Make temporary list\n temp = config_dictionary[key]\n #Skip to new line\n f.write('\\n')\n #Begin loop over list\n for i in range(len(config_dictionary[key])):\n f.write('\\t\\t<')\n f.write(key+str(i))\n f.write('>')\n f.write(str(temp[i]))\n f.write('</')\n f.write(key+str(i))\n f.write('>\\n')\n #Print additional tab to preserve alignment\n f.write('\\t')\n else:\n #Print value\n f.write(str(config_dictionary[key]))\n #Close the brackets and print newline\n f.write('</')\n f.write(key)\n f.write('>\\n')\n \n #Close the main node of the file\n f.write('</input>')\n \n #Close the file\n f.close()", "def test_augmentsXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldAugmentsFormat)\n upgradeAugmentsXML(fp)\n self.assertEquals(fp.getContent(), newAugmentsFormat)", "def __set_xml():\n if len(activity) == 0:\n\n OS = DRIVER.OS\n xml_path = os.path.join(prjDir, \"config\", \"element_android.xml\")\n if OS == \"iOS\":\n xml_path = os.path.join(prjDir, \"config\", \"element_iOS.xml\")\n\n # open the xml file\n per = elementTree.parse(xml_path)\n all_element = per.findall('activity')\n\n for firstElement in all_element:\n activity_name = firstElement.get(\"name\")\n\n element = {}\n\n for secondElement in firstElement.getchildren():\n element_name = secondElement.get(\"name\")\n\n element_child = {}\n for thirdElement in secondElement.getchildren():\n\n element_child[thirdElement.tag] = thirdElement.text\n\n element[element_name] = element_child\n activity[activity_name] = element", "def get_test_resources():\n with open(\"test_data/repo/data/textgroup/work/textgroup.work.version-lat1.xml\") as file:\n text = Text(resource=file, urn=\"urn:cts:latinLit:textgroup.work.version-lat1\")\n\n with open(\"test_data/repo/full_inventory.xml\") as file:\n inventory = TextInventory(resource=file)\n work = inventory[\"urn:cts:latinLit:textgroup.work\"]\n textgroup = inventory[\"urn:cts:latinLit:textgroup\"]\n edition = inventory[\"urn:cts:latinLit:textgroup.work.version-lat1\"]\n\n return {\n \"text\": text,\n \"work\": work,\n \"textgroup\": textgroup,\n \"edition\": edition\n }", "def main():\n\n print (f'Reading schema file {args.schemafile} and writing RST to directory {args.outdir}')\n\n schema = xmlschema.XMLSchema(args.schemafile)\n\n # create warning.rst file for deprectaion warnings\n with open(\"warnings.rst\", \"w\") as warnfile:\n print(\".. 
Put any comments here\\n\", file=warnfile)\n print(\" Warning, this file is regenerated from the annotations in the schema file.\\n\", file=warnfile)\n print(\" Any changes will be overwritten by convert_xsd_to_rst.py.\\n\\n\\n\", file=warnfile)\n print(\"\\n\", file=warnfile)\n print(\"The following are potential future changes, as tagged in the schema with <warning> elements in the documentation. They may result in modifications or deletions in future versions of StationXML.\\n\", file=warnfile)\n print(\"\\n\\n\", file=warnfile)\n\n level_xpaths = ['fsx:FDSNStationXML',\n 'fsx:FDSNStationXML/fsx:Network',\n 'fsx:FDSNStationXML/fsx:Network/fsx:Station',\n 'fsx:FDSNStationXML/fsx:Network/fsx:Station/fsx:Channel',\n 'fsx:FDSNStationXML/fsx:Network/fsx:Station/fsx:Channel/fsx:Response']\n\n words = set()\n for i, xpath in enumerate(level_xpaths):\n xsd_element = schema.find(xpath)\n\n stop_element = None\n this_element = os.path.basename(xpath).split(':')[1]\n if i < 4:\n stop_element = os.path.basename(level_xpaths[i+1]).split(':')[1]\n\n level_elem = walk_tree(xsd_element)\n\n # Use Preamble instead of root element name\n if this_element==\"FDSNStationXML\":\n this_element=\"Preamble\"\n\n # Generate output file name for this level\n rst_file = os.path.join (args.outdir, f'level-{this_element.lower()}.rst')\n\n if args.verbose:\n print (f'Writing to {rst_file}')\n\n with open(rst_file, 'w') as outfile:\n print(\".. Put any comments here\\n\", file=outfile)\n print(\" Warning, this file is regenerated from the annotations in the schema file.\\n\", file=outfile)\n print(\" Any changes will be overwritten by convert_xsd_to_rst.py.\\n\", file=outfile)\n\n write_tree(level_elem, stop_element, outfile = outfile)\n\n recur_spelling(words, level_elem)\n save_spelling(words)", "def addEntry(self, listDictions):\n ## load xml\n improvDoc = loadIMProvFile(self.argsFile)\n entrname= 'Job'\n for dictions in listDictions:\n report = IMProvNode(entrname , None, **dictions)\n improvDoc.addNode(report)\n outfile = file( self.argsFile, 'w').write(str(improvDoc))\n return", "def read_xml(self):\n connection = urlopen(self.url)\n in_xml = connection.read()\n state = ElementTree.fromstring(in_xml)\n records = []\n record = []\n\n # Specific to CHP\n # TODO(David) Nested for loops are bad. Change this to be more\n # efficient, possibly use generators.\n for center in state:\n rec_center = center.attrib['ID']\n\n for dispatch in center:\n rec_dispatch = dispatch.attrib['ID']\n\n for log in dispatch:\n record = [rec_center, rec_dispatch]\n\n record.append(log.attrib['ID'])\n\n log_time = log.find('LogTime').text.strip('\"')\n log_type = log.find('LogType').text.strip('\"')\n location = log.find('Location').text.strip('\"')\n loc_desc = log.find('LocationDesc').text.strip('\"')\n area = log.find('Area').text.strip('\"')\n\n record.append(log_time)\n record.append(log_type)\n record.append(location)\n record.append(loc_desc)\n record.append(area)\n\n latlon = log.find('LATLON').text.strip('\"')\n\n (lat, lon) = latlon.split(':')\n lat = str(lat[:2]) + '.' + str(lat[2:])\n lon = '-' + str(lon[:3]) + '.' 
+ str(lon[3:])\n\n record.append(lat)\n record.append(lon)\n\n records.append(record)\n\n self.records = records", "def build_configs():", "def generate_xml_tree(self):\n try:\n tree = et.parse(self.file)\n self.root = tree.getroot()\n self.blast_output = self.root[8]\n self.iteration = self.blast_output[0]\n self.iteration_hit = self.iteration[4]\n\n for i in self.iteration_hit:\n self.hits.append(i)\n\n for i in self.hits:\n h = []\n for j in i:\n h.append(j)\n\n for hsp in h[5]:\n procent = \"{0:.2f}\".format(int(hsp[10].text) / int(hsp[13].text) * 100)\n procent = float(procent)\n self.aligns.append(Alignment(h[2].text,\n hsp[1].text,\n procent,\n hsp[12].text,\n hsp[10].text,\n hsp[13].text,\n hsp[14].text,\n hsp[15].text,\n hsp[16].text))\n self.main_alignments.append(MainAlignment(i[2].text,\n self.aligns))\n self.aligns = []\n except IndexError:\n \"Bad file.\"", "def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))", "def main(src_xml, out='output.txt', sep='::'):\n \n soup = bs(open(src_xml).read(), 'lxml') \n data = [(p[0].text, s_convert(p[1].text), p[2].text)\n for p in zip(soup('weightedfrequency'),\n soup('headword'),\n soup('shortdefinition'))]\n res = ''\n words = []\n for item in data:\n try:\n if item[1].replace('\\n', '') in words:\n continue\n else:\n res += '{}{}{}{}{}\\n'.format(item[0],\n sep,\n item[1].replace('\\n', ''),\n sep,\n item[2].replace('\\n', ''))\n words.append(item[1].replace('\\n', ''))\n except:\n continue\n with open(out, 'w') as f:\n f.write(res)\n return res" ]
[ "0.61072654", "0.5717132", "0.5615994", "0.5543528", "0.55028045", "0.53621995", "0.5347263", "0.5297309", "0.5269815", "0.5254691", "0.52032775", "0.5177183", "0.5130147", "0.5118681", "0.51131713", "0.51111734", "0.5094071", "0.50852966", "0.5085046", "0.50843346", "0.50589895", "0.5041263", "0.50376314", "0.50374556", "0.5014333", "0.49962717", "0.49937144", "0.49916235", "0.49897355", "0.49828026", "0.4973738", "0.49674258", "0.49440247", "0.49363175", "0.49211448", "0.49160635", "0.4913846", "0.4903944", "0.490373", "0.49018025", "0.4892479", "0.4891718", "0.48910236", "0.48893228", "0.4886891", "0.48852265", "0.48839623", "0.48788133", "0.48747247", "0.48732892", "0.48709875", "0.48642775", "0.4856655", "0.48448792", "0.48286662", "0.48263744", "0.4824608", "0.48175403", "0.47857016", "0.47856194", "0.47823343", "0.47817886", "0.47815093", "0.47804773", "0.47803736", "0.4777209", "0.47725597", "0.4771328", "0.47693968", "0.47664157", "0.47644585", "0.4763028", "0.47623613", "0.47599238", "0.47551224", "0.4753274", "0.47391096", "0.4732845", "0.47276014", "0.47263637", "0.47238025", "0.47235912", "0.4718478", "0.47141662", "0.4710987", "0.4708887", "0.4703245", "0.46973857", "0.46945626", "0.46872616", "0.46842992", "0.46842057", "0.46818307", "0.46760923", "0.46728203", "0.46686438", "0.46662655", "0.46627524", "0.46612573", "0.46611884" ]
0.72865844
0
r"""The constructor for Config class Initializes the Config class
def __init__(self, config_file_name="config.json"):
    with open(config_file_name, "r") as config:
        f = dict(json.load(config))
        for key, value in f.items():
            setattr(self, key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current configuration path\r\n self._default_path = Path() # default configuration path\r\n self._conversion_dict = None\r\n self._auto_cast = None\r\n self._write_flags = None\r\n self._force_load = None\r\n self._load_empty = None\r\n self._ask_path = None\r\n self._search_in_default_config = None\r\n self._init_count = 0\r\n self._policies = defaultdict(bool) # by default every modification is forbidden # WIP\r\n if args or kwargs:\r\n self.init(*args, **kwargs)\r\n logger.debug(\"Config object created.\")", "def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config", "def __init__(self, config_dict: dict):\n self._config = config_dict", "def __init__(self):\n self.__parameters: ConfigParams = ConfigParams()", "def __init__(self, config):\n\n self.config = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: dict) -> None:\n super().__init__(config)", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config=None):\n pass", "def __init__(self):\n self.config = {}", "def __init__(self, config, cfg):\n self.config = config\n self.cfg = cfg", "def init_config(self):\n pass", "def __init__(self, config: Optional[Config] = None):\n self.config = config or Config()", "def __init__(self) -> None:\n self.config: dict[str, str | int] = {}", "def __init__(self, configs):\n\n self.__configs = configs", "def init_config() -> Config:\n ...", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def initialize_from_config(self):", "def __init__(self, *args, **kwargs):\n wrap = lambda v: Config(v) if type(v) is dict else v\n kvdict = {k: wrap(v) for k, v in dict(*args, **kwargs).items()}\n super(Config, self).__init__(kvdict)\n self.__dict__ = self", "def __init__(self):\n\n if Config._instance:\n raise Exception('Config singleton is already instantiated. 
User Config.get_instance() obtain it.')\n\n parser = configparser.ConfigParser()\n parser.read('C:\\\\Users\\\\Akatosh\\\\PythonProjects\\\\note-it\\\\config\\\\config.ini')\n\n self.sections = {}\n\n for section in parser:\n self.sections[section] = _Section(parser[section])\n\n Config._instance = self", "def __init__(self, config):\n raise NotImplementedError (\"This is just an abstract interface\")", "def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging", "def _init_config_(self):\n self._config= {}", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def __init__(self, config: Tuple):", "def initialize(self, **kwargs):\n\n # Defining the configuration object\n self.config = kwargs.get('config')", "def __init__(self, config_file_name=\"config.json\"):\n self.config_file_name = config_file_name\n self._config = self._open_config_file()", "def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def Init(self, config):\r\n pass", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []", "def __init__(self, configuration):\n self._config = configuration", "def __init__(self, cfg=None):\n if cfg is None:\n cfg = {}\n\n self.cfg = cfg", "def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")", "def __init__(self, config):\n\n self.locations_hltv_starting_ = config[sC.BUCKET_LOCATIONS][sC.HLTV_STARTING]\n self.score_starting_ = config[sC.BUCKET_LOCATIONS][sC.SCORE_STARTING]\n self.logs_starting_ = config[sC.BUCKET_LOCATIONS][sC.LOGS_STARTING]\n self.temp = config[sC.FOLDER_LOCATIONS][sC.TEMP_APP_ENGINE_FOLDER]\n self.results_ = config[sC.FOLDER_LOCATIONS][sC.CONFIGS_RESULTS]\n self.amxmodx_logs_ = config[sC.FOLDER_LOCATIONS][sC.ADDONS_AMXMODX_LOGS]\n self.cstrike_logs_ = config[sC.FOLDER_LOCATIONS][sC.CSTRIKE_LOGS]\n self.hltv_demos_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.HLTV_DEMOS_FUNC]\n self.ftp_logs_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.FTP_LOGS_FUNC]\n\n print('{} - Initialized'.format(__name__))", "def __init__(self, cfg=None, **kwargs):\n self.__dir = KITConfig.configDir\n self.__cfgFile = \"\"\n\n self.__cfg = {}\n self.__default = KITConfig.defaultConfig\n\n self.__setupLogger()\n\n if cfg is not 
None:\n self.__cfgFile = cfg\n self.load(cfg)", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self, config_entry):\n self.config_entry = config_entry", "def __init__(self, config_entry):\n self.config_entry = config_entry", "def __init__(self):\n self.collectorName = COLLECTOR_NAME\n self.configCycleInterval = 20 # minutes\n self.cycleInterval = 5 * 60 # seconds\n\n # The configurationService attribute is the fully qualified class-name\n # of our configuration service that runs within ZenHub\n self.configurationService = 'NullConfig'\n\n # Will be filled in based on buildOptions\n self.options = None\n\n self.configCycleInterval = 20*60", "def init(self, *args, **kwargs):\n try:\n self._init(*args, **kwargs)\n except (ValueError, TypeError, UnicodeError, ConfigParser.Error), exc:\n raise ConfigInvalidError, str(exc), sys.exc_info()[2]", "def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose", "def __init__(self, config, logger):\n self.config = config\n self.logger = logger", "def __init__(self, configs: Union[Config, Dict[str, Any]]):\n self.configs: Config = Config(configs, None, allow_new_hparam=True)", "def __init__(self, conf=None):\n # set interpolation to None so you can supply filename template\n # that contain % to config.set\n conf = ConfigParser(strict=False,\n inline_comment_prefixes=(';',),\n interpolation=None) if (conf is None) else conf\n super().__init__(conf)\n self._cycle = None\n self._logger = logging.getLogger('metplus')\n # config.logger is called in wrappers, so set this name\n # so the code doesn't break\n self.logger = self._logger\n\n # get the OS environment and store it\n self.env = os.environ.copy()\n\n # add section to hold environment variables defined by the user\n self.add_section('user_env_vars')", "def _init_config(self, configPath=None):\n # TODO: The SafeConfigParser class has been renamed to ConfigParser in Python 3.2.\n # This alias will be removed in future versions.\n # We still use SafeConfigParser for backwards compatibility with Python 2.\n self.config = SafeConfigParser()\n # Make option names case sensitive\n self.config.optionxform = str\n\n if configPath and os.path.isdir(configPath):\n configDir = configPath\n else:\n configDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n\n # List filenames in configDir alphabetically\n _, _, configFiles = next(os.walk(configDir))\n configFiles = sorted(configFiles, key=str.lower)\n\n # Read configuration pipeline\n for f in configFiles:\n with open(os.path.join(configDir, f)) as configFile:\n self.config.readfp(configFile)\n self._store_config_pass()\n\n if configPath and os.path.isfile(configPath):\n self.config.read(configPath)\n self._store_config_pass()\n\n appSection = 'application'\n self.appName = self._get_option_value(appSection, 'appName')\n self.appResource = self._get_option_value(appSection, 'appResource')\n self.appArgs = []\n appArgs = self._get_option_value(appSection, 'appArgs')\n if appArgs:\n self.appArgs = appArgs.split(' ')\n self.mainClass = self._get_option_value(appSection, 'mainClass')", "def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise 
ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Config, cls).__new__(cls)\n return cls._instance", "def __init__(self, filename=None):\n if filename:\n if not os.path.exists(filename):\n raise Exception(\"No configuration found at %s\" % filename)\n super(Configuration, self).__init__(filename)", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry", "def __init__(self, settings):\n self._read_config(settings)", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()", "def __init__(self):\n\t\t\n\t\tsettings = configparser.SafeConfigParser(allow_no_value=True)\n\t\tlist=settings.read('data/settings.cfg')\n\t\tif not 'data/settings.cfg' in list:\n\t\t\tprint('no configuration file present.. making one')\n\t\t\tself.makeConfigFile(settings)\n\t\t\tshare = ['']\n\t\t\tself.nodes = []\n\t\telse:\n\t\t\tshare, nodes = self.openConfig(settings)\n\t\t\tself.nodes = nodes\n\t\t\n\t\t\n\t\tself.files = self.loadFiles(share)\t\t\n\t\tself.share = share\n\t\tself.kill= False\n\t\tself.downloads = {}\n\t\tself.currentVersion = (0,2,1)\n\t\tself.totalDownloads = 0\n\t\tself.current = 0\n\t\tself.config = settings", "def __init__(self, userconfig=None):\n configspec = _get_configspec()\n self._configspec = ConfigObj(configspec, interpolation=False,\n list_values=False, _inspec=True)\n configs_default = ConfigObj(interpolation=False,\n configspec=self._configspec)\n self._config = self._validate(configs_default)\n if userconfig:\n self.read_userconfig(userconfig)", "def __init__(self, config_file=None):\n\t\tself.options = {}\n\n\t\tif config_file:\n\t\t\tself.set_file(config_file)", "def config():\n return Config()", "def config():\n return Config()", "def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)", "def __init__(self, config_file):\n Config = ConfigParser.ConfigParser()\n Config.read(config_file)\n \n self.port_id = Config.get(\"SerialPortSection\", \"ComPort\")\n self.baud_rate = Config.get(\"SerialPortSection\", \"BaudRate\")\n self. 
timeout = Config.get(\"SerialPortSection\", \"TimeOut\")\n self.config_file = config_file", "def __init__(self, config):\n try:\n config['volume_id']\n config['access_key']\n config['secret_access_key']\n config['region']\n except KeyError, e:\n logging.error(repr(e))\n raise ImproperlyConfigured()\n\n if not config.has_key('keep'):\n config['keep'] = 5\n\n self.config = config", "def __init__(self, load_config):\n super().__init__()\n self._load_config = load_config", "def init(cls, config, src):\n cls.config = config", "def __init__(self):\n self.__default_config = ConfigParams.from_tuples(\n 'options.max_pool_size', 2,\n 'options.connect_timeout', 5000,\n 'options.auto_reconnect', True,\n 'options.max_page_size', 100,\n 'options.debug', True\n )\n\n # The logger\n self._logger: CompositeLogger = CompositeLogger()\n # The connection resolver\n self._connection_resolver: MongoDbConnectionResolver = MongoDbConnectionResolver()\n # The configuration options.\n self._options: ConfigParams = ConfigParams()\n # The MongoDB connection object.\n self._connection: pymongo.MongoClient = None\n # The MongoDB database name.\n self._database_name: str = None\n # The MongoDb database object.\n self._db: database.Database = None", "def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def __init__(self, config):\n self.conf = dict( timeout=60\n , logfile=''\n , ip_addr='192.168.0.220'\n , username='admin'\n , password='admin'\n , prompt=\"Cursor\"\n , init=False\n , debug=0\n , recv_pause=0.25\n , waitfor=300\n , pause=2\n , stdout=1\n , ping_timeout=10\n )\n self.conf.update(config)\n (self.exp_state, self.exp_output, self.exp_recv_cnt) = ('', '', 0)\n if self.conf['init']: self.initialize()", "def __init__(self, config, well, directory):\n self.config = config\n self.well = well\n self.directory = directory", "def __init__(self, **kwargs):\n cls = self.__class__\n\n # Initialize all configurables and input arguments\n for arg in cls.configurables():\n try: # Read from class constructor\n setattr(self, arg, kwargs[arg])\n except KeyError:\n try: # Set from default value defined in class\n default_value = getattr(self, arg).kwargs[\"default\"]\n setattr(self, arg, default_value)\n except KeyError: # if nothing is provided, fallbakcs to None\n setattr(self, arg, None)\n\n self.input_arguments = None\n if cls.input_configurables():\n self.input_arguments = [\n getattr(self, arg) for arg in cls.input_configurables()\n ]\n\n self.json_config = cfg.JsonConfig(self.config)\n self.output_objects = []\n self.file = None", "def __init__(self, config_file=None):\n self.file = config_file\n self.parser = SafeConfigParser()\n if isinstance(self.file, (str, list)):\n self.parser.read(self.file)\n else: # assume file object was given instead\n self.parser.read_file(self.file)\n self._flask_cache = None\n self._assets_cache = None\n self._gridrealm_cache = None", "def __init__(self, environment='develop'):\n\n cwd = path.dirname(path.abspath(__file__))\n config_dir = path.join(cwd, 
'configs')\n\n config_files = []\n for (root, _, file_names) in walk(config_dir):\n for file_name in file_names:\n config_files.append(path.join(root, file_name))\n config_files = sorted(config_files)\n\n for config_file in config_files:\n config = anyconfig.load(config_file)\n for key in config:\n self[key] = config[key]\n\n if environment in config_file:\n break", "def __init__(self, config, town):\n\t\tself.config = config", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def __init__(self, config=None, broker=None):\n pass", "def __init__(self, config_path=None):\n self.config_path = None\n\n config_path = config_path or CONF.api_paste_config\n if not os.path.isabs(config_path):\n self.config_path = CONF.find_file(config_path)\n elif os.path.exists(config_path):\n self.config_path = config_path\n\n if not self.config_path:\n raise exception.ConfigNotFound(path=config_path)", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)", "def __init__(self, config_name: str, is_top_level_config: bool = False):\n self.config_name = config_name\n self.config_variable_name = to_snake_case(config_name) + \"_config\"\n self.is_top_level_config = is_top_level_config\n self.parameters: List[CppParameter] = list()\n self.configs: List[CppConfig] = list()", "def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)", "def __init__(self):\n self.database = Database()\n self.load_config()", "def __init__(self, config, loop):\n self.config = config\n self.loop = loop", "def __init__(self, config_directory: Optional[pathlib.Path] = None):\n self._config_parser = configparser.ConfigParser()\n # Preserve case for keys.\n self._config_parser.optionxform = lambda x: x\n\n if config_directory is None:\n self._config_filepath = pathlib.Path(_KFP_CONFIG_FILE)\n else:\n self._config_filepath = config_directory / _KFP_CONFIG_FILE\n\n try:\n with open(str(self._config_filepath), 'r') as f:\n self._config_parser.read_file(f)\n except IOError:\n warnings.warn('No existing KFP Config file found')\n\n if not self._config_parser.has_section(_COMPONENTS_SECTION):\n self._config_parser.add_section(_COMPONENTS_SECTION)\n\n self._components = {}", "def __init__(self, config_directory, scale_override=None):\n self._config_directory = config_directory\n\n self._config_detector = DetectorConfig(config_directory)\n self._config_align = AlignConfig(config_directory, scale_override=scale_override)\n self._config_crystal = CrystalMatchConfig(config_directory)", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)\n #print(self.config)", "def __init__(self, config: str, wdir=os.getcwd()):\n\n self.config_path = config\n self.wdir = wdir\n\n with open(self.config_path) as fin:\n self.config_dic = yaml.load(fin, 
Loader=yaml.BaseLoader)\n\n # default is 00 for all interaction types\n self.config_parameters_dic = None\n\n # This parameter defines whether self_loops will be kept or not\n self.self_loops = True\n if 'self_loops' in self.config_dic:\n self.self_loops = bool(int(self.config_dic['self_loops']))", "def __init__(self) -> None:\n\n self.config_keys = ['APPS_HOST', 'APPS_PORT']\n super().__init__()\n\n self.APPS_HOST = str(self.APPS_HOST)\n \"\"\"Host where the server will be served\"\"\"\n\n self.APPS_PORT = int(self.APPS_PORT)\n \"\"\"Port where the server will be served\"\"\"", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry\n self.options = dict(config_entry.options)", "def __init__(self):\n self.storefn = Config.getConfigFnPath()\n\n # Load the configuration file file\n self.load()", "def __init__(self):\n default_config = Config()\n query = Query(default_config)\n database = Database(default_config)\n common_util = CommonUtil(default_config, database)\n self.config = default_config\n self.query = query\n self.database = database\n self.common_util = common_util", "def __init__(self, config_file, log_config_file):\n self._config = Config(config_file)\n if self._config.load() is None:\n print(\"Failed to load configuration.\\n\")\n sys.exit(-1)\n\n # Setup logging.\n self._logger = setup_logger('s3replicationmanager', log_config_file)\n if self._logger is None:\n print(\"Failed to configure logging.\\n\")\n sys.exit(-1)\n\n self._config.print_with(self._logger)\n\n self._jobs = Jobs(self._logger, \"all-jobs\")\n self._subscribers = Subscribers()", "def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)", "def config(self):\n pass", "def config(self):\n pass", "def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)", "def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]" ]
[ "0.8203434", "0.7962474", "0.78200513", "0.77837414", "0.77558804", "0.77334917", "0.77334917", "0.7729926", "0.7703412", "0.7703412", "0.7703412", "0.7694449", "0.7644489", "0.7622834", "0.75704795", "0.7559002", "0.754742", "0.75234354", "0.75190806", "0.74873096", "0.7462494", "0.7439778", "0.74078375", "0.7365843", "0.7317262", "0.7299957", "0.7277268", "0.72679234", "0.72675955", "0.72636247", "0.725348", "0.7241625", "0.7238692", "0.714317", "0.7140095", "0.71238536", "0.7117453", "0.70930606", "0.7080537", "0.7079229", "0.7068156", "0.7068156", "0.70346344", "0.70273846", "0.70235217", "0.70089227", "0.70030105", "0.70023495", "0.6960768", "0.69591624", "0.6955141", "0.6936373", "0.69220245", "0.69189006", "0.69123346", "0.6906409", "0.6906409", "0.6901492", "0.68945926", "0.6893016", "0.68837863", "0.6878797", "0.687846", "0.687846", "0.68779546", "0.6872583", "0.6839872", "0.683661", "0.6836073", "0.6835405", "0.68353206", "0.68340117", "0.6821051", "0.6808765", "0.68015224", "0.68007", "0.6785903", "0.67739147", "0.67595184", "0.6758853", "0.6757474", "0.673611", "0.6731681", "0.67313457", "0.67267287", "0.6720677", "0.67195946", "0.67115957", "0.6706078", "0.66941845", "0.6687704", "0.6683256", "0.6670862", "0.6667564", "0.6665597", "0.66647226", "0.6658989", "0.6658989", "0.6644963", "0.6644274" ]
0.69338214
52
r"""Class constructor for Config
def __init__(self, config_file_name="config.json"):
    self.config_file_name = config_file_name
    self._config = self._open_config_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config", "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current configuration path\r\n self._default_path = Path() # default configuration path\r\n self._conversion_dict = None\r\n self._auto_cast = None\r\n self._write_flags = None\r\n self._force_load = None\r\n self._load_empty = None\r\n self._ask_path = None\r\n self._search_in_default_config = None\r\n self._init_count = 0\r\n self._policies = defaultdict(bool) # by default every modification is forbidden # WIP\r\n if args or kwargs:\r\n self.init(*args, **kwargs)\r\n logger.debug(\"Config object created.\")", "def __init__(self):\n self.__parameters: ConfigParams = ConfigParams()", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config):\n\n self.config = config", "def __init__(self, config=None):\n pass", "def __init__(self) -> None:\n self.config: dict[str, str | int] = {}", "def __init__(self, config_dict: dict):\n self._config = config_dict", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config: dict) -> None:\n super().__init__(config)", "def __init__(self, config, cfg):\n self.config = config\n self.cfg = cfg", "def __init__(self, *args, **kwargs):\n wrap = lambda v: Config(v) if type(v) is dict else v\n kvdict = {k: wrap(v) for k, v in dict(*args, **kwargs).items()}\n super(Config, self).__init__(kvdict)\n self.__dict__ = self", "def __init__(self):\n self.config = {}", "def __init__(self, config: Optional[Config] = None):\n self.config = config or Config()", "def __init__(self, config):\n raise NotImplementedError (\"This is just an abstract interface\")", "def __init__(self, config: Tuple):", "def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Config, cls).__new__(cls)\n return cls._instance", "def __init__(self, configs):\n\n self.__configs = configs", "def initialize_from_config(self):", "def __init__(self):\n\n if Config._instance:\n raise Exception('Config singleton is already instantiated. 
User Config.get_instance() obtain it.')\n\n parser = configparser.ConfigParser()\n parser.read('C:\\\\Users\\\\Akatosh\\\\PythonProjects\\\\note-it\\\\config\\\\config.ini')\n\n self.sections = {}\n\n for section in parser:\n self.sections[section] = _Section(parser[section])\n\n Config._instance = self", "def __init__(self, configuration):\n self._config = configuration", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def __init__(self, config_entry):\n self.config_entry = config_entry", "def __init__(self, config_entry):\n self.config_entry = config_entry", "def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose", "def init_config() -> Config:\n ...", "def __init__(self, config_path, normalize=False):\n self.config = {}\n _config_dict = {}\n self._config_path = Utils.expand_path(config_path)\n self.update = None\n self.normalize = normalize", "def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def __init__(self, cfg=None):\n if cfg is None:\n cfg = {}\n\n self.cfg = cfg", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self, config_file=None):\n\t\tself.options = {}\n\n\t\tif config_file:\n\t\t\tself.set_file(config_file)", "def __init__(self, path_to_config_file):\n self.file_path = path_to_config_file", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def __init__(self, config, logger):\n self.config = config\n self.logger = logger", "def __init__(self, configs: Union[Config, Dict[str, Any]]):\n self.configs: Config = Config(configs, None, allow_new_hparam=True)", "def init_config(self):\n pass", "def config():\n return Config()", "def config():\n return Config()", "def __init__(self, config_file):\n Config = ConfigParser.ConfigParser()\n Config.read(config_file)\n \n self.port_id = 
Config.get(\"SerialPortSection\", \"ComPort\")\n self.baud_rate = Config.get(\"SerialPortSection\", \"BaudRate\")\n self. timeout = Config.get(\"SerialPortSection\", \"TimeOut\")\n self.config_file = config_file", "def __init__(self, filename=None):\n if filename:\n if not os.path.exists(filename):\n raise Exception(\"No configuration found at %s\" % filename)\n super(Configuration, self).__init__(filename)", "def __init__(self, conf=None):\n # set interpolation to None so you can supply filename template\n # that contain % to config.set\n conf = ConfigParser(strict=False,\n inline_comment_prefixes=(';',),\n interpolation=None) if (conf is None) else conf\n super().__init__(conf)\n self._cycle = None\n self._logger = logging.getLogger('metplus')\n # config.logger is called in wrappers, so set this name\n # so the code doesn't break\n self.logger = self._logger\n\n # get the OS environment and store it\n self.env = os.environ.copy()\n\n # add section to hold environment variables defined by the user\n self.add_section('user_env_vars')", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry", "def __init__(self, name=None):\n self.name = name or \"default\"\n config_path = os.path.join(get_config_directory(), self.name + JSON)\n try:\n with open(config_path, mode='r') as config_file:\n self.config_dict = json.load(config_file)\n except Exception as ex:\n raise ColinConfigException(\"Config file '{}' cannot be loaded.\".format(config_path))", "def initialize(self, **kwargs):\n\n # Defining the configuration object\n self.config = kwargs.get('config')", "def __init__(self, config_name: str, is_top_level_config: bool = False):\n self.config_name = config_name\n self.config_variable_name = to_snake_case(config_name) + \"_config\"\n self.is_top_level_config = is_top_level_config\n self.parameters: List[CppParameter] = list()\n self.configs: List[CppConfig] = list()", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self, config, town):\n\t\tself.config = config", "def __init__(self, name, config):\n self.name = name\n self.config = config\n self.values = []", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def __init__(self, **kwargs):\n cls = self.__class__\n\n # Initialize all configurables and input arguments\n for arg in cls.configurables():\n try: # Read from class constructor\n setattr(self, arg, kwargs[arg])\n except KeyError:\n try: # Set from default value defined in class\n default_value = getattr(self, arg).kwargs[\"default\"]\n setattr(self, arg, default_value)\n except KeyError: # if nothing is provided, fallbakcs to None\n setattr(self, arg, None)\n\n self.input_arguments = None\n if cls.input_configurables():\n self.input_arguments = [\n getattr(self, arg) for arg in cls.input_configurables()\n ]\n\n self.json_config = cfg.JsonConfig(self.config)\n self.output_objects = []\n self.file = None", "def 
__init__(self):\n self.collectorName = COLLECTOR_NAME\n self.configCycleInterval = 20 # minutes\n self.cycleInterval = 5 * 60 # seconds\n\n # The configurationService attribute is the fully qualified class-name\n # of our configuration service that runs within ZenHub\n self.configurationService = 'NullConfig'\n\n # Will be filled in based on buildOptions\n self.options = None\n\n self.configCycleInterval = 20*60", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry\n self.options = dict(config_entry.options)", "def __init__(self, config, well, directory):\n self.config = config\n self.well = well\n self.directory = directory", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def __init__(self, userconfig=None):\n configspec = _get_configspec()\n self._configspec = ConfigObj(configspec, interpolation=False,\n list_values=False, _inspec=True)\n configs_default = ConfigObj(interpolation=False,\n configspec=self._configspec)\n self._config = self._validate(configs_default)\n if userconfig:\n self.read_userconfig(userconfig)", "def __init__(self, cfg=None, **kwargs):\n self.__dir = KITConfig.configDir\n self.__cfgFile = \"\"\n\n self.__cfg = {}\n self.__default = KITConfig.defaultConfig\n\n self.__setupLogger()\n\n if cfg is not None:\n self.__cfgFile = cfg\n self.load(cfg)", "def load(self):\n\n class Config(object):\n pass\n\n for key, val in self.kwargs.items():\n setattr(Config, key, val)\n return Config", "def __init__(self, config_path=None):\n self.config_path = None\n\n config_path = config_path or CONF.api_paste_config\n if not os.path.isabs(config_path):\n self.config_path = CONF.find_file(config_path)\n elif os.path.exists(config_path):\n self.config_path = config_path\n\n if not self.config_path:\n raise exception.ConfigNotFound(path=config_path)", "def __init__(self, config_file=None):\n self.file = config_file\n self.parser = SafeConfigParser()\n if isinstance(self.file, (str, list)):\n self.parser.read(self.file)\n else: # assume file object was given instead\n self.parser.read_file(self.file)\n self._flask_cache = None\n self._assets_cache = None\n self._gridrealm_cache = None", "def __init__(self, config):\n try:\n config['volume_id']\n config['access_key']\n config['secret_access_key']\n config['region']\n except KeyError, e:\n logging.error(repr(e))\n raise ImproperlyConfigured()\n\n if not config.has_key('keep'):\n config['keep'] = 5\n\n self.config = config", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)", "def __init__(self, config_path=None):\n config_path = config_path or CONF.api_paste_config\n if os.path.exists(config_path):\n self.config_path = config_path\n else:\n self.config_path = CONF.find_file(config_path)", "def __init__(self, config=None, broker=None):\n pass", "def __init__(self, config):\n\n self.locations_hltv_starting_ = config[sC.BUCKET_LOCATIONS][sC.HLTV_STARTING]\n self.score_starting_ = config[sC.BUCKET_LOCATIONS][sC.SCORE_STARTING]\n self.logs_starting_ = config[sC.BUCKET_LOCATIONS][sC.LOGS_STARTING]\n self.temp = config[sC.FOLDER_LOCATIONS][sC.TEMP_APP_ENGINE_FOLDER]\n self.results_ = config[sC.FOLDER_LOCATIONS][sC.CONFIGS_RESULTS]\n self.amxmodx_logs_ = config[sC.FOLDER_LOCATIONS][sC.ADDONS_AMXMODX_LOGS]\n self.cstrike_logs_ = config[sC.FOLDER_LOCATIONS][sC.CSTRIKE_LOGS]\n self.hltv_demos_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.HLTV_DEMOS_FUNC]\n self.ftp_logs_func_url = 
config[sC.CLOUD_FUNCTIONS_URLS][sC.FTP_LOGS_FUNC]\n\n print('{} - Initialized'.format(__name__))", "def from_config(cls, config: Dict[str, Any]) -> \"ClassyLoss\":\n raise NotImplementedError()", "def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)", "def __init__(self, load_config):\n super().__init__()\n self._load_config = load_config", "def config(self):\n raise NotImplementedError", "def _from_config(cls, config, **kwargs):\n return cls(config, **kwargs)", "def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)", "def __init__(self):\n self.filename = pathlib.Path(__file__).parent.absolute().__str__() + '/../../data/config.ini'\n self.data = ConfigParser()\n self.data.read(self.filename)", "def __init__(self):\n\n # open json config file that reads in information\n config_path = open(\"config.json\", \"r\")\n config_json = config_path.read()\n config_dict = json.loads(config_json)\n\n # assign object variables\n self.project_id = config_dict[\"project-id\"]\n self.bucket_name = config_dict[\"bucket-name\"]\n self.location_id = config_dict[\"key-location\"]\n self.key_ring_id = config_dict[\"key-ring-id\"]\n self.crypto_key_id = config_dict[\"crypto-key-id\"]\n self.service_account_email = config_dict[\"service-account-email\"]\n\n # close the file\n config_path.close()", "def from_configuration(cls, **kwargs):\n return cls(**kwargs)", "def Init(self, config):\r\n pass", "def init(self, *args, **kwargs):\n try:\n self._init(*args, **kwargs)\n except (ValueError, TypeError, UnicodeError, ConfigParser.Error), exc:\n raise ConfigInvalidError, str(exc), sys.exc_info()[2]", "def __init__(self, settings):\n self._read_config(settings)", "def __init__(self, config, loop):\n self.config = config\n self.loop = loop", "def __init__(self, config_file):\n \n self.log = logging.getLogger(__name__)\n\n self.parser = ConfigParser.ConfigParser()\n if os.path.exists(config_file) and os.path.isfile(config_file):\n self.parser.read(config_file)\n self.log.debug(\"opened configuration '%s'\" % config_file)\n else:\n raise ConfigError(\"Config file missing\", \"File '%s' doesn't exist.\" % (config_file))\n\n self.config_file = config_file\n self.check_config()", "def __init__(self, config=None):\n config_dict = {}\n if config:\n config_dict = json.load(config)\n\n self.android = config_dict.get(\"android\")\n self.linux = config_dict.get(\"linux\")\n self.atf = config_dict.get(\"atf\")\n self.qemu = config_dict.get(\"qemu\", \"qemu-system-aarch64\")", "def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)", "def init(cls, config, src):\n cls.config = config", "def __init__(self, config_path: str = \"config.json\"):\n # Change here if you want to relocate you config file\n self.config = {}\n self.load_configuration(config_path)\n self.app_name = self.config.get('app_name', self.APP_NAME)", "def __init__(self, config: Dict[str, Any], data: Data, verbose: bool) -> None:\n self.__verbose = verbose\n self.__data = data\n self.__config = config", "def __init__(self):\n\t\tConfigFile.__init__(self)\n\t\tself.created_by = None\n\t\tself.created_on = 
None\n\t\tself.definition = None\n\t\tself.note = None\n\t\tself.rejected_by = None\n\t\tself.rejected_on = None\n\t\tself.replaced_by = None\n\t\tself.status = 'elaboration'\n\t\tself.status_reason = None\n\t\tself.term = None\n\t\tself.todo = None\n\t\tself._valid_file_extension = 'def'", "def _init_config_(self):\n self._config= {}", "def __init__(self, config: str, wdir=os.getcwd()):\n\n self.config_path = config\n self.wdir = wdir\n\n with open(self.config_path) as fin:\n self.config_dic = yaml.load(fin, Loader=yaml.BaseLoader)\n\n # default is 00 for all interaction types\n self.config_parameters_dic = None\n\n # This parameter defines whether self_loops will be kept or not\n self.self_loops = True\n if 'self_loops' in self.config_dic:\n self.self_loops = bool(int(self.config_dic['self_loops']))", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)\n #print(self.config)", "def __init__(self, config_directory, scale_override=None):\n self._config_directory = config_directory\n\n self._config_detector = DetectorConfig(config_directory)\n self._config_align = AlignConfig(config_directory, scale_override=scale_override)\n self._config_crystal = CrystalMatchConfig(config_directory)", "def __init__(self):\n\t\t\n\t\tsettings = configparser.SafeConfigParser(allow_no_value=True)\n\t\tlist=settings.read('data/settings.cfg')\n\t\tif not 'data/settings.cfg' in list:\n\t\t\tprint('no configuration file present.. making one')\n\t\t\tself.makeConfigFile(settings)\n\t\t\tshare = ['']\n\t\t\tself.nodes = []\n\t\telse:\n\t\t\tshare, nodes = self.openConfig(settings)\n\t\t\tself.nodes = nodes\n\t\t\n\t\t\n\t\tself.files = self.loadFiles(share)\t\t\n\t\tself.share = share\n\t\tself.kill= False\n\t\tself.downloads = {}\n\t\tself.currentVersion = (0,2,1)\n\t\tself.totalDownloads = 0\n\t\tself.current = 0\n\t\tself.config = settings", "def __init__(self):\n self.__default_config = ConfigParams.from_tuples(\n 'options.max_pool_size', 2,\n 'options.connect_timeout', 5000,\n 'options.auto_reconnect', True,\n 'options.max_page_size', 100,\n 'options.debug', True\n )\n\n # The logger\n self._logger: CompositeLogger = CompositeLogger()\n # The connection resolver\n self._connection_resolver: MongoDbConnectionResolver = MongoDbConnectionResolver()\n # The configuration options.\n self._options: ConfigParams = ConfigParams()\n # The MongoDB connection object.\n self._connection: pymongo.MongoClient = None\n # The MongoDB database name.\n self._database_name: str = None\n # The MongoDb database object.\n self._db: database.Database = None", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def __init__(self) -> None:\n\n self.config_keys = ['APPS_HOST', 'APPS_PORT']\n super().__init__()\n\n self.APPS_HOST = str(self.APPS_HOST)\n \"\"\"Host where the server will be served\"\"\"\n\n self.APPS_PORT = int(self.APPS_PORT)\n \"\"\"Port where the server will be served\"\"\"" ]
[ "0.8146151", "0.81249666", "0.7914005", "0.7902959", "0.7902959", "0.78517556", "0.7830942", "0.78164166", "0.7816199", "0.78047544", "0.78047544", "0.78047544", "0.7794701", "0.7631528", "0.7613544", "0.76106983", "0.7570277", "0.7570083", "0.7505082", "0.7465383", "0.7454133", "0.7362795", "0.72564214", "0.7250131", "0.7242494", "0.7229574", "0.72265446", "0.72265446", "0.7194435", "0.7181348", "0.7175195", "0.71561235", "0.7149797", "0.7111156", "0.7110856", "0.71093625", "0.7102539", "0.71023625", "0.706302", "0.7062595", "0.7056412", "0.7047717", "0.70417416", "0.70386314", "0.70386314", "0.7030067", "0.70222646", "0.7019634", "0.70142233", "0.7012678", "0.7009522", "0.699475", "0.6974509", "0.69613636", "0.69570494", "0.694711", "0.6944664", "0.69441354", "0.6930602", "0.69067186", "0.6901887", "0.6901887", "0.68925226", "0.6889054", "0.6877439", "0.6876283", "0.68661237", "0.6857761", "0.6853694", "0.68463284", "0.6845723", "0.6843961", "0.68405473", "0.6840285", "0.6833352", "0.6829174", "0.6823486", "0.6802595", "0.67933804", "0.67912775", "0.679085", "0.6789616", "0.6786914", "0.67869127", "0.6779256", "0.6778579", "0.67699254", "0.6763202", "0.67585015", "0.6754568", "0.67489636", "0.6737121", "0.67358637", "0.6732636", "0.6711185", "0.67053986", "0.6693244", "0.6691935", "0.66906476", "0.6690254" ]
0.7287139
22
Load the config file
def _open_config_file(self): try: with open(self.config_file_name,encoding='utf-8') as json_data_file: conf = json.load(json_data_file) return conf except FileNotFoundError: with open(self.config_file_name, 'w',encoding='utf-8') as json_data_file: json.dump({},json_data_file,indent=2) return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_config(self):\n pass", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)", "def load(file):\n _config.load(file)", "def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)", "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "def read_config(self, config_filename):", "def load_config(self, config_file):\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)", "def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)", "def load_config_file(self):\n\n conf_file = config.DEFAULT_CONFIGURATION_FILE\n\n if self.options and getattr(self.options, \"conf_file\"):\n conf_file = self.options.conf_file\n if (\n not os.path.exists(conf_file) and\n not os.path.exists(\"%s.d\" % conf_file)\n ):\n raise Exception(\n (\n \"The specified configuration file \"\n \"does not exist. File=(%s)\"\n ) % self.options.conf_file\n )\n\n self.from_file(conf_file)", "def load_conf(self):\n self._read_uconf()", "def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))", "def load_config():\n global config\n\n with open(\"config.json\") as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg", "def read_configuration (self):\n\t\tself.config.read(self._configfile)", "def load_config():\n config = ConfigParser()\n config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))\n return config", "def load_config():\n global config\n with open('config.yml', 'r') as file:\n config = yaml.load(file)", "def load_config():\n here = os.path.dirname(os.path.abspath(__file__))\n 
config_path = os.path.join(here, 'config.json')\n with open(config_path, encoding='utf-8') as f:\n return json.load(f)", "def load_config():\n config = configparser.ConfigParser()\n config.read('config.ini')\n return config", "def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config", "def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def loadConfig(self):\r\n self.config.read(self.CONFIG_FILE)\r\n try:\r\n assert \"Settings\" in self.config\r\n except AssertionError:\r\n print(\"Settings do not exist, creating new config file...\")\r\n self.saveConfig()\r\n settings = self.config[\"Settings\"]\r\n self.dataPath = settings.get(\"datapath\",fallback=\"\")\r\n self.videoPath = settings.get(\"videopath\",fallback=\"\")\r\n self.dataOffset = settings.getfloat(\"dataoffset\",fallback=0)\r\n self.colBlindMode = settings.getboolean(\"colblindmode\",False)\r\n if self.videoPath != \"\":\r\n self.loadVideo(self.videoPath,loadAudio=False)\r\n if self.dataPath != \"\":\r\n self.loadData(self.dataPath)", "def _load_config():\n fname = _get_config_fname()\n if fname is None or not op.isfile(fname):\n return dict()\n with open(fname, 'r') as fid:\n config = json.load(fid)\n return config", "def load_config(self):\n if not self.config_file_path:\n return False\n with open(self.config_file_path) as f:\n self.config = yaml.load(f)\n return True", "def loadConfig(self, config_file):\r\n\r\n import json\r\n\r\n self.config = None\r\n\r\n try:\r\n with open(config_file) as f:\r\n self.config = json.load(f)\r\n except OSError as err:\r\n print(\"Unable to process {}, {}\".format(config_file, err))\r\n sys.exit(1)", "def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def load ( self ):\n files = config.get_or_fail ( 'REPO.config_files' )\n for f in files:\n self.load_file ( f )", "def load_conf(self, filename):\n\n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename) as file:\n self.conf = json.loads(file.read())", "def cli_load_config(self, args) -> str:\n path = args.config_path\n if not os.path.isfile(path):\n return error(\"Path {} DNE\".format(path))\n\n try:\n self.config = config.from_file(path)\n return ok(\"Configuration loaded from {}\".format(path))\n except FileNotFoundError as err:\n return error(\"Could not load file: {}\".format(err))\n except json.JSONDecodeError as json_err:\n return error(\"Could not parse json file {}\".format(json_err))", "def load_config( self, config_file=None ):\n if config_file is None:\n config_file = 
os.path.dirname(self.dbfile) + '/final.ini'\n config_parser = ConfigParser.ConfigParser()\n config_parser.read( config_file )\n self.config = config_parser\n return self.config", "def load(self):\n config_dict = {}\n with open(\n os.path.join(\n os.path.dirname(\n os.path.abspath(\n inspect.stack()[0][1]\n )\n ),\n \"config.txt\"), 'r') as config_file:\n for line in config_file:\n if not line.startswith('#'):\n line = line.strip().split('=', 1)\n if len(line) == 2:\n config_dict[line[0]] = line[1]\n return config_dict", "def load_config(path):\n # opens config file\n try:\n config = configparser.ConfigParser()\n config.read(path)\n return config\n except Exception as e:\n print(\"Error loading config file: \", e)\n sys.exit(1)", "def load_from_conf(self):\r\n raise NotImplementedError", "def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]", "def load_config():\n proj_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(proj_dir, \"config.yml\")\n conf = yaml.safe_load(open(config_path))\n return conf", "def load_config():\n config_file = os.path.dirname(os.path.abspath(__file__)) + '/../config.json'\n with open(config_file, 'r') as f:\n config = json.load(f)\n\n return config", "def load_from_conf(self):\n raise NotImplementedError", "def load_config(self, filename: str=None):\n if not filename:\n filename = self.config_file\n with open(filename) as file_object:\n config = json.load(file_object)\n if isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value", "def load_config():\n config_file = os.path.join(\n Path(os.path.dirname(os.path.realpath(__file__))).parent,\n \"config.ini\"\n )\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n app_config = configparser.ConfigParser()\n app_config.read(config_file)\n return app_config['uberoo']", "def load_config(self):\n h_config = configparser.ConfigParser()\n with self.config_file.open() as configfile:\n h_config.read_file(configfile)\n if not (\"general\" in h_config.keys() and \"unifi\" in h_config.keys() and \"hue\" in h_config.keys()):\n logging.warning(\"Configuration file {} is invalid.\".format(self.config_file))\n return\n if not self.configuration.interval:\n self.configuration.interval = int(h_config[\"general\"][\"interval\"])\n if not self.configuration.wifi_clients:\n self.configuration.wifi_clients = h_config[\"general\"][\"wifi_clients\"].split(\",\")\n if not self.configuration.schedules_names:\n self.configuration.schedules_names = h_config[\"general\"][\"schedules_name\"].split(\",\")\n if not self.configuration.unifi_host:\n self.configuration.unifi_host = h_config[\"unifi\"][\"host\"]\n if not self.configuration.unifi_port:\n self.configuration.unifi_port = int(h_config[\"unifi\"][\"port\"])\n if not self.configuration.unifi_username:\n self.configuration.unifi_username = h_config[\"unifi\"][\"username\"]\n if not self.configuration.unifi_password:\n self.configuration.unifi_password = h_config[\"unifi\"][\"password\"]\n if not self.configuration.hue_host:\n self.configuration.hue_host = h_config[\"hue\"][\"host\"]\n if not self.configuration.hue_port:\n self.configuration.hue_port = int(h_config[\"hue\"][\"port\"])\n if not self.configuration.hue_key:\n self.configuration.hue_key = 
h_config[\"hue\"][\"key\"]\n\n if \"general\" in h_config.keys():\n if not self.configuration.pub_host:\n self.configuration.pub_host = h_config[\"zmq\"][\"host\"]\n if not self.configuration.pub_port:\n self.configuration.pub_port = int(h_config[\"zmq\"][\"port\"])\n if \"no_pub\" not in self.configuration:\n self.configuration.no_pub = bool(int(h_config[\"zmq\"][\"disabled\"]))\n\n if \"logging\" in h_config.keys():\n if \"syslog_host\" in h_config[\"logging\"].keys() and not self.configuration.syslog_host:\n self.configuration.syslog_host = h_config[\"logging\"][\"syslog_host\"]\n if \"syslog_port\" in h_config[\"logging\"].keys():\n self.configuration.syslog_port = int(h_config[\"logging\"][\"syslog_port\"])\n if \"log_file\" in h_config[\"logging\"].keys() and not self.configuration.log_file:\n self.configuration.log_file = Path(h_config[\"logging\"][\"log_file\"])\n\n logging.info(\"Configuration loaded from {}\".format(str(self.config_file)))\n logging.debug(self.configuration)", "def load_config(self, filename):\n # check if config file exists\n if not os.path.exists(filename):\n raise Exception(\"Can't find configuration {}.\".format(filename))\n # load config file\n config = configparser.ConfigParser()\n with open(filename, 'r') as config_file:\n config.read_file(config_file)\n return config", "def load_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Config\",\n CONFIG_DIR,\n \"Config Files (*.cfg)\")\n else:\n file_path = path\n self._load_state(file_path)\n #self.write_text(\"Loaded config @ {}\".format(file_path))", "def _load_config_file(self, path: str) -> Dict[str, Any]:\n try:\n with open(path) as file:\n conf = json.load(file)\n except FileNotFoundError:\n raise OperationalException(\n f'Config file \"{path}\" not found!'\n ' Please create a config file or check whether it exists.')\n\n return conf", "def __load_config(self) -> dict:\n file = open(\"config.json\")\n config_file = json.load(file)\n file.close()\n return config_file", "def load(filepath):\n with open(filepath) as f:\n return Config(json.load(f))", "def _load_config(file):\n try:\n return bb.parse.handle(os.path.join('conf', file), bb.data.init() )\n except IOError, e:\n return None", "def load_config(self, filename):\n # read entire file for metadata\n fh = open(filename, 'r')\n self.file_contents = fh.read()\n\n # replace !include directives with content\n config_dir = os.path.split(filename)[0]\n include_re = re.compile('^!include\\s+(.*)$', re.MULTILINE)\n def include_repl(matchobj):\n fname = os.path.join(config_dir, matchobj.group(1))\n with open(fname) as f:\n return f.read()\n while re.search(include_re, self.file_contents): # for recursive !include\n self.file_contents = re.sub(include_re, include_repl, self.file_contents)\n\n # read in dictionary\n self.config = self.__ordered_load(self.file_contents)\n\n # convert functions of other params to true expressions\n for k in self.config.keys():\n self.config[k] = ExperimentConfig.__convert_key(self.config[k])\n\n # load core configuration\n return self.config", "def load(filename):\n conf = CommonConfig.get()\n conf.update(toml.load(filename))\n return conf", "def load( self ):\n ini = codecs.open(self.filename,\"r\",\"utf-8\",errors=\"replace\",buffering=0)\n for l in ini:\n l = l.strip()\n if l:\n (name,value) = l.split(\"=\",1)\n self.conf[name.strip()] = value.strip()\n ini.close()", "def loadconfig(self, configfile=None):\n 
realconfig = configfile\n if configfile is None:\n realconfig = Infopage.DEFAULT_CFGFILE\n try:\n fh = io.open(realconfig, 'r')\n config = json.load(fh)\n self.config.update(config)\n except IOError as e:\n if configfile is None:\n # ignore if the default config does not exist\n pass\n else:\n raise e", "def load_config(self) -> Dict[str, Any]:\n # Load all configs\n config: Dict[str, Any] = self.load_from_files(self.args.get(\"config\", []))\n\n return config", "def load_config(cls, path: str) -> GlobalConfig:\n with open(path, \"r\") as config_file:\n config_dict = yaml.load(config_file)\n cls.config = GlobalConfig(config_dict)\n return cls.config", "def loadConfig(fileName=None):\n if not fileName:\n fileName = Config.userDir + \"config.py\"\n try:\n config = literal_eval( (open(fileName).read()) )\n except Exception,e:\n print(e)\n return\n for c in Config.userConfig:\n if c in config:\n setattr(Config, c, config[c])\n Config.update()", "def parse(self):\n try:\n with open(self.path, 'r') as ymlfile:\n self.__cfg = yaml.load(ymlfile)\n except IOError:\n self.log(\"File {0} not found -- aborting\".format(self.path))\n raise ConfigFileException", "def load_config(config_file=None):\n if Config.CONFIG:\n return Config.CONFIG\n else:\n try:\n if not config_file:\n config_file = os.path.join('./config/', 'bcl2fastq.config.yaml')\n Config.CONFIG = Config._load_yaml_config(config_file)\n return Config.CONFIG\n except IOError:\n raise IOError((\"There was a problem loading the configuration file. \"\n \"Please make sure that {0} exists and that you have \"\n \"read permissions\".format(config_file)))", "def load_conf():\n if os.path.exists(CONF_FILE):\n with open(CONF_FILE, 'r') as infile:\n return json.load(infile)\n else:\n return {}", "def _get_config_from_file(self, filename):\n\n with open(filename, 'r') as f:\n config = load(f)\n return config", "def _read_config_file(self):\r\n\r\n try:\r\n with open(self.config, 'r') as f:\r\n config_data = json.load(f)\r\n except FileNotFoundError:\r\n config_data = {}\r\n\r\n return config_data", "def load_config(config_file_path):\n global config\n try:\n config_file_path = os.path.abspath(config_file_path)\n assert config_file_path\n with open(file=config_file_path) as yaml_data:\n loaded_config = yaml.safe_load(yaml_data)\n for k in config:\n if k in loaded_config:\n config[k] = loaded_config[k]\n except AssertionError:\n print(f\"Config file {config_file_path} not found or unreadable ! 
Exiting..\")\n quit(1)", "def load_from_file(self, file_path):\n\n with open(file_path) as f:\n config_text = f.read()\n self.load_from_string(config_text)", "def _load_config_log(self):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n if not os.path.isfile(config_path):\n return {}\n with open(config_path, 'r') as f:\n data = yaml.load(f)\n return data", "def load(self, file):\n self.namespace['workflow'].configfile(file)\n self.updateNamespace()", "def load_configuration(self) -> None:\n config_file = self.default_config_file\n if self.config_file:\n config_file = self.config_file\n self.config = configparser.ConfigParser(delimiters=\"=\")\n # mypy is unhappy with us assigning to a method - (monkeypatching?)\n self.config.optionxform = lambda option: option # type: ignore\n self.config.read(config_file)", "def load_config(self):\n DEFAULT_CONFIG = {\n \"test_times\": 30,\n \"perfherder_protocol\": \"http\",\n \"perfherder_host\": \"local.treeherder.mozilla.org\",\n \"perfherder_client_id\": \"\",\n \"perfherder_secret\": \"\",\n \"perfherder_repo\": \"mozilla-central\",\n \"dashboard_host\": \"\",\n \"dashboard_ssh\": \"\"\n }\n logger_hasal.info('Loading config file from {} ...'.format(self._config_path))\n if os.path.isfile(self._config_path):\n with open(self._config_path, 'r') as f:\n ret_obj = json.load(f)\n test_times = ret_obj.get('test_times', '')\n perf_protocol = ret_obj.get('perfherder_protocol', '')\n perf_host = ret_obj.get('perfherder_host', '')\n perf_repo = ret_obj.get('perfherder_repo', '')\n if test_times:\n logger_hasal.info('Test Times: {}'.format(test_times))\n if perf_protocol:\n logger_hasal.info('Perfherder Protocol: {}'.format(perf_protocol))\n if perf_host:\n logger_hasal.info('Perfherder Host: {}'.format(perf_host))\n if perf_repo:\n logger_hasal.info('Perfherder Repo: {}'.format(perf_repo))\n return ret_obj\n else:\n with open(self._config_path, 'w') as f:\n f.write(json.dumps(DEFAULT_CONFIG))\n logger_hasal.info('No config.json file {}. Generate default config.'.format(self._config_path))\n return DEFAULT_CONFIG", "def load_config(config_path):\n global config\n with open(config_path) as config_file:\n config = munchify(yaml.safe_load(config_file))", "def load_config(self):\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n if not os.path.exists(conf_file):\n return {}\n with open(conf_file, \"r\") as ifile:\n return json.load(ifile)", "def load_config(self):\n logger.debug('loading config file: %s', self.config_file)\n if os.path.exists(self.config_file):\n with open(self.config_file) as file_handle:\n return json.load(file_handle)\n else:\n logger.error('configuration file is required for eventify')\n logger.error('unable to load configuration for service')\n raise EventifyConfigError(\n 'Configuration is required! 
Missing: %s' % self.config_file\n )", "def load_config(self):\n try:\n self.config = yaml.load(open(roslib.packages.get_pkg_dir('human_activities') + '/config/config.ini', 'r'))\n print \"config loaded:\", self.config.keys()\n\n return True\n except:\n print \"no config file found in /human_activities/config/config.ini\"\n return False", "def load_config(cls, config_file = None):\n config = ConfigParser()\n \n files = [\"/etc/imp.cfg\", os.path.expanduser(\"~/.imp.cfg\"), \".wm\", \".imp\"]\n if config_file is not None:\n files.append(config_file)\n \n config.read(files)\n cls.__instance = config", "def load_config(self, config_file = None):\n if config_file:\n return ET.parse(config_file)\n else:\n return ET.parse(self.config_file)", "def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))", "def _load_config(path) -> dict:\n with open(path, \"r\") as F:\n return json.load(F)", "def load_configuration(self, path):\n with open(path) as conf_file:\n if path.name not in self.configuration:\n self.configuration[path.name] = {}\n self.configuration[path.name] = json.load(conf_file)", "def get_config(self):\n if self.config is None:\n self.config = Configuration()\n\n #Hard coded the file for now, will change with Django interface\n self.config.parse_file('config')", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def load(self, file, config={}):\n if not os.path.exists(file):\n raise SystemExit('ERROR: config file at \"{f}\" does not exist'.format(f=file))\n config = config.copy()\n cp = ConfigParser.ConfigParser()\n cp.read(file)\n for sec in cp.sections():\n name = sec.lower()\n for opt in cp.options(sec):\n config[name + \".\" + opt.lower()] = cp.get(sec, opt).strip()\n return config", "def load_configuration(self):\n config = ConfigParser.ConfigParser()\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.properties')\n config.read(config_file)\n return dict(config.defaults())", "def init_config(cls, path):\n try:\n config_string = open(path).read()\n except EnvironmentError as ex:\n LOGGER.error('Could not load %s file, error: %s', path, ex)\n sys.exit()\n\n try:\n cls.config = json.loads(config_string)\n except ValueError as ex:\n LOGGER.error(' %s file is not valid json, error: %s', path, ex)\n sys.exit()", "def load_config(self, cfg: ConfigParser):\n log.debug('loading configuration file')\n section = cfg['DEFAULT']\n # use host-specific configuration, if any\n if self.hostname in cfg:\n section = cfg[self.hostname]\n self.path = os.path.abspath(os.path.expanduser(section['repo_dir']))\n self.gpg_key_id = section['gpg_key_id']\n self.ignored_files = section['ignored_files'].split(',')\n self.ignored_files.append('.gitkeep')", "def load(self, filename):\n config = AbstractConfig(config_file=filename)\n\n self.__idx(config)", "def load_config(filename):\n with open(filename, \"r\") as stream:\n try:\n global CONFIG\n CONFIG = yaml.load(stream)\n except yaml.YAMLError as ex:\n print(ex)", "def load_from_file(self, config_file):\n try:\n \n self.configParser = configparser.ConfigParser(allow_no_value=True)\n self.configParser.read(config_file)\n \n\n self.uri = self._readConfigOption(\"RepositoryConfiguration\", \"uri\").rstrip('/')\n self.db_driver = 
self._readConfigOption(\"Database\", \"db_driver\")\n self.db_user = self._readConfigOption(\"Database\", \"db_user\")\n self.db_password = self._readConfigOption(\"Database\", \"db_password\")\n self.db_database = self._readConfigOption(\"Database\", \"db_database\")\n self.db_hostname = self._readConfigOption(\"Database\", \"db_hostname\")\n self.db_port = int(self._readConfigOption(\"Database\", \"db_port\"))\n self.db_authentiacation = self._readConfigOption(\"Database\", \"db_authentication\")\n\n # Check if dirs are readable\n readable_dir(self.uri)\n\n \n except Exception as e:\n raise Exception('Failed in reading config file %s. Original message: %s' % (config_file, e))", "def load_config(self, context: ResourceCommandContext, config_file_location: str) -> None:\n enqueue_keep_alive(context)\n self.handler.load_config(context, config_file_location)", "def load_config():\n config_check()\n try:\n with open(os.path.abspath(CONFIG_PATH), 'rb') as config_file:\n config = pickle.load(config_file)\n except IOError:\n # Create new empty config file\n with open(os.path.abspath(CONFIG_PATH), 'wb') as config_file:\n config = {\n \"blogs\": [],\n \"active_blog\": None\n }\n pickle.dump(config, config_file)\n return config", "def load():\n print(\"Loading Configuration file..\")\n\n def load_defaults():\n global _conf\n _conf = get_defaults()\n save()\n\n if not os.path.exists(__config_file):\n load_defaults()\n return\n\n global _conf\n with open(__config_file, 'r', encoding='utf-8') as stream:\n _conf = yaml.round_trip_load(stream)\n \n if _conf is None:\n load_defaults()\n return\n \n version = _conf.get('_conf', -1)\n if version != VERSION:\n migrate(version)\n _conf['_conf'] = VERSION\n save()\n\n def mergeDict(old: dict, new: dict, layer=1) -> dict:\n \"\"\"\n Merge a dictionary into another while prefering the old values over the new\n\n :param old: original dictionary\n :param new: new dictionary to merge\n \"\"\"\n \n from collections import Mapping\n changed = False\n for key, val in new.items():\n # print(\"{} ({})\".format(key, type(old.get(key))))\n if not key in old:\n print(\"{}Adding new value {}\".format(' ' * layer, key))\n changed = True\n old[key] = val\n elif issubclass(type(old[key]), Mapping) and issubclass(type(val), Mapping):\n print(\"{}Merging dict {}\".format(' ' * layer, key))\n changed = changed or mergeDict(old[key], val, layer + 1)\n\n return changed\n \n defaults = get_defaults()\n if mergeDict(_conf, defaults):\n save()", "def load_config(self, filename, basedir = '.'):\n #find absolute path for config file\n (f, filepath) = self.find_file(filename, [basedir, __VALIDATA_ETC__])\n if filepath in self.include:\n raise ConfigError('Recursively include config file \"%s\"!' % filepath)\n self.include.add(filepath)\n cfg = yaml.load(f)\n f.close()\n\n #decide base directory for current config file\n basedir = dirname(filepath)\n\n #get log file path\n if '__logfile' in cfg:\n logfile = cfg['__logfile']\n if logfile[0] != '/':\n logfile = basedir + '/' + logfile\n self.logfile = logfile\n\n #check if there's any external reference\n for key in cfg:\n #ignore keywords\n if key[:2] == '__':\n continue\n elif key[:1] == '_':\n if isinstance(cfg[key], list):\n continue\n #load external reference\n refname = cfg[key]\n (f, filepath) = self.find_file(refname, [basedir, __VALIDATA_ETC__])\n cfg[key] = [x.rstrip('\\r\\n').decode('utf8') for x in f if x.rstrip('\\r\\n') != '']\n f.close()\n print 'Reference file \"%s\" loaded.' 
% refname\n\n #load include file(s)\n if '__include' in cfg:\n include = cfg['__include']\n del cfg['__include']\n if not isinstance(include, list):\n include = [include]\n tmp = {}\n for i in include:\n tmp.update(self.load_config(i, basedir))\n tmp.update(cfg)\n cfg = tmp\n\n print 'Config file \"%s\" loaded.' % filename\n return cfg", "def load_config(config_file):\n try:\n with open('settings.json', 'r') as f:\n return json.loads(f.read())\n except (IOError, Exception) as e:\n print '%s' % e\n exit()", "def read_config():\n with open(CONFIG_PATH) as config_file:\n return json.load(config_file)", "def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)", "def load_cfg(filepath=\"./config.yaml\"):\n with open(filepath, \"r\") as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()", "def _load_config(self):\n\n for p in self._paths:\n if p.exists():\n with p.open() as f:\n c = yaml.safe_load(f)\n if c:\n c['_config_file'] = str(p)\n return c\n else:\n raise ConfigurationError(f\"Didn't find a config file in paths: {self._paths}\")\n\n return {}", "def parse_config(self):\n # TODO: parse config file\n pass", "def readConfig(self):\n filename = self.cfg.get(\"CFG_FILE\")\n\n logging.info(\"Reading configuration file **{}**\".format(filename))\n try:\n # track previous MAP_FILE setting - map only needs to be redrawn if it's changed\n oldmap = self.cfg.get(\"MAP_FILE\") # may be None\n execfile(filename, self.cfg)\n\n # display setings\n out = '\\n'\n for a, b in self.cfg.iteritems():\n # exclude unprintables\n if a is not \"__builtins__\" and a is not \"MAPREF\":\n out = \"{} \\t {}: {}\\n\".format(out, a, b)\n # logging.info\n logging.info(\"Options read from configuration file: {}\".format(out))\n\n self.processMap()\n self.initAgent()\n logging.info(\"Starting agent pre-processing...\")\n self.processPrefs()\n self.setStart(self.cfg.get(\"START\"))\n self.setGoal(self.cfg.get(\"GOAL\"))\n\n if self.gui is not None: # reset has been called from the gui\n self.gui.setLmap(self.lmap)\n if not oldmap == self.cfg.get(\"MAP_FILE\"):\n self.gui.vmap.drawMap(self.lmap)\n self.hdlReset() # includes resetVars\n else:\n self.resetVars() # no attempt to update GUI\n\n\n except p4.BadMapException:\n self.updateStatus(\"Unable to load map: \" + self.cfg.get(\"MAP_FILE\"))\n except p4.BadAgentException:\n self.updateStatus(\"Unable to load agent: \" + self.cfg.get(\"AGENT_FILE\"))\n except:\n # unexpected error\n logging.error(\"Trace-back: \\n {}\".format(traceback.format_exc()))\n self.updateStatus(\"Problem reading config file!\")", "def load(self, file, config={}):\n if not os.path.exists(file):\n err = 'ERROR: config file at \"{f}\" does not exist'\n err = err.format(f=file)\n raise SettingsError(err)\n config = config.copy()\n cp = GoulashConfigParser()\n cp.read(file)\n return cp._sections", "def config():\n with open(config_path) as config_file:\n data = json.load(config_file)\n return data", "def load_config(self):\n\n with open(os.path.expanduser(self.config_filename), 'r') as f:\n lines = f.readlines()\n\n _usable = lambda l: not(l.startswith('#') or l.strip() == '')\n lines = filter(_usable, lines)\n\n def _build_config(key, value, d):\n \"\"\" Called recursively to split up keys \"\"\"\n pieces = 
key.split('.', 1)\n if len(pieces) == 1:\n d[pieces[0]] = value.strip()\n else:\n d[pieces[0]] = _build_config(pieces[1], value, {})\n\n return d\n\n d = {}\n for line in lines:\n if '=' not in line:\n continue\n\n key, value = line.split('=')\n d = _build_config(key, value, d)\n\n return d" ]
[ "0.8379018", "0.8353927", "0.8132022", "0.8111965", "0.8104528", "0.79837865", "0.7953077", "0.78238404", "0.7770456", "0.77264285", "0.77023524", "0.7679735", "0.7665702", "0.7665702", "0.76194495", "0.76163685", "0.7611368", "0.75783414", "0.7574488", "0.7554147", "0.752987", "0.75162137", "0.7503696", "0.74936754", "0.7470804", "0.7467447", "0.7466623", "0.74653757", "0.74632823", "0.74539894", "0.7429376", "0.7421454", "0.7407486", "0.74065197", "0.7400068", "0.7380263", "0.73612833", "0.73517627", "0.73487437", "0.7342034", "0.73260874", "0.7297708", "0.72916794", "0.72827816", "0.7282646", "0.72805446", "0.72768736", "0.726665", "0.7260592", "0.72448766", "0.72423935", "0.7233299", "0.7190436", "0.71866673", "0.71820235", "0.7162534", "0.71461856", "0.7135165", "0.71240366", "0.7113913", "0.7107502", "0.7100857", "0.7100783", "0.7090044", "0.7071489", "0.70660836", "0.70641524", "0.70618796", "0.7053403", "0.7053185", "0.7045926", "0.7045669", "0.7034596", "0.7034403", "0.703184", "0.70280665", "0.7027885", "0.70165664", "0.701145", "0.70002097", "0.69975114", "0.6994143", "0.6986454", "0.698299", "0.69791204", "0.69726515", "0.6972059", "0.69709504", "0.6967259", "0.6957323", "0.6957137", "0.6950377", "0.6935683", "0.69317657", "0.6926174", "0.6922323", "0.692064", "0.69203484", "0.6914944", "0.69078416", "0.69035274" ]
0.0
-1
Saves the config file
def save_config_file(self): with open(self.config_file_name, 'w',encoding='utf-8') as outfile: json.dump(self._config, outfile,indent=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def save():\n\n env.config.save(env.config_file)", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_dir)\n\n\t\t\t#actions\n\t\t\tConfig.setBool('actions', 'save_to_file', Config.save_to_file)\n\t\t\tConfig.setBool('actions', 'save_to_tag', Config.save_to_tag)\n\n\t\t\t#sources\n\t\t\tConfig.setBool('sources', 'lyric_wikia', Config.lyric_wikia)\n\t\t\tConfig.setBool('sources', 'musix_match', Config.musix_match)\n\t\t\tConfig.setBool('sources', 'lyricsmode', Config.lyricsmode)\n\t\t\tConfig.setBool('sources', 'az_lyrics', Config.az_lyrics)\n\n\t\t\twith open(Config.config_path, 'w') as configfile:\n\t\t\t\tConfig.conf.write(configfile)\n\t\t\treturn True\n\n\t\t# Catch all config parser errors\n\t\texcept BaseConfigParserError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def saveConfig():\n with open(_CONFIG_FNM, 'w') as configfile:\n CONFIG_DICT.write(configfile,\n space_around_delimiters=True)", "def save(self, config_path):\n raise NotImplementedError()", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def save(self) -> None:\n logger.info(\"Saving to config...\")\n yml.save(self._config, self.configpath)", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "def saveExitConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()\n self.close()", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def save(self) 
-> None:\n self._client.save_config()", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH", "def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def save(self) -> bool:\n config_file = self.DEFAULT_CONFIG_LOCAL\n for filename in self.CONFIG_LOCAL:\n if os.path.isfile(filename):\n config_file = filename\n break\n\n with open(config_file, \"w\") as f:\n try:\n stream = yaml.dump(self.to_dict(), indent=2, default_flow_style=False)\n f.write(stream)\n\n except Exception as e:\n raise click.ClickException(\n f\"Error while saving config in {config_file}:\\n{str(e)}\"\n )\n return True", "def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save_config(self, filename: str=None):\n if not filename:\n filename = 
self.config_file\n with open(filename, \"w\") as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)", "def saveConfig(config):\n global SW_CONFIG\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", config['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", config['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", config['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", config['sw_version'])\n cf.set(\"sw_config\", \"startup\", config['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", False)\n cf.set(\"run_config\", \"backup\", False)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()\n SW_CONFIG = config", "def save_config(self, path):\n if os.path.isdir(path):\n path = os.path.join(path, 'config.json')\n print('Save config to {}'.format(path))\n with open(path, 'w', encoding='utf-8') as w:\n w.write(json.dumps(self.to_dict(), indent=2,\n sort_keys=True))", "def save(self, savedir='.', fname=None):\n # Build up the file path to write to.\n dirpath = os.path.abspath(savedir)\n if fname is None:\n fname = os.path.basename(self.fname)\n\n name = os.path.splitext(fname)[0]\n path = os.path.join(dirpath, name + '.conf')\n\n # Put all comments and attributes into string formats.\n lines = ['# %s' % comment for comment in self.comments]\n lines.append('')\n for letter, section in self.config_guide.items():\n names = getattr(self, section)\n if names:\n if isinstance(names, basestring):\n line = '%s: %s;' % (letter, names)\n else:\n line = '%s: %s;' % (letter, ', '.join(names))\n lines.append(line)\n\n # Write the config.\n with open(path, 'w') as f:\n f.write('\\n'.join(lines))", "def save_to_conf(self):\n raise NotImplementedError", "def save_config(self, *args, **kwargs):\n raise NotImplementedError", "def save(self):\n try:\n self.write(open(self._cfg_path, 'w'))\n return True\n except PermissionError as err:\n if err.errno == 13:\n return False\n raise err", "def save_to_conf(self):\r\n raise NotImplementedError", "def save_configuration(config):\n with open(cwd + '/configuration.pickle', 'wb') as handle:\n pickle.dump(config, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)", "def save_config(self, path=\"\"):\n if not path:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n file_path = QtGui.QFileDialog.getSaveFileName(self,\n \"Save Config\",\n CONFIG_DIR,\n \"Config File (*.cfg)\")\n else:\n file_path = path\n self._save_state(file_path)\n self.write_text(\"Saved config @ {}\".format(file_path))", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n pawstools.save_cfg(od_load_flags,cfg_file)", "def save_config(config_path: str, data: dict):\n with open(config_path, 'w') as j:\n dump(data,j)", "def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. 
I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()", "def save( self ):\n ini = codecs.open(self.filename,\"w\",\"utf-8\",errors=\"replace\",buffering=0)\n for (name,value) in self.conf.items():\n print >>ini, name, \"=\", value\n ini.close()", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def save(self):\r\n if not self.filename:\r\n raise IOError(errors['NoConfigFileYet'])\r\n self.onSave()\r\n stuff = dict()\r\n for thing in ['aliases', 'triggers']:\r\n stuff[thing] = [] # Populate with (args, kwargs) pairs.\r\n if self.config.get('saving', thing):\r\n for c, o in getattr(self, thing).iteritems():\r\n stuff[thing].append(o.serialise())\r\n stuff['variables'] = dict()\r\n if self.config.get('saving', 'variables'):\r\n for v in self.variables:\r\n if hasattr(self, v):\r\n var = getattr(self, v)\r\n if type(var) in self.basicTypes:\r\n stuff['variables'][v] = var\r\n stuff['config'] = self.config.get_dump()\r\n with open(self.filename, 'w') as f:\r\n json.dump(stuff, f, indent = 1, sort_keys = True) # Finally write the completed dictionary.\r", "def save_conf(self):\r\n self.sendAndRecv(\"SAVECONF\\r\\n\")", "def save_settings(self):\n with open(self.settings_path, \"w\") as f:\n json.dump(self.settings, f, indent=4)", "def save_config(self, save_path: str) -> None:\n os.makedirs(save_path, exist_ok=True)\n model_hyperparameters_path = os.path.join(save_path, MODEL_HYPERPARAMETERS_FILE_NAME)\n save_json(model_hyperparameters_path, self.config_obj.to_dict())", "def _save(self):\n file = open(\"settings.ini\", \"w\")\n self._parser.write(file)\n file.close()", "def _save_cfg_to_file(self, server_id, cfg):\n\t\tfile = self.SettingsFolder + '{}.yml'.format(server_id)\n\t\twith open(file, 'w') as f:\n\t\t\tyaml.dump(cfg, f, default_flow_style=False)", "def saveSettings(self):\n helpers.saveFile(self.dataDir, self.settingsFilename, json.dumps(self.settings))", "def save_configuration(self):\n dom = self.vistrailsStartup.startup_dom()\n doc = dom.documentElement\n configuration_element = enter_named_element(doc, 'configuration')\n doc.removeChild(configuration_element)\n self.configuration.write_to_dom(dom, doc)\n self.vistrailsStartup.write_startup_dom(dom)\n dom.unlink()", "def update(self):\n self.save_config_file()", "def save(config, filename=None):\n filename = add_directory(filename or 'configure.json')\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory, 0o700)\n with open(filename, \"w\") as f:\n json.dump(config, f, indent=2, sort_keys=True)", "def save_config(config):\n with open(os.path.abspath(CONFIG_PATH), 'wb') as config_file:\n pickle.dump(config, config_file)\n return config", "def saveConfig(self, name=None):\n\n configDir = self.mwGlob['configDir']\n\n if self.config.get('profileName', '') == 'config':\n if 'reference' in self.config:\n del self.config['reference']\n\n # default saving for reference\n if name is None:\n name = self.config.get('reference', 'config')\n\n fileName = configDir + '/' + name + '.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n # if we save a reference first, we have to save the config as 
well\n if name != 'config':\n fileName = configDir + '/config.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n return True", "def save_config(conf, default):\n print()\n if yes_no('Would you like to save your configuration?'):\n name = simple_response(\n 'What would you like to name your configuration?')\n path = ask_path(\n 'Please enter the path you would like your configuration saved to',\n default=default)\n file_path = os.path.join(path, name)\n if file_path.find('.json') == -1:\n file_path += '.json'\n with open(file_path, 'w+') as f:\n json.dump(conf, f, indent=4)", "def saveNewConfiguration(self):\n selection = tk.filedialog. \\\n asksaveasfilename(title=\"Save CHUM configuration\")\n if selection:\n self._currentConfiguration = selection\n self._saveToFilePath(selection)", "def saveCurrentConfig():\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", \"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()", "def save(self, config_file: typing.TextIO):\n json.dump(self.to_dict(), config_file, indent=4)", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)", "def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command to ask for saving config. Wait till the question to overwrite\n # the startup file (\"Overwrite file [startup-config].... 
(Y/N)[N] ?\")\n output = await self.send_command(self.cmd_save_config, pattern=\"?\")\n\n # Confirm to save the config\n output += await self.send_command(\"Y\")\n\n # Return the commands of the configuration saving process\n return output", "def save_config(conf, save_path):\n with open(os.path.join(save_path), \"w\") as f:\n f.write(yaml.dump({'param': conf}, default_flow_style=False))", "def saveSettings():\t\n\tglobal settings\n\tfout = open(config_file,'w')\n\tfout.write(json.dumps(settings, sort_keys=True, indent=4))\n\tfout.close()", "def save(self):\n # TODO: save the file", "def saveConfig(config, filepath=None):\n result = False\n if filepath is None:\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"res/\", \"config.ini\")\n try:\n with open(filepath, 'wb') as configfile:\n config.write(configfile)\n result = True\n except Exception, e:\n print \"*** Caught Exception: %r ***\" % e\n return result", "def save(self):\n sublime.save_settings(self.file_name)", "def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' )\n\t\t\traise ConfigError( 'error while saving the config file' )", "def save_file(self, force=False): # type: (bool) -> bool\n if self._modified or force:\n logging.info('Cyra is writing your config to %s' % self._file)\n\n with open(self._file, 'w') as f:\n f.write(self.export_toml())\n\n self._modified = False\n return True\n return False", "def save(self):\n \n f = file(self.conf_file, \"w\")\n f.write(header + \"\\n\".join(map(str, self.db)) + \"\\n\")\n f.close()", "def save(config, path=None):\n if path is None:\n path = settings.HOST_CONFIG_PATH\n\n with open(path, 'w') as output:\n output.write(yaml.safe_dump(config, default_flow_style=False))", "def save_conf(self, name=None):\n \n if name:\n filename = name\n \n else:\n filename = \"conf_\" + str(self.conf[\"device\"]) + \"_\" + datetime.today().strftime('%Y-%m-%d') + \".txt\"\n \n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename, \"w\") as file:\n json.dump(self.conf, file)", "def saveConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getSaveFileName( self, \"Save Full Config As...\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.saveConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def save(self):\n if self.location is None:\n logger.debug(\"Save requested but not saving settings, \"\n \"location is None\")\n return\n\n if self._saving or not self._dirty:\n return\n\n self._saving = True\n\n logger.debug(\"Saving settings...\")\n\n with open(self.location + \".new\", 'w') as f:\n self.write(f)\n\n try:\n # make it readable by current user only, to protect private data\n os.fchmod(f.fileno(), 384)\n except:\n pass # fail gracefully, eg if on windows\n\n f.flush()\n\n try:\n os.rename(self.location, self.location + \".old\")\n except:\n pass # 
if it doesn'texist we don't care\n\n os.rename(self.location + \".new\", self.location)\n\n try:\n os.remove(self.location + \".old\")\n except:\n pass\n\n self._saving = False\n self._dirty = False", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()", "def save_config(self):\n if not os.path.exists(USER_CONFIG_PATH):\n os.makedirs(USER_CONFIG_PATH)\n\n # obtener el config actual\n config = self.get_config()\n\n # obtener el cliente\n client = self._args.get('client')\n\n # ciertos parametros no se tienen que salvar\n args = self._args.copy()\n for item in ['doc', 'command', 'client']:\n if item in args:\n args.pop(item)\n\n # actualizar el cliente default\n config['client'] = client\n\n # actualizar el resto de los parametros para ese cliente\n for item in args:\n if client in config:\n config[client][item] = args.get(item)\n else:\n config[client] = {item: args.get(item)}\n\n with open(USER_CONFIG_FILE, 'w') as config_file:\n yaml.dump(config, config_file, default_flow_style=False,\n allow_unicode=True)", "def save(self,filename=None,defaults=False):\n # Check filename or use default filename\n if not filename:\n if self.__configfile:\n filename=self.__configfile\n else:\n raise Exception(_('EVOGTK: Need a filename for saving preferences'))\n # Set widget values on config parser\n for section in self.__optionstruct:\n for option in self.__optionstruct[section]:\n widgets=self.__optionstruct[section][option][1]\n # Get default value\n value=vars(self)[section].get_option(option)\n if not defaults and widgets:\n # Use widget value\n val=self.__guidata.__getattr__(widgets[0])\n if val:\n value=val\n # Create section in file if not exists\n if not self.__config.has_section(section):\n self.__config.add_section(section)\n value=vars(self)[section].set_option(option,value)\n # Write config to file\n fd=open(filename,'wb')\n self.__config.write(fd)\n fd.close()", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command\n output = await self.send_command(self.cmd_save_config)\n\n # Return the commands of the configuration saving process\n return output", "def save_config(self, cfg):\n filename = self.get_filename(\"config\", ext=\".txt\")\n if filename is None:\n return\n\n if not isinstance(cfg, XFasterConfig):\n cfg = XFasterConfig(cfg)\n\n try:\n creator = os.getlogin()\n except OSError:\n creator = \"unknown\"\n with open(filename, \"w\") as f:\n f.write(\n \"# Created by {} on {:%Y-%m-%d %H:%M:%S}\\n\\n\".format(\n creator, datetime.datetime.now()\n )\n )\n cfg.write(f)\n\n return filename", "def save():\n log.info(\"Saving settings file\")\n with open(SETTINGS_FILE, \"w\") as file:\n json.dump(_names, file)", "def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()", "def save(self, filename=\"startup-config\"):\n command = f\"copy running-config {filename}\"\n # Changed to 
send_command_timing to not require a direct prompt return.\n self.native.send_command_timing(command)\n # If the user has enabled 'file prompt quiet' which dose not require any confirmation or feedback.\n # This will send return without requiring an OK.\n # Send a return to pass the [OK]? message - Increase delay_factor for looking for response.\n self.native.send_command_timing(\"\\n\", delay_factor=2)\n # Confirm that we have a valid prompt again before returning.\n self.native.find_prompt()\n log.debug(\"Host %s: Configuration saved.\", self.host)\n return True", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def save_settings(self):\n settings = {'camera': self.comboCamera.currentIndex(),\n 'rotation': self.comboRotation.currentIndex(),\n 'colors': {\n 'min_hue': self.spinMinHue.value(),\n 'max_hue': self.spinMaxHue.value(),\n 'min_saturation': self.spinMinSaturation.value(),\n 'max_saturation': self.spinMaxSaturation.value(),\n 'min_value': self.spinMinValue.value(),\n 'max_value': self.spinMaxValue.value(),\n }, 'diameter': self.spinDiameter.value(),\n 'lifter': self.lineEditLifter.text(),\n 'save_video': self.checkSaveVideo.isChecked()\n }\n settings_file = open('./resources/settings.json', 'w')\n json.dump(settings, settings_file, indent=4)\n settings_file.close()\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Settings saved.', 5000)", "def save_to_config(self) -> None:\n config_path = os.path.join(self.base_path, \"config.json\")\n\n with open(config_path, \"r\") as _json:\n c_dict = json.load(_json)\n\n c_dict[\"mean_similarity_error\"] = self.ME\n c_dict[\"similarity_correlation\"] = self.pearson_corr\n c_dict[\"similarity_spearman_correlation\"] = self.spearman_corr\n\n with open(config_path, \"w\") as _json:\n json.dump(c_dict, _json)", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def save(config: dict, out_dir: str, filename: str = \"config.yaml\"):\n assert filename.endswith(\".yaml\")\n with open(os.path.join(out_dir, filename), \"w+\") as f:\n f.write(yaml.dump(config))", "def save(self, file_name):\n saved_data = { \"start_config\" : self.start_config, \"action_storage\" : self.action_storage } \n with open(file_name, 'wb') as fh:\n pickle.dump(saved_data, fh)", "def save_options(self,config,options_file):\n \n config.set('manager-editable','media_offset',self.media_offset)\n config.set('manager-editable','profiles_offset',self.pp_profiles_offset)\n config.set('manager-editable','use_sudo',self.use_sudo)\n config.set('manager-editable','options',self.options)\n\n config.set('manager-editable','autostart_path',self.autostart_path) \n config.set('manager-editable','autostart_use_sudo',self.autostart_use_sudo)\n config.set('manager-editable','autostart_options',self.autostart_options)\n \n with open(options_file, 'wb') as config_file:\n config.write(config_file)", "def onExportConfig(self, evt):\n dlg = wx.FileDialog(self.view, \"Save As Configuration File\", wildcard = \"*.ini\" ,\n style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)\n if dlg.ShowModal() == wx.ID_OK:\n fileName=dlg.GetPath()\n self.config.exportConfig(fileName=fileName, e=None)", "def save(self):\n Preferences.setVCS(\n \"AutoClose\",\n self.vcsAutoCloseCheckBox.isChecked())\n 
Preferences.setVCS(\n \"AutoSaveFiles\",\n self.vcsAutoSaveCheckBox.isChecked())\n Preferences.setVCS(\n \"AutoSaveProject\",\n self.vcsAutoSaveProjectCheckBox.isChecked())\n Preferences.setVCS(\n \"StatusMonitorInterval\",\n self.vcsStatusMonitorIntervalSpinBox.value())\n Preferences.setVCS(\n \"MonitorLocalStatus\",\n self.vcsMonitorLocalStatusCheckBox.isChecked())\n Preferences.setVCS(\n \"AutoUpdate\",\n self.autoUpdateCheckBox.isChecked())\n \n self.saveColours(Preferences.setProjectBrowserColour)" ]
[ "0.9018369", "0.89983326", "0.87025833", "0.8437155", "0.83241445", "0.8323876", "0.8261388", "0.82597125", "0.8157242", "0.8059971", "0.80546784", "0.805303", "0.7990608", "0.79761726", "0.79709685", "0.79246825", "0.7923481", "0.7910758", "0.7910608", "0.78848404", "0.7884696", "0.78810245", "0.78804606", "0.786675", "0.78557724", "0.7831463", "0.78197193", "0.78163725", "0.7814579", "0.7752738", "0.7747439", "0.7745395", "0.7720092", "0.76957107", "0.76787686", "0.7631607", "0.76250196", "0.7610422", "0.76009536", "0.75982755", "0.75957185", "0.7571225", "0.7548178", "0.7542437", "0.75124896", "0.7488222", "0.74820966", "0.7479538", "0.7477034", "0.747386", "0.7464519", "0.7395819", "0.7380424", "0.7380073", "0.7372492", "0.73685724", "0.7366541", "0.73460734", "0.734535", "0.73405105", "0.73395145", "0.7335472", "0.7331998", "0.732765", "0.7327125", "0.7319074", "0.73185754", "0.73104215", "0.7309882", "0.7288159", "0.7286821", "0.72695917", "0.7253319", "0.72455", "0.72414684", "0.7234386", "0.72338676", "0.7201672", "0.7192053", "0.71385455", "0.71380025", "0.713022", "0.7116065", "0.7115737", "0.71143585", "0.7112588", "0.7106298", "0.70921206", "0.70854926", "0.7071014", "0.705838", "0.7053607", "0.7041643", "0.704047", "0.7028983", "0.70217574", "0.701391", "0.6997121", "0.6987919", "0.6986385" ]
0.8239592
8
Return value stored in config
def get(self, key, default_val=None): if key not in self._config.keys(): # we don't want KeyError return default_val # just return None if not found return self._config[key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]", "def _get_config_value(self, section, key):\n return config.get(section, key)", "def value(self) -> str:\n return self._config.get('value')", "def get(self, key):\n return self.config.get(key)", "def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get", "def get_system_value(name: str):\n return Config.objects.first().__dict__[name]", "def get_config(self):\n return {'value': self.value}", "def config(self):\n return self[CONFIG_KEY]", "def getconfig(self, key):\n return self.config[key]", "def get_config(self, key):\n return self.data[key]", "def get_value(self, key):\n if key not in self._config:\n raise ValueError(\"%s not in self.config\"%key)\n return self._config[key][\"value\"]", "def get_config(self, name):\n return self.configs[name][0]", "def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"", "def get_config_value(self, key):\n try:\n self.logger.write_to_log('config data requested', 'model')\n\n return self.db_handler.get_config_value(key)[0]\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def __getitem__(self, name):\n return self.config[name]", "def getvalue(self,num,name):\n return self.M.conf(num)[name]", "def get_config():\n return CONFIG", "def get_value(key: str) -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", key)", "def __getitem__(self, key):\n return self.config[key]", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def __getitem__(self, name : str) -> Any:\n return self._client.get_config()[name]", "def value(self):\n\n memcached_items = memcache_services.get_multi([self.name])\n if self.name in memcached_items:\n return memcached_items[self.name]\n\n datastore_item = config_models.ConfigPropertyModel.get(\n self.name, strict=False)\n if datastore_item is not None:\n memcache_services.set_multi({\n datastore_item.id: datastore_item.value})\n return datastore_item.value\n\n return self.default_value", "def get_configval(self, keyname, defaultval=None):\n return self.cfghelper.get_value(keyname,defaultval)", "def _getConfigParam(self, name, default=None):\n return self.config.get(self._configPrefix + name.lower(), default)", "def get_config(self, key):\n return getattr(self.args, 'conf.{}'.format(key))", "def get_config_var(name):\n return get_config_vars().get(name)", "def setting(self, config, name, default=None):\n\n return config.get(name, default) if config else default", "def config(self):\n return CurrentProject().config.config[self.key]", "def get(self) -> dict:\n return Config.get()", "def get(self, key):\n self._check(key)\n return unicode(self.__config.value(key).toString())", "def get(query):\n global INITIALIZED\n global CONFIG\n global GLOBAL_CONFIG\n\n if not INITIALIZED:\n raise Exception('[XOS-Config] Module has not been initialized')\n\n val = Config.get_param(query, CONFIG)\n if not val:\n val = Config.get_param(query, GLOBAL_CONFIG)\n if not val:\n val = Config.get_param(query, default.DEFAULT_VALUES)\n if not val:\n # TODO if no val return none\n # raise Exception('[XOS-Config] Config does not have a value (or a default) parameter %s' % query)\n 
return None\n return val", "def config(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"config\")", "def getValue(self, valueName):\n\t\treturn self.settings[valueName][0]", "def get_config(item: str) -> Union[str, int]:\n file = load_config_file(\"config.json\")\n\n value = file.get(item)\n\n if value is None:\n raise Exception(f\"Your config is out of date! Missing a value for {item}\")\n return value", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return 
self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config" ]
[ "0.8236148", "0.78571916", "0.78411025", "0.77996963", "0.7762611", "0.7640735", "0.7605088", "0.7578148", "0.7571694", "0.7504212", "0.7446086", "0.7428434", "0.7364911", "0.7358118", "0.7330266", "0.73145777", "0.7290636", "0.72804093", "0.7263993", "0.7168558", "0.7160482", "0.71520907", "0.71280974", "0.7100109", "0.7096089", "0.70873976", "0.70827675", "0.7059195", "0.7049757", "0.703598", "0.7030822", "0.7022724", "0.6986693", "0.6973593", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343", "0.6938343" ]
0.0
-1
Update the config file
def update(self): self.save_config_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def config_update(self, update: io.BytesIO) -> None:\n self.__logger.debug('Eva.config_update called')\n return self.__http_client.config_update(update)", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def refresh_config(self):\n with open(config_name, 'rb') as f:\n self.CONFIG = simplejson.load(f)\n\n return self", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update_conf_file():\n filepath = remote_dir + \"/apache2/conf/httpd.conf\"\n fabric.contrib.files.sed(filepath, 'myproject', project_name)", "def reload_config(self):\n pass", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def _refreshconfig(self):\n self.config = 
ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))", "def refresh_configuration(self):\n pass", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def update_config_file(**kwargs):\n config_file = try_read_file()\n config_file.update(kwargs)\n config_file = {key: value for key, value in config_file.items() if value is not None}\n logging.info('open config file %s', config_file_path)\n with open(config_file_path, 'w') as f:\n logging.info('begin io %s', config_file_path)\n json.dump(config_file, f, indent=4)\n logging.info('end io %s', config_file_path)", "def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)", "def reload(self):\n self.read(self._cfg_path)", "def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def update_config(config_file, config_base=None):\n if config_base is None:\n config_base = def_config_file\n assert(os.path.isfile(config_base))\n if not os.path.isfile(config_file):\n shutil.copy(config_base, config_file)\n cp = CisConfigParser()\n cp.read(config_file)\n miss = []\n if platform._is_win: # pragma: windows\n miss += update_config_windows(cp)\n with open(config_file, 'w') as fd:\n cp.write(fd)\n for sect, opt, desc in miss: # pragma: windows\n warnings.warn((\"Could not locate option %s in section %s.\"\n + \"Please set this in %s to: %s\")\n % (opt, sect, config_file, desc))", "def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def config_edits(configfile):\n try:\n\n # Read in the file\n filedata = None\n with open(configfile, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n '/home/scratch01/sradanov/A2C2/NCEP/', '').replace('/home/estimr2/sradanov/Operational/', '')\n\n # Write the file out again\n with open(configfile, 'w') as file:\n file.write(filedata)\n\n LOGGER.info('configfile modified')\n except Exception:\n LOGGER.exeption('Failed to modify configfile:')\n\n return configfile", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n 
meta.save()", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def update_shed_config(self, shed_conf):\n for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):\n if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:\n self._dynamic_tool_confs[index] = shed_conf\n self._save_integrated_tool_panel()", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def update_feature(selfs, k, v, cfg_path):\n with open(cfg_path, 'r') as cfg:\n file_dict = yaml.safe_load(cfg)\n # overprint the entries with the new config_dict\n file_dict['{}'.format(k)] = v\n with open(cfg_path, 'w') as w_file:\n w_file.write(yaml.dump(file_dict))", "def edit_cfg(config_file):\n\n GUI().cfgEditor(config_file)", "def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)", "def _update_auto_config(self):\n\n # Initialize the yaml data\n nodes = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n ydata = yaml.load(stream)\n if \"nodes\" in ydata:\n nodes = ydata[\"nodes\"]\n except yaml.YAMLError as exc:\n print(exc)\n return\n\n for i in nodes.items():\n key = i[0]\n node = i[1]\n\n # Interfaces\n node[\"interfaces\"] = {}\n for item in self._nodes[key][\"interfaces\"].items():\n port = item[0]\n interface = item[1]\n\n node[\"interfaces\"][port] = {}\n addr = \"{}\".format(interface[\"pci_address\"])\n node[\"interfaces\"][port][\"pci_address\"] = addr\n if \"mac_address\" in interface:\n node[\"interfaces\"][port][\"mac_address\"] = interface[\"mac_address\"]\n\n if \"total_other_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_other_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_other_cpus\"\n ]\n if \"total_vpp_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_vpp_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_vpp_cpus\"\n ]\n if \"reserve_vpp_main_core\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"reserve_vpp_main_core\"] = self._nodes[key][\"cpu\"][\n \"reserve_vpp_main_core\"\n ]\n\n # TCP\n if \"active_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"active_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"active_open_sessions\"\n ]\n if 
\"passive_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"passive_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"passive_open_sessions\"\n ]\n\n # Huge pages\n node[\"hugepages\"][\"total\"] = self._nodes[key][\"hugepages\"][\"total\"]\n\n # Write the auto config config file\n with open(self._autoconfig_filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def update(self, obj):\n\n self.cfg.update(obj)", "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def update_global_config(self, config, **kwargs):\n pass", "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def config_update_from_file(cls, config_file: str = None) -> None:\n if config_file is None:\n config_file = cls._config[\"graph_app_config\"]\n\n if config_file != \"\":\n cls._logger.debug(\n \"[%s]: Update config from file: %s\", cls.__name__, config_file\n )\n\n cls._config[\"graph_app_config\"] = config_file\n\n config_yaml: Dict = {}\n with open(config_file, \"r\") as f:\n try:\n config_yaml = safe_load(f)\n except YAMLError:\n cls._logger.exception(\n \"Exception loading config file '%s'\", config_file\n )\n\n if config_yaml:\n config_update = {\n \"graph_{}\".format(k): config_yaml.get(k, None)\n for k in config_yaml.keys()\n }\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)", "def overwrite(cls, config_file_overwrite: str):\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that \"\n \"don't exist in base config!!! 
Abort!!!\")\n sys.exit(1)", "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "def reload(self):\n with open(self._config) as f:\n self.data = json.load(f)", "def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()", "def _add_db_in_config(self):\n logger.info(f\"Updating configuration file in {self.configfile}\")\n with open(self.configfile, \"a\") as fp:\n print(self.ref_name + \".genome : \" + self.ref_name, file=fp)", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "def update_configs(self, config):\n for what in self.plugins: # backend, repo etc.\n for key in self.plugins[what]: # s3, filesystem etc.\n # print(\"Updating configuration of\", what, key)\n self.plugins[what][key].config(what='set', params=config)\n return", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def check_update(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\r\n try:\r\n sock.settimeout(2)\r\n sock.bind(('', 8080))\r\n sock.listen(1)\r\n conn, addr = sock.accept()\r\n except socket.timeout:\r\n return False\r\n sock.settimeout(None)\r\n with conn:\r\n conn.send(configuration)\r\n data = conn.recv(1024)\r\n with open('new_config.json', 'wt') as jsonfile:\r\n json.dump(data, jsonfile)\r\n self.set_new_configuration()", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def on_save(self):\r\n #new_config = ConfigParser.RawConfigParser()\r\n cur_config = self.config.dict_config\r\n #\r\n # update the dict_config\r\n cur_config[\"access_restriction\"][\"ips\"] = self.text_ips.get(1.0, tk.END).strip()\r\n cur_config[\"access_restriction\"][\"ar_url\"] = self.entry_url.get().strip()\r\n #\r\n cur_config[\"email\"][\"relay_server_host\"] = self.entry_server_host.get().strip()\r\n cur_config[\"email\"][\"relay_server_port\"] = self.entry_server_port.get().strip()\r\n cur_config[\"email\"][\"email_from\"] = self.entry_from.get().strip()\r\n cur_config[\"email\"][\"recipients\"] = self.text_recipients.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_enabled_subject\"] = self.entry_enabled_subject.get().strip()\r\n cur_config[\"email\"][\"ar_enabled_body\"] = self.text_enabled_body.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_disabled_subject\"] = self.entry_disabled_subject.get()\r\n cur_config[\"email\"][\"ar_disabled_body\"] = self.text_disabled_body.get(1.0, tk.END).strip()\r\n\r\n #self.action.save_config()\r\n # # sync 
dict_config to the gui\r\n # for section in self.config.dict_config:\r\n # new_config.add_section(section)\r\n # for item in self.config.dict_config[section]:\r\n # new_config.set(section, item, self.config.dict_config[section][item])\r\n # #\r\n # # saving to a file\r\n # with open(self.config.file_path, 'w') as newconfigfile:\r\n # new_config.write(newconfigfile)\r\n #\r\n # # mbox.showinfo(\"Information\",\r\n # # \"Current configuration has been successfully saved to '%s'\" % os.path.basename(self.configfile))\r\n # self.console.debug(\"Configuration has been saved to '%s'\" % self.config.file_path)\r", "def load_file(self, update=True): # type: (bool) -> None\n if os.path.isfile(self._file):\n logging.info('Cyra is reading your config from %s' % self._file)\n\n with open(self._file, 'r') as f:\n toml_str = f.read()\n self.load_toml(toml_str)\n else:\n self._modified = True\n\n # Write file if non existent or modified\n if update:\n self.save_file()", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def apply_config(filename):\n with open(filename) as config_file:\n config = json.load(config_file)\n for setting, value in config.items():\n CoreConfig.__dict__[setting] = value", "def update_config_file(invoker: AirflowInvoker) -> None:\n airflow_cfg_path = invoker.files[\"config\"]\n logging.debug(f\"Generated default '{str(airflow_cfg_path)}'\")\n\n # open the configuration and update it\n # now we let's update the config to use our stubs\n airflow_cfg = configparser.ConfigParser()\n\n with airflow_cfg_path.open() as cfg:\n airflow_cfg.read_file(cfg)\n logging.debug(f\"Loaded '{str(airflow_cfg_path)}'\")\n\n config = invoker.plugin_config_processed\n for section, cfg in config.items():\n airflow_cfg[section].update(cfg)\n logging.debug(f\"\\tUpdated section [{section}] with {cfg}\")\n\n with airflow_cfg_path.open(\"w\") as cfg:\n airflow_cfg.write(cfg)\n logging.debug(f\"Saved '{str(airflow_cfg_path)}'\")", "def update(name, value, config_dir=None):\n if name not in Config.__ALLOWED:\n msg = f'Cannot update configuration; value \"{name}\" is not allowed.'\n raise ConfigurationError(msg)\n config_dir = Config.resolve_config_dir(config_dir)\n config_dat, config_file = Config.get_config_file(\n config_dir,\n round_trip_load=True,\n quiet=True,\n )\n config_dat.update({name: value})\n Config.write_config_file(config_dat, config_file)\n if Config.is_set:\n Config.__conf[name] = value", "def update_validation_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('validation'):\n rconfig.add_section('validation')\n rconfig.set(\n 'validation', 'connect_method',\n 'floating' if self.ext_net else 'fixed')\n rconfig.set(\n 'validation', 'network_for_ssh',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update(self):\n \n dbpath, config = self._start()\n \n self.config.obo = check_file(config.obo, dbpath, \"obo\") \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=True) \n phen_file = check_file(config.model_phenotypes, dbpath,\n \"model_phenotypes\", allow_none=True)\n \n summary = self._update(desc_file, phen_file) \n if len(summary[\"incorrect_ids\"]) == 0 and not config.skip_compute:\n self._compute(models=summary[\"new_phenotypes\"])\n \n self._end()", "def _update_config_from_file(config, cfg_file):\n config.defrost()\n with 
open(cfg_file, 'r') as infile:\n yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader)\n for cfg in yaml_cfg.setdefault('BASE', ['']):\n if cfg:\n _update_config_from_file(\n config, os.path.join(os.path.dirname(cfg_file), cfg)\n )\n config.merge_from_file(cfg_file)\n config.freeze()", "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parser.getboolean('Settings', 'saveGraph')\n self.graphDPI = self.parser.getint('Settings', 'graphDPI')", "def update(d):\n # get (or create) config path\n p= initialize()['config']\n\n with lockfile.LockFile(p):\n # load current configuration\n cnf = load_config(open(p))\n\n # merge \n def dict_merge(a, b):\n '''recursively merges dict's. not just simple a['key'] = b['key'], if\n both a and bhave a key who's value is a dict then dict_merge is called\n on both values and the result stored in the returned dictionary.\n from https://www.xormedia.com/recursively-merge-dictionaries-in-python/\n '''\n if not isinstance(b, dict):\n return b\n result = copy.deepcopy(a)\n for k, v in b.items():\n if k in result and isinstance(result[k], dict):\n result[k] = dict_merge(result[k], v)\n else:\n result[k] = copy.deepcopy(v)\n return result\n cnf = dict_merge(cnf, d)\n\n # save \n dump_config(cnf, open(p,'w'))", "def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update_settings(self):\n\n param = \"settings.py\"\n self._check_path_availability([\"get_settings_dir\", \"get_settings_dir_to\"])\n self.updater.update_files(\n self.analizer.get_settings_dir(),\n self.analizer.get_settings_dir_to(),\n param,\n )\n return self.write_debug_message(\"Settings upgrade is done!\\n\")", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = 
collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def upgrade_config(configFile:str, oldSampleFile:str, newSampleFile:str, unsafeAttributesFile:str, filetype:str):\n\n #If config file is not present then abort merging.\n if not os.path.isfile(configFile):\n Log.error(f'config file {configFile} does not exist')\n raise Exception(f'ERROR: config file {configFile} does not exist')\n\n Log.info(f'config file {str(configFile)} upgrade started.')\n\n # old sample file\n conf_old_sample = filetype + oldSampleFile\n cs_conf_old_sample = S3CortxConfStore(config=conf_old_sample, index=conf_old_sample)\n\n # new sample file\n conf_new_sample = filetype + newSampleFile\n cs_conf_new_sample = S3CortxConfStore(config=conf_new_sample, index=conf_new_sample)\n conf_new_sample_keys = cs_conf_new_sample.get_all_keys()\n\n # unsafe attribute file\n conf_unsafe_file = filetype + unsafeAttributesFile\n cs_conf_unsafe_file = S3CortxConfStore(config=conf_unsafe_file, index=conf_unsafe_file)\n conf_unsafe_file_keys = cs_conf_unsafe_file.get_all_keys()\n\n # active config file\n conf_file = filetype + configFile\n cs_conf_file = S3CortxConfStore(config=conf_file, index=conf_file)\n conf_file_keys = cs_conf_file.get_all_keys()\n\n #logic to determine which keys to merge.\n keys_to_overwrite = []\n for key in conf_new_sample_keys:\n #If key is marked for unsafe then do not modify/overwrite.\n if key in conf_unsafe_file_keys:\n continue\n #if key not present active config file then add it\n # (this will also add and hence effectively overwrite keys removed in above [] handing\n # and hence will always result in overwrite for these keys from the new sample file).\n if key not in conf_file_keys:\n keys_to_overwrite.append(key)\n #if key is not unsafe and value is not changed by user then overwrite it.\n elif cs_conf_file.get_config(key) == cs_conf_old_sample.get_config(key):\n keys_to_overwrite.append(key)\n #if user has changed the value of the key then skip it.\n else:\n continue\n\n cs_conf_file.merge_config(source_index=conf_new_sample, keys_to_include=keys_to_overwrite)\n cs_conf_file.save_config()\n Log.info(f'config file {str(configFile)} upgrade completed')", "def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update_cfg_file(cfg, scoring, logr):\n cfg[SCORER][SCORER_PATH] = SCORING_PATHS.get(scoring)\n cfg[SCORER][SCORER_OPTIONS] = SCORING_ATTRS.get(scoring).get(logr)\n return cfg", "async def setconfigfile(self, ctx, *, config_file):\n self.settings.setConfigFile(config_file)\n await ctx.send(inline('Done'))", "def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)", "def save(self):\n file = open(self.path, 'w')\n 
self.config.write(file)\n file.close()", "def update_config(self, config):\n self.config = {\n \"key\": \"\",\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['github_api_key']", "def modify_config_file(config_file, search_config, replace_config):\n with open(config_file, 'r+') as f:\n content = f.read()\n f.seek(0)\n f.write(content.replace(search_config, replace_config))\n f.truncate()\n f.close()", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def _update_cfg_from_files(self, files):\n\t\tfor file in files:\n\t\t\twith open(self.SettingsFolder + file) as f:\n\t\t\t\tself._add_cfg_to_list(file[:-4], yaml.load(f))", "def _update_cloudwatch_config(self, config_type):\n param_name = self._get_ssm_param_name(config_type)\n cw_config_ssm = self._set_cloudwatch_ssm_config_param(\n param_name, config_type)\n cur_cw_config_crc = self._sha1_hash_file(config_type)\n ssm_cw_config_crc = self._sha1_hash_json(cw_config_ssm)\n # check if user updated cloudwatch related config files.\n # if so, perform corresponding actions.\n if cur_cw_config_crc != ssm_cw_config_crc:\n logger.info(\n \"Cloudwatch {} config file has changed.\".format(config_type))\n self.CLOUDWATCH_CONFIG_TYPE_TO_UPDATE_CONFIG_FUNC.get(\n config_type)()", "def change_pwds(config):\n if not config:\n click.echo(help_msg)\n return\n try:\n user_config = imp.load_source('config', config)\n except IOError as e:\n click.echo(\"File %s not found.\" % config)\n logger.error(\"Invalid path to config file: %s\" % e)\n except Exception as e:\n click.echo(\"Ooups. 
Something went wrong.\")\n click.echo(e)\n logger.critical(\"%s\" % e)\n else:\n for i in dir(config_f):\n if not i.startswith(\"__\"):\n try:\n user_config.__dict__[i]\n except KeyError:\n user_config.__dict__[i] = config_f.__dict__[i]\n handle_exceptions(main, user_config)", "def reload_ini(self):\n while True:\n\n if round(os.path.getmtime(self.config_file)) > self.config_last_modified:\n print('Config Changes Detected, Reloading .ini File')\n config = configparser.ConfigParser()\n config.read(self.config_file)\n self._set_ini_options(config)\n self.config_last_modified = round(os.path.getmtime(self.config_file))\n\n time.sleep(3)", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def update_dashboard_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if env.get('DASHBOARD_URL'):\n if not rconfig.has_section('dashboard'):\n rconfig.add_section('dashboard')\n rconfig.set('dashboard', 'dashboard_url', env.get('DASHBOARD_URL'))\n else:\n rconfig.set('service_available', 'horizon', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def _on_config_changed(self, _):\n self._configure_pod()", "def update_local(self, name, file=None):\n self._recent_caller = name\n self._local = {}\n if file is None:\n file = self._findConfigPath(name)\n\n if file and os.path.exists(file):\n config = CustomConfigObj(file,\n encoding='UTF8',\n parent_config=self._main_config)\n\n self._update(self._local, config)\n return True", "def configure(self, config: dict):\n self.config.update(config)", "def update(self, config):\n # find keys are in config but not in self.config\n extra_keys = set(config.keys()) - set(self.config.keys())\n if len(extra_keys) > 0:\n raise ValueError(\"keys {} in config are not in Config.config\".format(extra_keys))\n # update self.config by config\n else:\n self.config.update(config)", "def update(self):\n # TO DO for updating urls if changed\n pass", "def save(self):\n if self.changed:\n logger.info(\"Overwriting Redis config\")\n self.client.config_rewrite()\n self.changed = False", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", 
self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def update_config_file(commands_to_add, commands_to_remove):\r\n\r\n # Parse the config.txt file contents\r\n config_file_contents = {}\r\n if os.path.exists(utils.CONFIG_FILE_PATH):\r\n config_file_string = utils.get_config_file()\r\n first_line = True\r\n for line in config_file_string.split('\\n'):\r\n if first_line:\r\n first_line = False\r\n continue\r\n if not line.strip(): continue\r\n if not line.startswith('#'):\r\n line = line.split('\\t')\r\n config_file_contents[line[0]] = line\r\n\r\n # Remove the specified contents\r\n for cmd in commands_to_remove:\r\n config_file_contents.pop(cmd, None)\r\n\r\n # Add the specified contents\r\n for cmd in commands_to_add:\r\n config_file_contents[cmd] = [cmd] + ['none']*3\r\n\r\n # Archive old config.txt\r\n if os.path.exists(utils.CONFIG_FILE_PATH):\r\n current_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\r\n obs_ending = '-obsolete-{0}.txt'.format(current_time)\r\n obs_path = obs_ending.join(utils.CONFIG_FILE_PATH.rsplit('.txt',1))\r\n os.rename(utils.CONFIG_FILE_PATH, obs_path)\r\n\r\n # Print new sorted config.txt\r\n out_handle = open(utils.CONFIG_FILE_PATH, 'w')\r\n out_handle.write('cmd_name\\texecute\\tload_module\\tunload_module')\r\n out_handle.write('\\n')\r\n for cmd, line in sorted(config_file_contents.iteritems()):\r\n out_handle.write('\\t'.join(line))\r\n out_handle.write('\\n')\r\n out_handle.close()", "def update_config(doc, signum):\n log = logging.getLogger(__name__)\n log.info('Caught signal %d (%s). Reloading configuration.', signum, '/'.join(SIGNALS_INT_TO_NAME[signum]))\n if not GLOBAL_MUTABLE_CONFIG['--config']:\n log.warning('No previously defined configuration file. 
Nothing to read.')\n return\n\n # Read config.\n try:\n config = _get_arguments(doc)\n except DocoptcfgFileError as exc:\n logging.getLogger(__name__).error('Config file specified but invalid: %s', exc.message)\n return\n\n # Resolve relative paths.\n _real_paths(config)\n\n # Validate.\n try:\n _validate_config(config)\n except ConfigError:\n return\n\n # Update.\n GLOBAL_MUTABLE_CONFIG.update(config)\n\n # Re-setup logging.\n setup_logging(GLOBAL_MUTABLE_CONFIG)\n log.info('Done reloading configuration.')", "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def init_config(self):\n # self.config.read(self.cnfgfile)\n if not self.config.has_section(VERSION_SECTION):\n self.config.add_section(VERSION_SECTION)", "def configfile(self, fp):\n if not self.modifier.skip_configfile:\n if os.path.exists(fp):\n self.configfiles.append(fp)\n c = snakemake.io.load_configfile(fp)\n update_config(self.config, c)\n if self.overwrite_config:\n logger.info(\n \"Config file {} is extended by additional config specified via the command line.\".format(\n fp\n )\n )\n update_config(self.config, self.overwrite_config)\n elif not self.overwrite_configfiles:\n fp_full = os.path.abspath(fp)\n raise WorkflowError(\n f\"Workflow defines configfile {fp} but it is not present or accessible (full checked path: {fp_full}).\"\n )\n else:\n # CLI configfiles have been specified, do not throw an error but update with their values\n update_config(self.config, self.overwrite_config)", "def reload(self):\n self.load_config()\n # Seems we need to explicitly refresh this\n if self.main_instance:\n self.main_instance.config = self.config", "def _load_conf(self, conf):\n f = open(self.file, \"w\")\n f.write(conf)\n f.close()", "def config(self, config_dict):\r\n self._cfg.config = config_dict", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)" ]
[ "0.8264877", "0.7606795", "0.74441475", "0.726482", "0.7236753", "0.72217864", "0.7091417", "0.7060331", "0.70171785", "0.70084494", "0.6937647", "0.69036394", "0.6887495", "0.68652135", "0.6852088", "0.6795112", "0.679408", "0.67330885", "0.6727831", "0.6705917", "0.66979545", "0.6675949", "0.66608924", "0.66542983", "0.6618892", "0.65922624", "0.6573851", "0.6571257", "0.6559911", "0.65597725", "0.65567434", "0.6554245", "0.6543328", "0.6535085", "0.6530818", "0.64974326", "0.64898056", "0.6485235", "0.64847267", "0.64548755", "0.6454667", "0.6451614", "0.6450422", "0.64390045", "0.64377356", "0.6428822", "0.6420591", "0.6417948", "0.6410888", "0.6397023", "0.6397023", "0.63952154", "0.63882166", "0.6377484", "0.63772804", "0.6377112", "0.6373792", "0.637171", "0.6366796", "0.63563544", "0.6341555", "0.63331044", "0.63302946", "0.6325132", "0.6317451", "0.63149947", "0.6299498", "0.6295931", "0.6295813", "0.6282219", "0.6272947", "0.6271646", "0.62679833", "0.62666065", "0.62409", "0.6239175", "0.6238545", "0.6224374", "0.62176615", "0.6215808", "0.6214888", "0.6210739", "0.62070346", "0.6193919", "0.61893666", "0.6187311", "0.618511", "0.6181811", "0.6173924", "0.61705506", "0.6170119", "0.6159923", "0.61452854", "0.61448556", "0.6143913", "0.6135089", "0.61108994", "0.6107523", "0.61063075", "0.60997254" ]
0.88768095
0
FILL COLUMN2 WITH MOST LIKELY VALUES BASED ON COLUMN1
def fillgaps(column1,column2,train,test): ddict={} d1=test[[column1,column2]].dropna().values d2=train[[column1,column2]].dropna().values c1=np.array(d1[:,0].tolist()+d2[:,0].tolist()) c2=np.array(d1[:,1].tolist()+d2[:,1].tolist()) for ic1 in np.unique(c1): ddict[ic1]=(c2[c1==ic1].mean(),c2[c1==ic1].std()) full_data = [train, test] for dataset in full_data: for missing in np.where(np.isnan(dataset[column2]))[0]: m,s=ddict[dataset[column1][missing]] if s<=0: dataset[column2][missing]=m else: dataset[column2][missing]=np.random.normal(loc=m,scale=s,size=1) return (train,test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fill_col1_val_where_col2_notna(col1, col2, val):\n fill_ser = col1.copy()\n fill_ser[col2.notna()] = val\n return col1.fillna(fill_ser)", "def fill_col(col, x):\n col.append(x)\n return col", "def merge(line):\n #Step1. Putting 0 to the end of the list.\n result = []\n for cell in line:\n if cell != 0:\n result.append(cell)\n for cell in range(line.count(0)):\n result.append(0)\n #Step2. Replaced with a tile of twice the value and a zero tile\n for cell in range(len(result)-1):\n if result[cell] == result[cell+1] and len(result) != 1:\n result[cell] += result[cell]\n result[cell+1] = 0\n #Step3. Repeat step1\n final_result = []\n for cell in result:\n if cell != 0:\n final_result.append(cell)\n for cell in range(result.count(0)):\n final_result.append(0)\n return final_result", "def fill_cells_to_the_right(nonogram, row, col):\n sth_changed = False\n\n # leeway stores a number of fillable cells to the left\n # -1 at the end returns length of line, when there is no true empty cell\n left_cells = nonogram.data.get_row(row)[:col]\n leeway = (left_cells[::-1]+[-1]).index(-1)\n\n block_length = find_min_block_length(nonogram, row, col)\n\n # filling cells enforced by minimal block length\n for position in range(col + 1, col + block_length - leeway):\n nonogram.fill_cell(row, position, 1)\n sth_changed = True\n\n return sth_changed", "def modify_d2(d1, d2):\n val_list = [i for i in d2.keys()]\n \n for key in val_list:\n for i in range(len(d2[key])):\n try:\n val = d1[d2[key][i][2]]\n d2[key][i][2] = val\n if None in d2[key][i]:\n d2[key][i].remove(None)\n except:\n pass\n return d2", "def fill_cells_to_the_left(nonogram, row, col):\n sth_changed = False\n\n # leeway stores a number of fillable cells to the right\n # -1 at the end returns length of line, when there is no true empty cell\n right_cells = nonogram.data.get_row(row)[col+1:]\n leeway = (right_cells + [-1]).index(-1)\n\n block_length = find_min_block_length(nonogram, row, col)\n\n # filling cells enforced by minimal block length\n for position in range(col + leeway + 1 - block_length, col):\n nonogram.fill_cell(row, position, 1)\n sth_changed = True\n\n return sth_changed", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. 
\n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def merge(line):\r\n # Create a copy of the input list line\r\n list_copy=[]\r\n #adding the none zero elements of line to list_copy\r\n for dummy_i in range(len(line)):\r\n if line[dummy_i] != 0:\r\n list_copy.append(line[dummy_i])\r\n # adding the appropriate number of zeros to match the length of list_copy and line\r\n for dummy_j in range(len(list_copy),len(line)):\r\n list_copy.append(0)\r\n \r\n # merging the tiles that have the same value\r\n for dummy_k in range(len(list_copy)-1):\r\n # checking for equal values of the adjacent tiles \r\n if list_copy[dummy_k]!=0 and list_copy[dummy_k]==list_copy[dummy_k+1]:\r\n # if equal double the value of the first tile and assign zero to second tile\r\n list_copy[dummy_k]=2*list_copy[dummy_k]\r\n list_copy[dummy_k+1]=0\r\n \r\n #shifting the rest of the values ot the tiles by one place\r\n for dummy_p in range(dummy_k+1,len(list_copy)-1):\r\n list_copy[dummy_p]=list_copy[dummy_p+1]\r\n if (len(line)>3):\r\n list_copy[-2]=list_copy[-1]\r\n list_copy[-1]=0\r\n # returning list_copy which is the answer\r\n return list_copy", "def fill_hom(patient, gene):\n\n first = 'HR_' + patient + '_First_' + gene + '_Split'\n second = 'HR_' + patient + '_Second_' + gene + '_Split'\n\n for column in data.columns:\n f = re.match(second, column)\n if f:\n data[second] = data[second].fillna(data[first])\n else:\n pass", "def fill_data(column, data):\n data[column].fillna(data[column].value_counts().index[0], inplace=True)", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def _update_context_no_unique_values(metadata, column, unique_values):\r\n\r\n return None", "def merge(line):\n line2=[]\n line3=[]\n line4=[]\n pair=0\n shift=0\n line1=[0]*len(line)\n if(len(line)==1):\n for iota in line:\n line1[0]=iota\n return line1\n \n for iota in xrange(len(line)):\n line4.append(line[iota])\n \n for iota in xrange(len(line)):\n line3.append(line[iota])\n \n \n \n for xinn in xrange(len(line3)):\n for iota in xrange(len(line3)-1):\n if(line3[iota]==0):\n if((line3[iota+1])>0):\n temp=line3[iota];\n line3[iota]=line3[iota+1];\n line3[iota+1]=temp\n shift=1\n xinn=xinn+1\n \n \n if(shift==1):\n for iota in xrange(len(line3)):\n line2.append(line3[iota])\n else:\n for iota in xrange(len(line4)):\n line2.append(line4[iota])\n \n \n \n \n \n \n \n for olay in range(len(line2)-1):\n \n \n if(line2[olay]==line2[olay+1]):\n line1[olay]=2*line2[olay];\n line2[olay+1]=0\n line1[olay+1]=line2[olay+1]\n pair=1;\n olay=olay+2\n else:\n line1[olay]=line2[olay]\n line1[olay+1]=line2[olay+1]\n \n \n \n \n \n \n \n \n \n \n if(pair==0):\n for lonn in xrange(len(line3)):\n line1[lonn]=line3[lonn]\n return line1\n \n \n \n for xinn in xrange(len(line1)):\n for iota in xrange(len(line1)-1):\n if(line1[iota]==0):\n if((line1[iota+1])>0):\n 
temp=line1[iota];\n line1[iota]=line1[iota+1];\n line1[iota+1]=temp\n \n xinn=xinn+1\n \n return line1", "def switchColumn(data_file, column1, column2):\n\tdata = []\n\tfor dataLine in readData(data_file):\n\t\ttmp = dataLine[column1-1]\n\t\tdataLine[column1-1] = dataLine[column2-1]\n\t\tdataLine[column2-1] = tmp\n\t\tdata.append(dataLine)\n\twriteData(data_file, data)", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def fillna_negtive1(df, target=None):\n if not target:\n target = ['price', 'image_top_1']\n for col in target:\n df[col] = df[col].fillna(-1)\n return None", "def _exch(self, ix_1, ix_2):\n tmp = self._vals[ix_1]\n self._vals[ix_1] = self._vals[ix_2]\n self._vals[ix_2] = tmp", "def addColumnValues(self, column):\n nr1 = self.data.shape[1]\n nr = len(column)\n if nr1 == 0:\n # case 1: empty table\n if nr == 0:\n # case 1a: we're just adding a name\n self.data = numpy.reshape(self.data, (1, 0))\n pass\n else:\n # case 1b: we're adding a column of values\n self.data = numpy.reshape(numpy.array(column), (1, nr))\n pass\n pass\n else:\n # case 2: non-empty table\n if nr1 > 0 and nr != nr1:\n raise Exception(\"New column must have the same length as existing ones %s %s\"%(nr1,nr))\n new_column = numpy.reshape(numpy.array(column), (1, nr))\n self.data = numpy.concatenate((self.data, new_column))\n pass\n return", "def _merge_row(self, row1, row2):\n\n duprow = list(row1)\n duprow.extend(list(row2))\n row1.clear()\n overlap_map = {}\n\n for body, overlap in duprow:\n if body not in overlap_map:\n overlap_map[body] = 0\n overlap_map[body] += overlap\n\n for body, overlap in overlap_map.items():\n row1.add((body, overlap))", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def assemble_col(c1, c2):\n c1.extend(c2)\n return c1", "def fill_row(row, x):\n row.append(x)\n return row", "def merge(line):\r\n setup_line = line[:]\r\n new_line = []\r\n for num in setup_line:\r\n if num == 0:\r\n setup_line.remove(0)\r\n setup_line.append(0)\r\n \r\n setup_line.append(0)\r\n setup_line.append(0)\r\n setup_line.append(0)\r\n setup_line.append(0)\r\n \r\n for itr in range(len(line)):\r\n if setup_line[0] == setup_line[1] and setup_line[0] != 0:\r\n new_line.append(setup_line[0] * 2)\r\n setup_line.remove(setup_line[0])\r\n setup_line.remove(setup_line[0])\r\n else:\r\n new_line.append(setup_line[0])\r\n setup_line.remove(setup_line[0])\r\n new_line.append(0)\r\n \r\n for itr in range(len(new_line)):\r\n for num in new_line[len(line):]:\r\n if num == 0:\r\n new_line.remove(0)\r\n \r\n return new_line", "def merge(line):\n lst = [0] * len(line) # we start with a 0-filled list.\n pos = 0 # index position in the new list\n pvl = 0 # we keep the previous value\n for val in line:\n if val: # we only care about the non zero values.\n if not pvl: # this tile is empty\n lst[pos] = val # let's fill with val\n pvl = val\n elif pvl - val: # different non zero values?\n pos += 1\n lst[pos] = val # tiles don't merge\n pvl = val\n else: # same values!\n lst[pos] <<= 1 # it merges!\n pos += 1\n pvl = 0 # next value is 0\n return lst", "def merge(line):\n # replace with your code\n result = []\n for index in 
range(len(line)):\n result.append(0)\n result = shift_down(line, result)\n for index in range(len(result) - 1):\n if result[index] == result[index + 1]:\n result[index] *= 2\n result[index + 1] = 0\n result = shift_down(result, result)\n return result", "def merge_right(row):\r\n row1 = reverse(row)\r\n row2 = add_tiles(row1)\r\n row3 = reverse(row2)\r\n row = row3\r\n\r\n return row", "def merge(line):\r\n origin_len = len(line)\r\n new_line = list(line)\r\n empty_space = 0\r\n # remove zero\r\n while empty_space in new_line:\r\n new_line.remove(0)\r\n # merge\r\n tile_cursor = 0\r\n for dummy_count in range(len(new_line) - 1):\r\n if tile_cursor >= (len(new_line) - 1):\r\n break\r\n elif new_line[tile_cursor] == new_line[tile_cursor + 1]:\r\n new_line[tile_cursor] = 2 * new_line[tile_cursor]\r\n new_line[tile_cursor + 1] = 0\r\n tile_cursor = tile_cursor + 2\r\n else:\r\n tile_cursor += 1\r\n \r\n #remove zero\r\n while empty_space in new_line:\r\n new_line.remove(0)\r\n list_zero = [0] * (origin_len - len(new_line))\r\n new_line.extend(list_zero)\r\n \r\n return new_line", "def fill_league_currency(self, df, latest_currency_list):\n league_currency_list = [currency[0] for currency in df.columns]\n for lastest_currency in latest_currency_list:\n if lastest_currency not in league_currency_list:\n df[lastest_currency, df.columns[0][1]] = np.nan\n df = df.sort_index(axis=1)\n return df", "def merge(line):\n # create a list of non zero values from input\n input_size = len(line)\n line = [dummy_value for dummy_value in line if dummy_value > 0]\n \n # create an output list of same length as input with zero values\n line2 = [0] * input_size\n \n #update the output list with the non zero input list based on certain conditions\n line2[0:len(line)] = line\n \n pos = [dummy_no for dummy_no in range(0, len(line2))]\n \n for jos in pos[0:input_size -1]:\n if line2[jos] == line2[pos[jos+1]]:\n line2[jos] = line2[jos] + line2[pos[jos+1]]\n line2[jos+1] = 0\n \n # repeat last two steps above\n # create an output list of same length as input with zero values\n line2 = [dummy_val for dummy_val in line2 if dummy_val > 0]\n \n # create an output list of same length as input with zero values\n line3 = [0] * input_size\n \n #update the output list with the non zero input list \n line3[0:len(line2)] = line2\n \n return line3", "def fill_missing_admission_type(df):\n for admit_type in df[\"admission_type\"].unique():\n type_facilities = df[df[\"admission_type\"] == admit_type][\"facility\"].unique()\n\n df[\"admission_type\"] = np.where(\n (df[\"admission_type\"].isnull() & df[\"facility\"].isin(type_facilities)),\n admit_type,\n df[\"admission_type\"],\n )\n\n return df", "def merge(line):\r\n result_list = [0] * len(line)\r\n medium_list = []\r\n zero = 0\r\n \r\n for non_zero in range(len(line)):\r\n if line[non_zero] > 0:\r\n medium_list.append(line[non_zero])\r\n \r\n for position in range(len(medium_list)):\r\n result_list.pop(position)\r\n result_list.insert(position, medium_list[position])\r\n \r\n \r\n for tile in range(len(result_list)):\r\n if tile + 1 > len(result_list) -1:\r\n break\r\n if result_list[tile] == result_list[tile + 1]:\r\n result_list[tile] *= 2\r\n result_list[tile+1] = 0\r\n \r\n \r\n while zero in result_list:\r\n result_list.remove(zero) \r\n \r\n \r\n while len(result_list) != len(line):\r\n result_list.append(zero)\r\n \r\n \r\n return result_list", "def _rebuild_compareset(self, result, rewrapped_columns, columns):\n normalize = lambda x: x if (isinstance(x, str) or not x) else 
tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n columns = normalize(columns)\n\n if rewrapped_columns == columns:\n return result # <- EXIT!\n\n missing = self._missing\n def rebuild(x):\n lookup_dict = dict(zip(rewrapped_columns, x))\n return tuple(lookup_dict.get(c, missing) for c in columns)\n return CompareSet(rebuild(x) for x in result)", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*n)\n j=0\n for i in range(len(nums1)):\n if nums2[j]<nums1[i]:\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1", "def expand_df(df, column):\n expanded2 = pd.DataFrame({\n col: np.repeat(df[col].values, df[column].str.len())\n for col in df.columns.drop(column)}\n ).assign(**{column: list(np.concatenate(df[column].values))})\n return expanded2", "def right_to_left(data, key, col_tuple, col_tuples, re_tuples,\n re_triple, cnt):\n first_col = col_tuples[0]\n second_col = col_tuples[1]\n if col_tuple == first_col or col_tuple == second_col:\n if col_tuple == first_col:\n data[\"tmp_0\"] = data[col_tuple[1]].astype(str).str.extract(re_triple[cnt][0].format(key),\n expand=True)\n else:\n data[\"tmp_0\"] = data[col_tuple[1]].astype(str).str.extract(re_triple[cnt][0],\n expand=True)\n data[\"tmp_1\"] = data[\"tmp_0\"].str.extract(re_triple[cnt][1],\n expand=True)\n if col_tuple == first_col:\n regex = re_triple[cnt][2].format(key)\n data[\"tmp_0\"] = data[\"tmp_0\"].str.extract(regex, expand=True)\n else:\n regex = re_triple[cnt][2]\n data[\"tmp_0\"] = data[\"tmp_0\"].str.extract(regex, expand=True)\n tmp_0_clean = data[\"tmp_0\"].str.strip()\n data[col_tuple[0]] = data[col_tuple[0]].fillna(tmp_0_clean)\n if col_tuple == first_col:\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0].format(key),\n np.NaN, regex=True)\n else:\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0][:-15]+\")\",\n np.NaN, regex=True)\n data[col_tuple[1]] = data[col_tuple[1]].replace(re_triple[cnt][0],\n np.NaN, regex=True)\n tmp_1_clean = data[\"tmp_1\"].str.strip()\n data[col_tuple[1]] = data[col_tuple[1]].fillna(tmp_1_clean)\n data = data.drop(columns=\"tmp_0\")\n data = data.drop(columns=\"tmp_1\")\n return data", "def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)", "def fix_null_vals(dataset):\n\tprint(\"\\tFixing null values\")\n\n\tif not dataset.isnull().any().any():\n\t\treturn dataset\n\telse:\n\t\treturn dataset.fillna(method=\"ffill\")", "def combine_columns(allowed_columns):\n\n v_columns = [v for v in allowed_columns if v in df.columns]\n v_columns.sort()\n for i in range(1, len(v_columns)):\n df[v_columns[0]] = df[v_columns[0]].fillna(df[v_columns[i]])\n df.drop(v_columns[i], 1, inplace=True)\n return v_columns[0]", "def merge(self, nums1, m, nums2, n):\n n=len(nums1)\n j=0\n for i in range(n):\n if nums2[j]<nums1[i] and nums2[j]<len(nums2):\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1\n if nums1[i]==0 and nums2[j]<len(nums2):\n nums1[i]=nums2[j]", "def fill_end_of_the_row(nono, row):\n ending = nono.limits.get_row_endings(row, -1)\n sth_changed = fill_range_in_row(nono, row,\n range(ending + 1, nono.meta_data.n_cols),\n -1)\n return sth_changed", "def _propagate_duplicate_cols(self, duplicate_cols):\n for duplicate in duplicate_cols:\n no_suffix = \"_\".join(duplicate.split(\"_\")[:-1])\n null_idx = self._hybrid_meta[no_suffix].isnull()\n non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values\n self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals", "def merge(line):\n 
result = [0 for idx in range(len(line))]\n count = 0\n # push all non-zero values to the left of the tile\n for num in line:\n if num != 0:\n result[count] = num\n count += 1\n # merge adjacent values only once left to right\n for idx in range(len(list(result))-1):\n if result[idx] == result[idx+1]:\n result[idx] = result[idx] + result[idx+1]\n result.pop(idx+1)\n result.append(0)\n return result", "def map_value(self, df, from_column, to_column, value = None,\n values_map_column = None, values_map = None):\n if from_column not in df.columns:\n return df\n\n if value:\n df[to_column] = np.where(df[from_column].notnull(), value, None)\n elif values_map_column and values_map:\n #add unit value regardless if there is a measure value\n df[to_column] = df[values_map_column].map(values_map)\n #reset all unit values where there's no corresponding measure\n df[to_column] = df[to_column].mask(df[from_column].isnull(), None)\n\n return df", "def _prepare_data_to_aug(\n self, col: pd.Series, freq=0.2\n ) -> Tuple[pd.Series, pd.Series]:", "def reduce_possibilities_by_column(self):\n y = self.targetCell.y\n for i in range(1,10): #content\n for n in range(9): #x-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[n][y]\n if self.targetCell != neighbour_cell:\n self.targetCell.column_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.column_neighbour_possibilities = flatten_list(self.targetCell.column_neighbour_possibilities)", "def acumula_x2(self, x2, rodada):\n self.x2[rodada].append(x2)", "def fill_with_mode(filename, column):\r\n df=None\r\n df=pd.read_csv(filename)\r\n df[column].fillna(df[column].mode()[0], inplace=True)\r\n return df", "def concat(column_based_table_1: dict[str, list[str]], column_based_table_2: dict[str, list[str]]) -> dict[str, list[str]]:\n combined_data_table: dict[str, list[str]] = {}\n for column in column_based_table_1:\n combined_data_table[column] = column_based_table_1[column]\n keys_list = list(combined_data_table.keys())\n for column in column_based_table_2:\n if column in keys_list:\n column_data = combined_data_table[column]\n column_data_2 = column_based_table_2[column]\n # append to list\n for item in column_data_2:\n column_data.append(item)\n combined_data_table[column] = column_data\n else:\n combined_data_table[column] = column_based_table_2[column]\n return combined_data_table", "def merge(line):\n length = len(line)\n merge_line = [element for element in line if element != 0]\n \n for index in range(0, len(merge_line)-1):\n if merge_line[index] == merge_line[index+1]:\n merge_line[index] *= 2\n merge_line[index+1] = 0\n merge_line = [element for element in merge_line if element != 0]\n while len(merge_line) != length:\n merge_line.append(0)\n \n return merge_line", "def ffill(data):\n last = data[0]\n new = []\n for line in data:\n if line:\n new.append(line)\n last = line\n else:\n new.append(last)\n return new", "def fill_beggining_of_the_row(nonogram, row):\n origin = nonogram.limits.get_row_origins(row, 0)\n sth_changed = fill_range_in_row(nonogram, row, range(origin), -1)\n return sth_changed", "def fill(self, value):\n if self.fragmented:\n (self[self._begin:].view(ndarray)).fill(value)\n (self[:self._end].view(ndarray)).fill(value)\n else:\n if self._begin < self._end:\n part = self[self._begin:self._end]\n elif self._end == 0:\n part = self[self._begin:]\n\n (part.view(ndarray)).fill(value)", "def augment_column(self, col: 
pd.Series,) -> pd.Series:", "def merge(self, nums1, m, nums2, n):\n nums1.extend([0]*len(nums2))\n j=0\n for i in range(len(nums2)):\n if nums2[i]<nums1[j]:\n nums1.pop()\n print(nums1)\n nums1.insert(j,nums2[i])\n j=j+1", "def fix_values(df, col):\n broken_values = [value for value in df[col]]\n fixed_values = []\n for value in broken_values:\n fixed_values.append(int(value.replace(',','')\n .replace('$','')))\n df[col] = fixed_values", "def fill_range_in_row(nonogram, row, cols, value):\n sth_changed = False\n\n for col in cols:\n change = nonogram.fill_cell(row, col, value)\n sth_changed = sth_changed or change\n\n return sth_changed", "def fill(self, recoValue, l1Value, weight=1.):\n self._total.fill(recoValue, weight)\n self._dist.fill(l1Value, weight)\n if l1Value > self._threshold:\n self._pass.fill(recoValue, weight)", "def island_fill(i: int, j: int, islandMatrix: List[List[int]]) -> List[List[int]]:\n q = [(i, j)]\n while q:\n element = q.pop()\n if islandMatrix[element[0]][element[1]] == 1:\n for e in neighbourhood(element[0], element[1], islandMatrix):\n q.append(e)\n islandMatrix[element[0]][element[1]] = 2\n return islandMatrix", "def merge(self, nums1, m, nums2, n):\n n0=len(nums1)-n\n j=0\n for i in range(n0):\n while nums2[j]<nums1[i]:\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1", "def _switch_column_values(self, row, column_target):\n cols = row.index.tolist()\n column_target_index = cols.index(column_target)\n\n if column_target_index == 0:\n column_source_index = 1\n elif column_target_index == len(cols) - 1:\n column_source_index = column_target_index - 1\n else:\n column_source_index = column_target_index - 1\n\n column_source = cols[column_source_index]\n replace_value = row[column_source]\n if self.log:\n print(\">>> Replacing values between {}: {} and {}: {}\".format(column_target, row[column_target]\n , column_source, replace_value))\n row[column_source] = row[column_target]\n row[column_target] = replace_value\n return row", "def DealWithMissingValues(data_set: pd.DataFrame):\n data_set.fillna(method=\"pad\", inplace=True)", "def set_cell_by_index(self, column_index, cell):\n while len(self) <= column_index:\n self.append(None)\n self[column_index] = cell", "def fill_between_the_blocks(nonogram, row):\n hints = nonogram.data.get_row_hints(row)\n endings = nonogram.limits.get_row_endings(row)\n origins = nonogram.limits.get_row_origins(row)\n\n sth_changed = False\n\n for i in range(len(hints) - 1):\n cols = range(endings[i] + 1, origins[i+1])\n changed = fill_range_in_row(nonogram, row, cols, -1)\n sth_changed = sth_changed or changed\n\n return sth_changed", "def fill_valid2(df):\n for idx, row in df.iterrows():\n if pd.isnull(row['cid']) &\n (pd.isnull(row['byr']) |\n pd.isnull(row['iyr']) |\n pd.isnull(row['hgt']) |\n pd.isnull(row['hcl']) |\n pd.isnull(row['ecl']) |\n pd.isnull(row['pid']) |\n pd.isnull(row['hgt'])):\n df['valid'][idx] = 0", "def generate_column(values, previous_table):\n\n # result = []\n\n for i in range(0, len(previous_table)):\n # current_item = previous_table[i] + values[i]\n # result.append(current_item)\n values[i][0:0] = previous_table[i]\n\n # return result\n print \"Column(s) attached to result\"\n return values", "def merge(self, nums1, m, nums2, n):\n n0=len(nums1)-n\n j=0\n for i in range(n0):\n if nums2[j]<nums1[i]:\n nums1.remove(0)\n nums1.insert(i,nums2[j])\n j=j+1", "def fill_missing(self):\n df = self.df\n # Filling with default values\n logger.debug(\"Filling from distributions...\")\n for field in 
HeatStrokeDataFiller.default_map or field in HeatStrokeDataFiller.positive_default:\n if field not in df.columns:\n logger.warning(\"(%s) missing from data-frame columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to default: %s\" % (field, HeatStrokeDataFiller.default_map[field]))\n default_value = HeatStrokeDataFiller.default_map[field]\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=False)\n how_many_to_fill = np.sum(where)\n if field in HeatStrokeDataFiller.positive_default:\n # Use default positive dietributions\n distribution = HeatStrokeDataFiller.positive_default[field]\n df[field].loc[where] = distribution(how_many_to_fill)\n else:\n logger.debug(\"Using default %s for field: %s\" % (default_value, field))\n # Use default values\n df[field].loc[where] = np.array([default_value] * how_many_to_fill)\n\n # Filling with Zeros\n logger.debug(\"Fillling with zeros...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_zero:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from columns\" % field)\n continue\n logger.debug(\"Setting missing in \\\"%s\\\" to 0\" % field)\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = np.zeros(how_many_to_fill)\n\n # Filling in columns with the average from the rest of the column\n logger.debug(\"Filling with agerages...\")\n for field in HeatStrokeDataFiller.fields_to_fill_with_average:\n if field not in df.columns:\n logger.warning(\"\\\"%s\\\" missing from data-frame columns\" % field)\n continue\n\n where = HeatStrokeDataFiller.find_where_missing(df, field, find_nan=True, find_str=True)\n data = df[field][np.invert(where)]\n mean = np.mean(data)\n std = np.std(data)\n if mean == np.nan or std == np.nan:\n mean, std = (0, 0)\n logger.debug(\"Setting missing in \\\"%s\\\" with: %.3f +/- %.3f\" % (field, mean, std))\n how_many_to_fill = np.sum(where)\n df[field].loc[where] = mean + std * np.random.random(how_many_to_fill)\n\n fields_not_modified = set(df.columns) - set(HeatStrokeDataFiller.default_map.keys()) - HeatStrokeDataFiller.fields_to_fill_with_zero - HeatStrokeDataFiller.fields_to_fill_with_zero\n logger.debug(\"Fields not modified: %s\" % fields_not_modified.__str__())\n return df", "def mode_impute(self, column_val):\n mode = column_val.mode()[0]\n column_val = column_val.fillna(mode)\n return column_val", "def __init__(self, v1, v2):\n mergedData = []\n list(map(mergedData.extend, list(zip_longest(v1, v2))))\n self.data = list(filter(lambda x: x is not None, mergedData))\n self.index = 0", "def compress_dups(data, column):\n idx = defaultdict(list)\n for row in data:\n idx[row[column]].append(row)\n\n dedup = []\n\n for idx_row in sorted(idx.items()):\n dedup.append(avg_rows(idx_row[1]))\n return dedup", "def same_as(self, rows: List[Row], column: Column) -> List[Row]:\n return_list: List[Row] = []\n if not rows:\n return return_list\n cell_value = rows[0].values[column.name]\n for table_row in self.table_data:\n new_cell_value = table_row.values[column.name]\n if new_cell_value is None or not isinstance(new_cell_value, type(cell_value)):\n continue\n if new_cell_value == cell_value:\n return_list.append(table_row)\n return return_list", "def order_links_end_points(in_file,links_columns,links_columns_all_details,out_file):\n\n df = pd.read_csv(in_file)#.iloc[:,1:]\n # links_columns = [41,45,51,55]\n links_node_swapped_columns = 
links_columns[math.floor(len(links_columns)/2):] + links_columns[0:math.floor(len(links_columns)/2)]\n\n\n # links_columns_all_details = list(np.arange(41,61))\n links_node_swapped_columns_all_details = links_columns_all_details[math.floor(len(links_columns_all_details)/2):] + links_columns_all_details[0:math.floor(len(links_columns_all_details)/2)]\n\n\n for ix1, (Index, row1) in tqdm(enumerate(df.iterrows())):\n for ix2, (Index, row2) in enumerate(df[ix1+1:].iterrows()):\n\n\n if (row1[links_columns].as_matrix() == row2[links_node_swapped_columns].as_matrix()).all():\n # print('swapping',ix1,ix1 + 1 +ix2)\n # import ipdb; ipdb.set_trace()\n # print('Row2',row2)\n temp = []\n for i in range(len(links_columns_all_details)):\n\n if i < math.floor(len(links_columns_all_details)/2):\n temp.append(df.iat[ix1 + 1 + ix2, links_columns_all_details[i]])\n df.iat[ix1 + 1 + ix2, links_columns_all_details[i]] = df.iat[ix1 + 1 + ix2, links_node_swapped_columns_all_details[i]]\n else:\n df.iat[ix1 + 1 + ix2, links_columns_all_details[i]] = temp[i - math.floor(len(links_columns_all_details)/2)]\n\n # print('swapped',ix1,ix1 + 1 +ix2)\n # print('Row1', row1,'Row2', row2)\n # import ipdb; ipdb.set_trace()\n\n\n\n df.to_csv(out_file, index=False)\n\n return df", "def _apply_array_spatial12_lowfilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lowfilling_fast(h1e, h2e)\n else:\n return self._apply_array_spatial12_lowfilling_python(h1e, h2e)", "def clean_df(self, df, column_name):\r\n \r\n df[column_name] = df[column_name].fillna('').str.replace('\\n', ' ')\r\n return df", "def appforth(df, line):\n df.loc[-1]=line\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n return df", "def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)", "def fix_date_year(df, col_1, col_2):\n\n for idx, date in enumerate(df[col_1]):\n year1 = date.year\n year2 = df.loc[idx, col_2]\n if np.abs(year1-year2)>95:\n year1 -=100\n df.loc[idx, col_1]=df.loc[idx, col_1].replace(year=year1)\n return df", "def merge2(self, nums1, m, nums2, n): \n # 从nums1的第m个数字开始,用nums2的值来替换\n for i in range(n):\n nums1[m+i] = nums2[i]\n nums1.sort()", "def expand(table):\n t = []\n for r in table:\n for _ in range(r[1]):\n try:\n t.append((r[0], r[2]))\n except IndexError:\n t.append((r[0], None))\n return t", "def merge(line):\n # Iterate over input and create output with non-zeros slid to the left.\n slid = []\n for num in line:\n if num == 0:\n continue\n else:\n slid.append(num)\n for dummy_slot in range(len(line) - len(slid)):\n slid.append(0)\n\n # Iterate over slid_left and create output with tile pairs replaced with\n # a tile of twice the value and a zero tile.\n paired = []\n idx = 0\n while idx < len(slid):\n if idx == len(slid) - 1:\n paired.append(slid[idx])\n idx += 1\n elif slid[idx] == slid[idx + 1]:\n paired.append(slid[idx] * 2)\n paired.append(0)\n idx += 2\n else:\n paired.append(slid[idx])\n idx += 1\n \n # Slide the tiles in paired.\n merged = []\n for num in paired:\n if num == 0:\n continue\n else:\n merged.append(num)\n for dummy_slot in range(len(line) - len(merged)):\n merged.append(0)\n \n return merged", "def zero_one_card(df):\n 
unique_values = defaultdict()\n for col in df.columns:\n if df[col].nunique() < 2:\n unique_values[col] = df[col].nunique()\n if len(unique_values) > 0:\n printmd(str(\"* Columns: *\"+', '.join(list(unique_values.keys()))+\"* have less than two different values\"))\n for col in unique_values.keys():\n printmd(str('* *' + col + \"* has \" + str(df[col].nunique()) + ' differents values :' + str(df[col].unique())))\n else:\n printmd(\"* No columns have less than 2 different values\")", "def expandFromColumn(inputColumn,replaceList):\n \n import pandas as pd\n import re\n \n #necessary, due to escape nonsense\n inputColumn=inputColumn.replace(regex=True, to_replace='\\\\\\\\',value='/')\n \n replaceList['changeNum']=0\n replaceList['changeIndexes']=''\n\n for index, row in replaceList.iterrows():\n curReplaceVal=row[0]\n currentRegexExpression=re.compile(curReplaceVal)\n CurrentBoolVec=inputColumn.str.contains(currentRegexExpression,na=False)\n replaceList['changeIndexes'].iloc[index]=[i for i, x in enumerate(CurrentBoolVec) if x]\n replaceList['changeNum'].iloc[index]=len(replaceList['changeIndexes'].iloc[index])\n inputColumn=inputColumn.replace(regex=True, to_replace=currentRegexExpression,value=row[1])\n return inputColumn, replaceList;", "def merge(self, nums1, m, nums2, n):\n offset = len(nums1) - m\n i = 0\n while i < len(nums1) and nums2:\n num = nums1[i]\n num2 = nums2[0]\n\n if num2 < num:\n nums1.insert(i, num2)\n nums2.pop(0)\n i += 1\n\n if i >= len(nums1) and nums2 is not None:\n nums1[len(nums1) - offset:] = nums2\n while len(nums1) > m + n:\n nums1.pop()", "def fillup_x(self):\n assert not np.all(self.x == None)\n x_df = pd.DataFrame(self.x, columns=self.x_title)\n self.df = pd.concat([self.df, x_df], axis=1)", "def add_data_from_columns_into_rows(columns: list, fixed_rows: list):\n for column in range(len(max(columns))):\n for row in range(len(columns)):\n try:\n fixed_rows[column].append(columns[row][column])\n except IndexError:\n fixed_rows[column].append('')\n return fixed_rows", "def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n j = 0\n i = 0\n while i < m:\n if j >= n:\n break\n if nums1[i] == 0 and i >= m:\n \n nums1[i:] = nums2[j:]\n break\n else:\n if nums1[i] < nums2[j]:\n i+= 1\n else:\n\n nums1[i:] = [nums2[j]]+nums1[i:-1]\n j+=1\n i+=1\n m+=1", "def _add_column(self, column):\n if column is None:\n column = len(self._columns)\n\n if column in self._columns:\n raise ValueError(f\"Duplicate column name: {column}\")\n\n if isinstance(column, int):\n assert column >= len(self._columns)\n for empty in range(len(self._columns), column):\n self._add_column(empty)\n\n self._columns.append(column)\n for idx in self.index:\n row = self._data[idx]\n row.append(None)\n\n return len(self._columns) - 1", "def merge(line):\n # replace with your code from the previous mini-project\n l = len(line)\n s1 = [0]*l\n j = 0\n for i in range(l):\n if line[i] != 0:\n s1[j] = line[i]\n j += 1\n\n for k in range(l-1):\n if s1[k] == s1[k+1]:\n s1[k] *=2\n s1.pop(k+1)\n s1.append(0)\n\n return s1", "def _set_unique_and_null_vals(self):\n self.unique_vals = {}\n \n df_col = self.df[self.col]\n u_vals = pandas.unique( df_col[ df_col.notnull() ] )\n \n for val in u_vals:\n self.unique_vals[val] = np.where( df_col==val)[0]\n \n null_inds = np.where(self.df.isnull()[self.col]) [0]\n if null_inds.size:\n self.unique_vals['NULL__'] = null_inds", "def _insert_into_clean(self, entry):\n i = entry.hash\n new_entry = self.table[i]\n while new_entry.key is not None:\n i += 
self.second_hash(new_entry.key)\n new_entry = self.table[i]\n new_entry.key = entry.key\n new_entry.value = entry.value\n new_entry.hash = entry.hash\n self.used += 1\n self.filled += 1", "def fill_inside_of_the_blocks(nonogram, row):\n hints = nonogram.data.get_row_hints(row)\n endings = nonogram.limits.get_row_endings(row)\n origins = nonogram.limits.get_row_origins(row)\n\n sth_changed = False\n\n for i, hint in enumerate(hints):\n cols = range(endings[i] + 1 - hint, origins[i] + hint)\n changed = fill_range_in_row(nonogram, row, cols, 1)\n sth_changed = sth_changed or changed\n\n return sth_changed", "def merge(line):\n def combine_alike(line):\n \"\"\"\n Inner function that combines adjacent, equal numbers\n once and replaces the second number with a zero.\n \"\"\"\n for idx in range(len(line)-1):\n if line[idx] == line[idx+1] and line[idx] != 0:\n line[idx] *= 2\n line[idx+1] = 0\n return line\n \n merged = combine_alike([num for num in line if num!= 0])\n result = [num for num in merged if num!= 0]\n return result + [0]*(len(line)-len(result))", "def transform(self, X):\n\n X = X.copy()\n\n X[pd.isnull(X)] = self.fill\n\n return np.asarray(X)", "def merge(line):\n new_line = [0] * len(line)\n merged = [False] * len(line)\n pos = 0\n for item in line:\n if not item == 0:\n if new_line[pos - 1] == item and merged[pos - 1] == False:\n new_line[pos - 1] = item * 2\n merged[pos - 1] = True\n else:\n new_line[pos] = item\n pos += 1\n return new_line", "def setColumnColor(self, column, color = \"CCCCCC\"):\n\n\t\t\t\tfillObject = openpyxl.styles.PatternFill(start_color = color, end_color = color, fill_type = \"solid\")\n\n\t\t\t\tfor i, row in enumerate(self.thing.iter_rows(), start = 1):\n\t\t\t\t\tcell = self.getCell(row = i, column = column)\n\t\t\t\t\tcell.fill = fillObject", "def reset_to_last2_junction(self):\n del self._junction_index[-1]\n del self._dead_end_direction[-1]\n self.reset_to_last_junction()", "def set_column(self, column, values):\n values = to_list(values, size=self.size)\n\n if len(values) != self.size:\n raise ValueError(\n f\"Values length ({len(values)}) should match data length ({self.size})\"\n )\n\n if column not in self._columns:\n self._add_column(column)\n\n for index in self.index:\n self.set_cell(index, column, values[index])", "def filling_nan_values(df: pd.DataFrame) -> pd.DataFrame: \n ratio = df.count()/len(df) \n cols = ratio[ratio < 1].index\n for col in cols: \n print(f\"Filling Column:{col}\")\n df[col] = df[col].fillna(df[col].mean())\n return df", "def merge_columns(self_columns, other_columns):\n sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns)\n self_idx = other_idx = 0\n self_len, other_len = len(self_columns), len(other_columns)\n while self_idx < self_len and other_idx < other_len:\n curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx]\n if curr_self_column == curr_other_column:\n yield curr_self_column, curr_other_column\n self_idx += 1\n other_idx += 1\n elif curr_self_column < curr_other_column:\n yield curr_self_column, None\n self_idx += 1\n else:\n yield None, curr_other_column\n other_idx += 1\n while self_idx < self_len:\n yield sorted_self_columns[self_idx], None\n self_idx += 1\n while other_idx < other_len:\n yield None, sorted_other_columns[other_idx]\n other_idx += 1", "def merge(nums1, m, nums2, n):\r\n while len(nums1) != m and nums1[len(nums1)-1] == 0:\r\n nums1.pop()\r\n for each in nums2:\r\n nums1.append(each)\r\n nums1.sort()" ]
[ "0.6066896", "0.5588243", "0.5520393", "0.5153865", "0.5142474", "0.5100762", "0.50284475", "0.50032073", "0.4990765", "0.4952544", "0.49398243", "0.49170038", "0.4903504", "0.4887256", "0.48762384", "0.48469424", "0.48462567", "0.48410118", "0.47721502", "0.47698507", "0.47568175", "0.4755987", "0.47121626", "0.46946415", "0.46859962", "0.46799126", "0.4679654", "0.46685728", "0.46435165", "0.4633871", "0.46272057", "0.46266726", "0.46211508", "0.46115947", "0.46034238", "0.46012294", "0.45925233", "0.45918795", "0.45759243", "0.45730543", "0.45602632", "0.45581284", "0.45372522", "0.45300347", "0.45173723", "0.45166117", "0.45159134", "0.45096081", "0.45056036", "0.44981644", "0.44936815", "0.44891027", "0.44859755", "0.44710147", "0.44667777", "0.44632152", "0.44449374", "0.4435677", "0.4435338", "0.44326308", "0.44324198", "0.44286332", "0.44264585", "0.4404352", "0.43963972", "0.43957728", "0.439365", "0.43849853", "0.43804228", "0.43746564", "0.4372797", "0.43684366", "0.4366692", "0.43646517", "0.43585452", "0.43499735", "0.43466842", "0.43455508", "0.43431664", "0.4339803", "0.43356654", "0.4334153", "0.43326807", "0.4326872", "0.43245953", "0.43217868", "0.4318595", "0.43136042", "0.43126372", "0.43069154", "0.43048504", "0.42999148", "0.42995107", "0.4294924", "0.42895094", "0.42881355", "0.42874885", "0.4287334", "0.42872402", "0.42855346" ]
0.57042193
1
Returns true if player has 3 of spades in their hand.
def has_3_spades(self):
    if Card('3', 'spades') in self.hand:
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None", "def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False", "def is_3flush(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n suit1, suit2 = [card.suit for card in holecards]\n hand = tuple(chain(holecards, flop))\n suit_counts = Counter([card.suit for card in hand])\n\n for suit in suit_counts:\n if suit_counts[suit] == 3:\n if required_holecards == 2 and (suit1 == suit2 == suit):\n return True\n elif required_holecards == 1:\n if (suit1 == suit or suit2 == suit):\n return True\n elif required_holecards == 0:\n return True\n return False", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def is_full_house(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] != 0 and count[c] != 2 and count[c] != 3:\n return None\n triple = 0\n for k in count:\n if count[k] == 3:\n triple = cards[k]\n return (True, triple)", "def has_four_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 4:\n self.rank_per_hand['6'] = \"four of a kind\"\n return True\n return False", "def has_twopair(self):\n count = 0\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n count += 1\n if count >= 2:\n self.rank_per_hand['1'] = \"two pair\"\n return True\n return False", "def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )", "def has_cards(self):\n return self.hand.len() > 0", "def is_3straight(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n rank1, rank2 = sorted_numerical_ranks(holecards)\n hand = tuple(chain(holecards, flop))\n\n for subseq in rank_subsequences(hand):\n x, y, z = subseq\n if x == y-1 == z-2:\n if x == 1:\n # Special case for Ace playing low, to allow\n # for the `rank in subseq` check to work\n subseq.append(14)\n if required_holecards == 2:\n if rank1 in subseq and rank2 in subseq:\n return True\n elif required_holecards == 1:\n if rank1 in subseq or rank2 in subseq:\n return True\n elif required_holecards == 0:\n return True\n return False", "def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft", "def is_three_channeled(value):\n return len(value) == 3", "def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return False", "def is_four_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 4:\n return (True, 
cards[c])\n return None", "def flush(hand):\n return len(set([suit for value, suit in hand])) == 1", "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def is_four_of_a_kind(hand):\n\tis_a_four_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 4:\n\t\t\tis_a_four_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_four_of_a_kind == True:\n\t\tif hand[j] == 4 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_four_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True", "def is_full_house(hand):\n\tis_a_full_house = False\n\tnum_three_kind = 0\n\tnum_pair = 0\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tnum_three_kind += 1\n\t\telif hand[i] == 2:\n\t\t\tnum_pair += 1\n\t\ti += 1\n\tif num_three_kind ==1 and num_pair == 1:\n\t\tis_a_full_house = True\n\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_full_house == True:\n\t\tif (hand[j] == 2 or hand[j] == 3) and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_full_house:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def is_game_win(self):\n return not self.deck and not self.hand", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def is_straight(hand):\n # same suite\n suite = hand[0][1]\n vals = []\n for c in hand:\n vals.append(cards[c[0]])\n # check if vals are consecutive or not\n if is_contiguous(vals):\n return True\n else:\n return False", "def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def make_card_wish(self, symbol, player):\n if player == self.current_player:\n if symbol in \"s c h d\":\n self.wait_for_card_wish = False\n self.card_wished = symbol\n self.choose_next_player()\n return True\n return False", "def is_royal_flush(hand):\n\n # same suit\n suite = hand[0][1]\n count = {c:0 for c in cards.keys()}\n for c in hand:\n if suite != c[1]:\n return False\n count[c[0]] += 1\n # all in same suit\n for c in 'T J Q K A'.split():\n if count[c] != 1:\n return False\n return True", "def is_straight(hand):\n\ti = 0\n\twhile i < 8:\n\t\tif hand[i] == 1 and hand[i+1] == 1 and hand[i+2] == 1 and hand[i+3] == 1 and hand[i+4] == 1:\n\t\t\treturn True, i + 4\n\t\ti += 1\n\treturn False", "def has_fullhouse(self):\n if self.has_pair() & self.has_three_of_a_kind():\n self.rank_per_hand['5'] = \"full house\"\n return True\n return False", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False", "def flush_udacity(hand):\n suits = [s for r,s in hand]\n return len(set(suits)) == 1", "def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False", "def is_miss_deal(hand: list, mighty: Card) -> bool:\n point_card_count = 0\n for card in hand:\n if card.is_pointcard() and card 
!= mighty:\n point_card_count += 1\n\n if point_card_count <= 1:\n return True\n else:\n return False", "def checkPlayerDies(self, player):\n\n listOfSpikesCoordinates = self._get_spikes()\n playerCoordinates = (player.positionRect.x, player.positionRect.y)\n\n if listOfSpikesCoordinates is not None:\n for spike in listOfSpikesCoordinates:\n if playerCoordinates == spike:\n return True", "def is_carrying_vespene(self) -> bool:\n return (\n self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGAS)\n or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASPROTOSS)\n or self.has_buff(BuffId.CARRYHARVESTABLEVESPENEGEYSERGASZERG)\n )", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def is_bust(self, hand_idx=0):\n if self.player_hand_value(hand_idx) > 21:\n return True\n else:\n return False", "def player_hand_contains_suit(self, user_id, suit):\n print \"player_hand_contains_suit(self, user_id, suit) \"\n print \" Checking if player hand contains expected suit: {}\".format(self.bot.leading_suit)\n for user_object in self.bot.current_game.players:\n if user_object.id == user_id:\n card_value = None\n card_suit = None\n for card_obj in user_object.cards_in_hand:\n if len(card_obj) == 2:\n card_value = str(card_obj[0])\n card_suit = card_obj[1]\n else:\n card_value = str(card_obj)\n card_suit = None\n if \"d_\" not in card_value and \"t_\" not in card_value and \"vm_\" not in card_value:\n if card_suit == suit:\n return True\n return False", "def is_card_in_other_hands(self, own_hand_index, card):\n for i, hand in enumerate(self.hands):\n if i == own_hand_index:\n continue\n if card in hand:\n return True\n return False", "def test_hand_has_three_of_a_kind(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_three_of_a_kind() == expected", "def discarded(self) -> bool:\n return (\n len(self.cards) == 13 - self.game.board.purple.space - self.discard_amount\n )", "def is_valid_play(play, curr_trick, hand):\n\tif len(curr_trick) == 0: # Player is lead\n\t\treturn True\n\telse:\n\t\tlead_suit = curr_trick[0][1]\n\t\tplay_suit = play[1]\n\n\t\tif play_suit == lead_suit:\n\t\t\treturn True\n\t\telse:\n\t\t\t# Check remaining cards in hand\n\t\t\tfor card in hand:\n\t\t\t\tsuit = card[1]\n\t\t\t\tif suit == lead_suit:\n\t\t\t\t\treturn False\n\t\t\t# no card in hand matches lead_suit\n\t\t\treturn True", "def is_hungry(self) -> bool:\n if self.eat_count <= 3:\n return True\n else:\n return False", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def has_straight(self):\n res = []\n self.suit_hist()\n for val in self.ranks.keys():\n res.append(val)\n res.sort()\n self.is_sequence(res, 3)", "def is_same_sign(self, cards):\n\n jokers = 0\n w_o_jokers = []\n for card in cards:\n if self.num_to_card(int(card)) == 0:\n jokers += 1\n else:\n w_o_jokers.append(int(card))\n\n w_o_jokers = sorted(w_o_jokers)\n print(\"whitout jokers: \", w_o_jokers)\n if w_o_jokers[0] <= 12: # if the cards are CLUBS\n if w_o_jokers[-1] > 12:\n return False\n if w_o_jokers[0] <= 25: # if the cards are DIAMONDS\n if w_o_jokers[-1] > 25:\n return False\n if w_o_jokers[0] <= 38: # HEARTS\n 
if w_o_jokers[-1] > 38:\n return False\n if w_o_jokers[0] <= 51:\n if w_o_jokers[-1] > 51:\n return False\n return True", "def flush(hand):\n checkflush = [s for r, s in hand]\n return checkflush.count(checkflush[1]) == 5", "def enough_players():\n return True", "def is_round_over(whose_turn,players):\n if ((len(players[whose_turn].hand.cards) == 0) and (players[whose_turn].has_discarded == True)):\n round_over = True\n else:\n round_over = False\n return round_over", "def has_won(self):\n return len(self.hand) == 0", "def hand_empty(self):\n return len(self.cards) == 0", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def has_flush(self):\n self.suit_hist()\n for val in self.suits.values():\n if val >= 5:\n return True\n return False", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def __suitIsLured(self, suitId, prevRound=0):\n inList = self.currentlyLuredSuits.has_key(suitId)\n if prevRound:\n # only return true if the suit has been lured for at least\n # one entire round\n return inList and self.currentlyLuredSuits[suitId][0] != -1\n return inList", "def is_high_card(hand):\n\tis_a_high_card = True\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] > 1:\n\t\t\tis_high_card = False\n\t\ti += 1\n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_high_card == True:\n\t\tif hand[j] == 1 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_high_card:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def issolved(self) -> bool:\n if not self._pile:\n return True\n for c_card in self._pile:\n if not c_card.visible:\n return False\n return True", "def has_flush(self):\n self.suit_hist()\n for val in self.suits.values():\n if val >= 5:\n self.rank_per_hand['4'] = \"flush\"\n return True\n return False", "def is_suicide(self, stone_color, index):\n cardinal_indices = self.cardinal_indices(index)\n # First check to see if there are any immediate liberties\n for ci in cardinal_indices:\n stone = self.get(ci)\n if stone is None:\n # There is an empty liberty so the move is not suicide\n return False\n # No liberties, so all spaces around the stone are filled\n # Two conditions will save us, an enemy group being captured,\n # or a single friendly group having more than 1 liberty.\n for ci in cardinal_indices:\n stone = self.get(ci)\n # Adjacent group is friendly\n if stone.color == stone_color:\n # And we are not filling its last liberty\n if self.group_liberties(ci) > 1:\n return False\n # Adjacent group is foe\n else:\n # But we *are* filling its last liberty\n if self.group_liberties(ci) == 1:\n return False\n # If none of the above is true, this is an invalid move\n return True", "def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1", "def scalene(sides: list) -> bool:\n\n return validate_triangle(sides) and len(set(sides)) == 3", "def can_complete_three_in_row(self, row_positions, board):\n\n row = [board.get_piece(row_positions[0][0], row_positions[0][1]), board.get_piece(row_positions[1][0], row_positions[1][1]), board.get_piece(row_positions[2][0], row_positions[2][1])]\n\n if row.count(' ') == 1 and row.count(self._piece) == 2:\n self_winner = row.index(' ')\n else:\n self_winner = -1\n\n\n if row.count(' ') == 1 and 
row.count(self._piece) == 0:\n opponent_winner = row.index(' ')\n else:\n opponent_winner = -1\n \n return (self_winner, opponent_winner)", "def multi_player_support(self, num_of_players):\n if self.screen['columns'] / num_of_players > 40:\n return True\n else:\n return False", "def before_first_stich(self):\n return len(self.cards) == 9", "def in_suit3(list, list0):\n text = list.replace(\"-\", \"\")\n text0 = list0.replace(\"-\", \"\")\n if (\"-\" in list) and (\"-\" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):\n\n list1 = list.split(\"-\")\n x = int(list1[0])\n suit = set()\n suit.add(x)\n while x < int(list1[len(list1) - 1]):\n x += 1\n suit.add(x)\n suit.add(int(list1[len(list1) - 1]))\n\n list2 = list0.split(\"-\")\n y = int(list2[0])\n suit0 = set()\n suit0.add(y)\n while y < int(list2[len(list2) - 1]):\n y += 1\n suit0.add(y)\n suit0.add(int(list2[len(list2) - 1]))\n temp = [item for item in suit if item in suit0]\n if len(temp) > 0: return True\n\n return False", "def is_twenty_four_tone_complete(self):\n pcs = [x / 2.0 for x in range(24)]\n pcs = [int(x) if int(x) == x else x for x in pcs]\n return set(pcs).issubset(set(self._color_dictionary.keys()))", "def hitMe(hand, deck):\n if deck.cardsLeft == 0:\n return False\n hand.getCard(deck.drawCard())\n return True", "def is_winner(self, player: str) -> bool:\n total_result = self.current_state.hori_result + self.current_state.left_result + self.current_state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item == '2':\n p2_taken += 1\n if player == \"p1\":\n return float(p1_taken) >= total_line/2\n return float(p2_taken) >= total_line/2", "def isPlayed(self, item):\n userState = self.userState(item)\n return bool(userState.viewCount > 0) if userState.viewCount else False", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def third_street ():\r\n global all_hands\r\n global deck\r\n global players\r\n #Set of all cards for third street draw \r\n third_street_draws = random.sample(deck, len(players)*3)\r\n #Remove drawn cards from deck\r\n for card in third_street_draws:\r\n deck.remove(card)\r\n #Deal 1 Card Each Player Until 3, then reveal third street.\r\n for player in players:\r\n hand = []\r\n for i in range(0,3):\r\n hand.append(third_street_draws[player+len(players)*i])\r\n all_hands.append(hand)\r\n if player == you:\r\n print(\"Your hand is: \", str(all_hands[you]))\r\n else:\r\n print(\"Player \", str(player+1), \"'s 3rd Street hand is: \", str(hand[2]))", "def check_for_game_won(self):\n all_moscuvites_captured = True\n king_captured = True\n king_escaped = True\n for piece in self.game_pieces:\n if piece.player == 2:\n all_moscuvites_captured = False\n elif piece.player == 3:\n king_captured = False\n king_coords = (piece.x,piece.y)\n escape_coords = [(0, 0), (0, 8),\n (8, 0), (8, 8)]\n if king_coords not in escape_coords:\n king_escaped = False\n if king_captured:\n return 2\n elif king_escaped or all_moscuvites_captured:\n return 1\n else:\n return 0", "def playerCanPlay(game, situation, player):\r\n return True", "def has_player(self, p: Player) -> bool:\n return p in self.players", "def can_split(self) -> bool:\n if len(self.cards) == 2 and self.cards[0].value == self.cards[1].value:\n return True\n else:\n return False", 
"def is_straight_flush(hand):\n # same suite\n suite = hand[0][1]\n vals = []\n for c in hand:\n if suite != c[1]:\n return False\n vals.append(cards[c[0]])\n # check if vals are consecutive or not\n if is_contiguous(vals):\n return (True, max(vals))\n else:\n return False", "def play_game(play_shoe, player_list, dealer, number_of_decks):\n # Check if the shoe is still valid (contains a cut card) if not,\n # create a new shoe\n play_shoe = shoe_check(play_shoe, number_of_decks)\n initial_deal(play_shoe, player_list, dealer)\n for player in player_list:\n user_play(play_shoe, player, dealer)\n dealer_play(play_shoe, dealer)\n display_results(check_results(player_list, dealer))\n if play_again():\n return True, play_shoe\n return False", "def is_straight_flush(hand):\n\tis_a_local_flush = False\n\tis_a_local_straight = False\n\tlocal_high_card = 0\n\ti = 16\n\twhile i >= 13:\n\t\tif hand[i] == 5:\n\t\t\tis_a_local_flush = True\n\t\ti -= 1\n\tif is_a_local_flush:\n\t\tj = 0\n\t\twhile j < 8:\n\t\t\tif hand[j] == 1 and hand[j + 1] == 1 and hand[j + 2] == 1 and hand[j + 3] == 1 and hand[j + 4] == 1:\n\t\t\t\tis_a_local_straight = True\n\t\t\t\tlocal_high_card = j + 4\n\t\t\tj += 1\n\tif is_a_local_flush and is_a_local_straight:\n\t\treturn True, local_high_card\n\treturn False", "def is_valid_deck(deck: List[int]) -> bool:\n check_deck = []\n check_deck.extend(deck)\n check_deck.sort()\n return len(check_deck) >= 3 and \\\n all(isinstance(item, int) for item in check_deck) \\\n and len(check_deck) == check_deck[-1]", "def still_playing_game(self):\n for player in self.players:\n if player.is_playing:\n return True\n return False", "def has_straight_flush(self):\n res = []\n self.suit_hist()\n for card in self.cards:\n if self.suits[card.suit] >= 5:\n res.append(card.rank)\n self.is_sequence(res, 7)", "def been_played(self, word):\n words = self.played_out or ''\n words = words.split(' ')\n\n return True if ((words.count(word) > 0) or \n (words.count( singularize(word) ) > 0) or \n (words.count( pluralize(word) ) > 0)) else False", "def has_siete_de_velo(self):\n for card in self._cards[\"oro\"]:\n if card.value == 7:\n return True\n\n return False", "def check_hand(self, player):\n\n total = player.score()\n if total > 21:\n status = 'bust'\n elif total == 21:\n status = 'win'\n else:\n status = 'okay'\n\n if self.verbose:\n print(total, 'points')\n \n return status", "def is_two_pair(hand):\n\tfaces_of_pairs = []\n\tis_a_two_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tfaces_of_pairs.append(i)\n\t\ti += 1\n\tif len(faces_of_pairs) == 2:\n\t\tis_a_two_pair = True\n\tfor fp in faces_of_pairs:\n\t\tprint(fp)\n\tif is_a_two_pair:\n\t\treturn True, faces_of_pairs[1]\n\telse:\n\t\treturn False", "def at_last_stich(self):\n return len(self.cards) == 1", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def IsItem3State(self, item):\r\n\r\n return item.Is3State()", "def is_ok_three_lines(line1, line2, line3):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = line2[1]\n card6 = line2[2]\n\n card7 = line3[0]\n card8 = line3[1]\n card9 = line3[2]\n idents1 = [card.ident for card in line1]\n idents2 = [card.ident for card in line2]\n idents3 = [card.ident for card in line3]\n\n intersection = list(set(idents1) & set(idents2))\n if intersection:\n dprint(\"intersection 12\")\n return False\n\n intersection = list(set(idents1) & set(idents3))\n if intersection:\n return False\n\n intersection = list(set(idents2) & set(idents3))\n if 
intersection:\n return False\n\n print(\"??????????????\")\n show_triple(line1, line2, line3)\n print(\"??????????????\")\n\n if not is_ok_two_lines(line1, line2):\n return False\n if not is_ok_two_lines(line2, line3):\n return False\n\n return True", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def EndHand(redtricks, blacktricks):\r\n if redtricks >= 3 and blacktricks == 1:\r\n return True\r\n elif blacktricks >= 3 and redtricks == 1:\r\n return True\r\n elif (redtricks + blacktricks) == 5:\r\n return True\r\n else:\r\n return False", "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")", "def player_has_won(self):\n return len(self._words_guessed) == self._num_words", "def is_horizontal_four(self, row, col):\r\n player = self.board[row][col]\r\n consecutiveL = 0\r\n consecutiveR = 0\r\n # count consecutive left discs\r\n i = col\r\n current = player\r\n while current == player:\r\n consecutiveL += 1\r\n i -= 1\r\n if i < 0:\r\n break\r\n current = self.board[row][i]\r\n\r\n # count consecutive right discs\r\n i = col\r\n current = self.board[row][i]\r\n while current == player:\r\n consecutiveR += 1\r\n i += 1\r\n if i >= 7:\r\n break\r\n current = self.board[row][i]\r\n\r\n # since (row, col) cell was counted twice, we reduce by 1\r\n if consecutiveL + consecutiveR - 1 >= 4:\r\n return True\r\n\r\n return False", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def deck_has_cards(deck, cards):\n deck_dict = collections.defaultdict(int)\n for card in itertools.chain(deck.draw_pile, deck.discard_pile, deck.hand):\n deck_dict[card] += 1\n return deck_dict == cards", "def check_full_house(dice_list):\n roll_counts = [dice_list.count(value) for value in range(1, 7)]\n return 2 in roll_counts and 3 in roll_counts" ]
[ "0.72666436", "0.7080395", "0.70760804", "0.6595992", "0.65198547", "0.6504807", "0.6470795", "0.6453197", "0.63943964", "0.6391937", "0.6380108", "0.63546914", "0.63495284", "0.63185066", "0.62931806", "0.6260325", "0.61777973", "0.61756945", "0.61107355", "0.6093892", "0.60865194", "0.6032814", "0.60217035", "0.60090405", "0.5986928", "0.5974971", "0.59615004", "0.5937205", "0.5901767", "0.5898353", "0.58930653", "0.58891785", "0.5851097", "0.5849321", "0.5836648", "0.5787658", "0.57812995", "0.57802105", "0.57685983", "0.5743755", "0.5741915", "0.57355076", "0.5717875", "0.57170135", "0.5710494", "0.5698669", "0.5698435", "0.5696897", "0.56840074", "0.5679742", "0.56728", "0.5664815", "0.56618345", "0.5638342", "0.5638335", "0.5634101", "0.5620652", "0.5616434", "0.5609873", "0.56064945", "0.56017613", "0.5585795", "0.55834186", "0.55582696", "0.5553376", "0.5528218", "0.55216336", "0.55069995", "0.55020386", "0.5486131", "0.5475107", "0.5453296", "0.54445004", "0.54437375", "0.5440602", "0.54349256", "0.53995913", "0.5396965", "0.5395423", "0.53916055", "0.5387889", "0.53853154", "0.53851503", "0.5382429", "0.53777915", "0.5368128", "0.5352952", "0.5346107", "0.5341501", "0.53241396", "0.5315627", "0.53152597", "0.53087926", "0.5295733", "0.5282761", "0.5279858", "0.52761334", "0.52723163", "0.5267721", "0.5256944" ]
0.89362204
0
Return all components that match the given type and filter
def queryComponent(type=None, filter=None, all=0):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components(self, filter_type=None):\n\n if filter_type is None:\n out = self.components\n elif isinstance(filter_type, str):\n out = {}\n cls = co.str_to_comp(filter_type)\n for comp in self.get_components():\n if isinstance(self.components[comp], cls):\n out[comp] = self.components[comp]\n else:\n out = {}\n for comp in self.get_components():\n if isinstance(self.components[comp], filter_type):\n out[comp] = self.components[comp]\n\n return out", "def type_filter(self, items, types=None):", "def _filter(self, location, component=\"Hosting\", compute_type=None):\n filters = [\n [\"TERM_MATCH\", \"location\", location],\n [\"TERM_MATCH\", \"productFamily\", \"ML Instance\"],\n [\"TERM_MATCH\", \"currentGeneration\", \"Yes\"],\n [\"TERM_MATCH\", \"component\", component]\n ]\n if compute_type:\n filters.append([\"TERM_MATCH\", \"computeType\", compute_type])\n return [{\n 'Type': x[0],\n 'Field': x[1],\n 'Value': x[2]\n } for x in filters]", "def search_items(self, filter_text, type_filter=None):\n output = []\n\n for item in self._all_items:\n\n if type_filter:\n if item.match(filter_text) and item.resource_type == type_filter:\n output.append(item)\n else:\n if item.match(filter_text):\n output.append(item)\n\n return output", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]", "def filter(self, filters):", "def get_components(self,filt):\n comps = [self.components[i] for i in xrange(len(self.header)) if filt == self.header[i]]\n return comps", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)", "def getFilter(self, type: int) -> int:\n ...", "def by_type(cls, typ='creditcard'):\n return Filter('type', values=(typ,), operator=Filter.OPERATOR['EQUAL'])", "def search_filter(query_params, query):\n if query_params.get('type') is not None:\n query = query.filter(search.c.kind == query_params.get('type'))\n return query", "def type_filter(self, items, types=None):\n if not types:\n return items\n allowed_items = []\n for item in items:\n if item.portal_type not in types:\n continue\n allowed_items.append(item)\n return allowed_items", "def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result", "def all(self, type_filter=None):\n res = []\n if type_filter is None or isinstance(self, type_filter):\n res.append(self)\n for v in self._all_subnodes():\n if isinstance(v, IDLNode):\n res.extend(v.all(type_filter))\n elif isinstance(v, list):\n for item in v:\n if isinstance(item, IDLNode):\n res.extend(item.all(type_filter))\n return res", "def filter_queries_by_nlp_component(\n query_list: ProcessedQueryList, component_type: str, component_name: str\n ):\n\n filtered_queries = []\n 
filtered_queries_indices = []\n for index, query in enumerate(query_list.processed_queries()):\n if getattr(query, component_type) == component_name:\n filtered_queries_indices.append(index)\n filtered_queries.append(query)\n return filtered_queries_indices, filtered_queries", "def search(self, filtro):\n return [nota for nota in self.notas if nota.match(filtro)]", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def get_filters(self):", "def _getRecords(self, record_type, filters):\n if not filters:\n # Always return a copy for consistency\n return list(self._dump_data[record_type])\n response = self._dump_data[record_type]\n for f in filters:\n response = [r for r in response if f(r)]\n return response", "def filter_geom(geom, _type):\n return list(filter(lambda x: isinstance(x, _type), geom))", "def get_objects(filter_rule=\"**\", obj_type=\"*\"):\n objects = ix.api.OfObjectVector()\n project_root = ix.application.get_factory().get_project()\n ix.application.get_matching_objects(objects, filter_rule, project_root,\n obj_type)\n return objects", "def filter(*args, name: Union[AnyStr, bool]=\"\", type: Union[AnyStr, bool]=\"\", q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def get_type_filters(self, list_type: ListType) -> List[TypeFilter]:\n if hasattr(self, \"json\") and isinstance(self.json, dict):\n type_filters_raw = self.json.get(\"filter\", None)\n\n if type_filters_raw is not None:\n if isinstance(type_filters_raw, str):\n type_filters_raw = loads(type_filters_raw)\n\n if not isinstance(type_filters_raw, list):\n type_filters_raw = [type_filters_raw]\n\n try:\n type_filters: List[TypeFilter] = AvailableTypeFilters.from_string_list(type_filters_raw)\n return type_filters\n except UnknownTypeFilter as e:\n # Import logger here to prevent circular dependency on module import\n message = \"Received unknown type filter: '{0}'\".format(e.unknown_type_filter)\n logger.error(self.request_id, message, exc_info=e)\n raise InvalidUsage(message)\n\n return list_type.to_type_filters()", "def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types", "def test_api_type_filtering(api_client, by_type, by_state):\n response = api_client.get(path='/breweries', params={'by_type': by_type, 'by_state': by_state})\n assert response.json() != []\n assert response.ok", "def get_events(self, type_filter=None):\n\n if type_filter:\n filtered_events = self.__events.get(type_filter, [])\n else:\n filtered_events = [ev for ev_type_list in self.__events.values() for ev in ev_type_list]\n\n return filtered_events", "def fetch(self, compute_type=None):\n has_next_page = True\n next_token = None\n results = []\n while has_next_page:\n params = {\n \"ServiceCode\": self.SERVICE_CODE,\n \"Filters\": self._filter(self.location, compute_type=compute_type)\n }\n if next_token:\n params[\"NextToken\"] = next_token\n response = self.pricing.get_products(**params)\n results += self._format(response)\n next_token = response.get(\"NextToken\")\n has_next_page = next_token is not None\n results = self.filters.apply(results)\n return results", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n 
self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def search(self, filter: str = None) -> dict:\n r = requests.get(self.url, headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()", "def filter(self, *args, **kwargs):", "def _get_tuya_devices_filtered(self, types, exclude_mode=False, type_prefix=True):\n config_list = {}\n types_filter = set(types)\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n devices_list = tuya.get_all_devices()\n for device in devices_list:\n dev_type = device.device_type()\n exclude = (\n dev_type in types_filter\n if exclude_mode\n else dev_type not in types_filter\n )\n if exclude:\n continue\n dev_id = device.object_id()\n if type_prefix:\n dev_id = f\"{dev_type}-{dev_id}\"\n config_list[dev_id] = f\"{device.name()} ({dev_type})\"\n\n return config_list", "def ls(tesserae, order_by, order_type, filter_types):\n try:\n return tesserae.ls(order_by, order_type, set([x.strip() for x in filter_types.split(\",\")]) if filter_types else set())\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\" % str(e))\n return False", "def itemFilterType(*args, text: Union[AnyStr, bool]=\"\", type: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def visitCriteria(self, ctx: ApiQLParser.CriteriaContext):\n return lmap(lambda c: c.accept(self), ctx.getChildren(self.filter_ignored))", "def search(self, filters=None):\n raise NotImplementedError", "def type_search(self, queryset, name, value):\n qs_filter = Q(type=ROUTING_POLICY_TYPE_IMPORT_EXPORT)\n for v in value:\n qs_filter |= Q(type=v)\n return queryset.filter(qs_filter)", "def filter(self, cls):\n return ElementList([x for x in self._elements if isinstance(x, cls)])", "def get_all_items(model, type):\n if(type == \"office\"):\n return model.get_all_offices()\n elif(type == \"party\"):\n return model.get_all_parties()\n return []", "def filter(full_poi_list, type_of_poi):\n pois = []\n if type_of_poi == \"all\":\n for i in full_poi_list:\n entry = i[0]\n pois.append(entry)\n if type_of_poi == \"gym\":\n for i in full_poi_list:\n if i[1] == 2:\n entry = i[0]\n pois.append(entry)\n return pois", "def search_project_or_study(obj_type):\n\n matches = []\n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n possible_filters = filters_d[obj_type]\n \n for f in file_dict[obj_type][\"valid\"].values():\n json_file = data_dir + f\n json_s = open(json_file, \"r\").read()\n json_obj = json.loads(json_s)\n add_to_matches = True\n\n for filter_name in possible_filters:\n filter_val = request.args.get(filter_name)\n if filter_val:\n if json_obj[filter_name] != filter_val:\n add_to_matches = False\n \n if add_to_matches:\n matches.append(json_s)\n\n response_body = \"[\" + \",\".join(matches) + \"]\"\n response = get_response(response_body, status=200)\n\n except Exception as e:\n print(\"bad request\")\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n\n return response", "def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n 
result = none_type is False and all_type is True \\\n and (any_type is None or any_type is True)\n self._test_filter(none_type, all_type, any_type, result)", "def itemFilter(*args, byBin: Union[AnyStr, List[AnyStr], bool]=\"\", byName: Union[AnyStr,\n bool]=\"\", byScript: Union[AnyStr, bool]=\"\", byType: Union[AnyStr, List[AnyStr],\n bool]=\"\", category: Union[AnyStr, List[AnyStr], bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", clearByBin: bool=True, clearByType: bool=True,\n difference: Union[List[AnyStr, AnyStr], bool]=None, exists: bool=True,\n intersect: Union[List[AnyStr, AnyStr], bool]=None, listBuiltInFilters: bool=True,\n listOtherFilters: bool=True, listUserFilters: bool=True, negate: bool=True,\n parent: Union[AnyStr, bool]=\"\", pythonModule: Union[AnyStr, bool]=\"\",\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, uniqueNodeNames: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def node_type_filter(node_list, *filter_types):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.node_type_filter\")\n\n flg.info(\"Filtering Node List\")\n\n filtered_list = []\n for node in node_list:\n node_type = mc.nodeType(node)\n flg.debug(\"Node, {0}, is of type, {1}\".format(node, node_type))\n if node_type not in filter_types:\n flg.debug(\"Node kept\")\n filtered_list.append(node)\n else:\n flg.debug(\"Node filtered\")\n flg.info(\"Returning Filtered List\")\n return filtered_list", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def filter_all(_):\n return True", "def all(self):\n\t\timport revitron\n\t\tdb = revitron.DB\n\t\tf = db.LogicalOrFilter(\n\t\t db.ElementIsElementTypeFilter(False),\n\t\t db.ElementIsElementTypeFilter(True)\n\t\t)\n\n\t\tself.collector = self.collector.WherePasses(f)\n\t\treturn self", "def by_card_type(cls, card_type):\n return Filter('card_type', values=(card_type,), operator=Filter.OPERATOR['EQUAL'])", "def filter_room_type(self, criterias, index, bool_room_type):\n # if the user input doesn't pass verification, don't filter by this criteria\n # so always return True\n if bool_room_type == False:\n return True\n\n criterias = {key: self.convert_room_type_input(\n val) for key, val in criterias.items()}\n listing_room_type = self.process_room_type(\n self.data[index]['roomType'])\n bool_bed = criterias['Bedrooms'][1](\n listing_room_type['Bedrooms'], criterias['Bedrooms'][0])\n bool_bath = criterias['Bathrooms'][1](\n listing_room_type['Bathrooms'], criterias['Bathrooms'][0])\n return bool_bed & bool_bath", "def get_object_from_filter(obj, components):\n\n components = components[:]\n while len(components) > 2:\n obj = getattr(obj, components.pop(0))\n if len(components) == 2:\n if components[-1] != \"regex\":\n if not hasattr(obj, f\"__{components[-1]}__\"):\n obj = getattr(obj, components[0])\n return obj", "def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n 
runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()", "def filter_features_by_type(feats: FeaturesTuple, feat_type: FeatureTypeTypes) -> FeaturesTuple:\n return tuple([feat for feat in feats if type(feat) == feat_type])", "def _filter(self, lst):\n\n lst = list(set(lst)) # removes duplicate items\n if lst is None:\n return []\n arr = []\n for item in lst:\n for typ in [str(g) for g in self.__class__.OBJ_TYPES]:\n if cmds.objectType(item) == typ:\n arr.append(item)\n\n arr.sort(key=lambda x: x.count('|'))\n return arr[::-1] # reverse list", "def filter_by_type(x, _request_type_list=None):\n if _request_type_list:\n for request_type in _request_type_list:\n if x[\"request_type\"] == request_type:\n return True\n return False\n return True", "def components(self, predicate=None):\n \n if predicate is None:\n return self._get(\"components\").json()\n else:\n return self._get(\"components/search\", params={\"predicate\":predicate}).json()", "def filter_transactions_by_type(self, request):\n transactions = Transaction.objects.all()\n type = request.data[\"type\"]\n if type == \"in\" :\n transactions = transactions.filter(amount__gt=0)\n else:\n transactions = transactions.filter(amount__lt=0)\n serializer = TransactionSerializer(transactions, many=True)\n return Response(serializer.data)", "def filter_by_class(objects, cls):\n if cls is not None:\n filtered = []\n classes = cls if isinstance(cls, tuple) else (cls,)\n for o in objects:\n valid = False\n for c in classes:\n try:\n if o.is_valid(c):\n valid = True\n break\n except AttributeError:\n continue\n if valid:\n filtered.append(o)\n return filtered\n else:\n return list(objects)", "def get_recipes_by_types(self, recipe_type): \n\t\tfor key, val in self.recipes_list.items():\n\t\t\tif key == recipe_type:\n\t\t\t\tfor a, b in val.items():\n\t\t\t\t\tprint(str(b))", "def get_items_of_type(self, item_type):\n return (item for item in self.items if item.get_type() == item_type)", "def filter(data, mask, **kwargs):\n return Component(\n \"Filter\",\n arguments={\n 'data': Component.of(data),\n 'mask': Component.of(mask)\n },\n options={\n \n },\n constraints=kwargs)", "def attribute_search(self, attribute, filters):\n for i in self.response_info['results']:\n if filters in i[attribute]:\n self.output.append(i)\n self.counter += 1", "def get_search(self, type, params, batch=False):\n path = 'search'\n args = {'type': type}\n args.update(params)\n return self.make_request(path, 'GET', args, batch=batch)", "def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item", "def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the 
key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)", "def matching_blocks_by_type(self):\n try:\n block_structure = get_course_in_cache(self.course_key)\n matching_blocks = block_structure.topological_traversal(\n filter_func=self.block_type_filter,\n yield_descendants_of_unyielded=True,\n )\n self.course_block_structure = block_structure\n except item_not_found_error():\n return []\n\n return list(matching_blocks)", "def _filter_for_panel( item, item_type, filters, context ):\n def _apply_filter( filter_item, filter_list ):\n for filter_method in filter_list:\n try:\n if not filter_method( context, filter_item ):\n return False\n except Exception as e:\n raise MessageException( \"Toolbox filter exception from '%s': %s.\" % ( filter_method.__name__, e ) )\n return True\n if item_type == panel_item_types.TOOL:\n if _apply_filter( item, filters[ 'tool' ] ):\n return item\n elif item_type == panel_item_types.LABEL:\n if _apply_filter( item, filters[ 'label' ] ):\n return item\n elif item_type == panel_item_types.SECTION:\n # Filter section item-by-item. Only show a label if there are\n # non-filtered tools below it.\n\n if _apply_filter( item, filters[ 'section' ] ):\n cur_label_key = None\n tools_under_label = False\n filtered_elems = item.elems.copy()\n for key, section_item_type, section_item in item.panel_items_iter():\n if section_item_type == panel_item_types.TOOL:\n # Filter tool.\n if _apply_filter( section_item, filters[ 'tool' ] ):\n tools_under_label = True\n else:\n del filtered_elems[ key ]\n elif section_item_type == panel_item_types.LABEL:\n # If there is a label and it does not have tools,\n # remove it.\n if cur_label_key and ( not tools_under_label or not _apply_filter( section_item, filters[ 'label' ] ) ):\n del filtered_elems[ cur_label_key ]\n\n # Reset attributes for new label.\n cur_label_key = key\n tools_under_label = False\n\n # Handle last label.\n if cur_label_key and not tools_under_label:\n del filtered_elems[ cur_label_key ]\n\n # Only return section if there are elements.\n if len( filtered_elems ) != 0:\n copy = item.copy()\n copy.elems = filtered_elems\n return copy\n\n return None", "def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt", "def _build_filter_part(self, cls, filters, order_by=None, select=None):\r\n import types\r\n query_parts = []\r\n\r\n order_by_filtered = False\r\n\r\n if order_by:\r\n if order_by[0] == \"-\":\r\n order_by_method = \"DESC\";\r\n order_by = order_by[1:]\r\n else:\r\n order_by_method = \"ASC\";\r\n\r\n if select:\r\n if order_by and order_by in select:\r\n order_by_filtered = True\r\n query_parts.append(\"(%s)\" % select)\r\n\r\n if isinstance(filters, str) or isinstance(filters, unicode):\r\n query = \"WHERE %s AND `__type__` = '%s'\" % (filters, cls.__name__)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n query += \" ORDER BY itemName() %s\" % order_by_method\r\n elif order_by != None:\r\n query += \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n return query\r\n\r\n for filter in filters:\r\n filter_parts = []\r\n filter_props = filter[0]\r\n if type(filter_props) != list:\r\n filter_props = [filter_props]\r\n for filter_prop in filter_props:\r\n (name, op) = 
filter_prop.strip().split(\" \", 1)\r\n value = filter[1]\r\n property = cls.find_property(name)\r\n if name == order_by:\r\n order_by_filtered = True\r\n if types.TypeType(value) == types.ListType:\r\n filter_parts_sub = []\r\n for val in value:\r\n val = self.encode_value(property, val)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts_sub.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts_sub.append(self._build_filter(property, name, op, val))\r\n filter_parts.append(\"(%s)\" % (\" OR \".join(filter_parts_sub)))\r\n else:\r\n val = self.encode_value(property, value)\r\n if isinstance(val, list):\r\n for v in val:\r\n filter_parts.append(self._build_filter(property, name, op, v))\r\n else:\r\n filter_parts.append(self._build_filter(property, name, op, val))\r\n query_parts.append(\"(%s)\" % (\" or \".join(filter_parts)))\r\n\r\n\r\n type_query = \"(`__type__` = '%s'\" % cls.__name__\r\n for subclass in self._get_all_decendents(cls).keys():\r\n type_query += \" or `__type__` = '%s'\" % subclass\r\n type_query +=\")\"\r\n query_parts.append(type_query)\r\n\r\n order_by_query = \"\"\r\n\r\n if order_by:\r\n if not order_by_filtered:\r\n query_parts.append(\"`%s` LIKE '%%'\" % order_by)\r\n if order_by in [\"__id__\", \"itemName()\"]:\r\n order_by_query = \" ORDER BY itemName() %s\" % order_by_method\r\n else:\r\n order_by_query = \" ORDER BY `%s` %s\" % (order_by, order_by_method)\r\n\r\n if len(query_parts) > 0:\r\n return \"WHERE %s %s\" % (\" AND \".join(query_parts), order_by_query)\r\n else:\r\n return \"\"", "def filter_scalings(scaling_list, scaling_type):\n return filter(\n lambda _f: True if scaling_type in _f[\"runname\"] else False,\n scaling_list)", "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return data_filtered", "def get_node_components(self, node=None, filter_type=None):\n if node is not None and node not in self.get_nodes():\n raise KeyError('{} is not an exiting node'.format(node))\n\n if node is not None:\n return self.components[node].get_components(filter_type=filter_type)\n else:\n out = {}\n for node_name in self.get_nodes():\n for comp_name, comp in self.components[\n node_name].get_components(\n filter_type=filter_type).items():\n out[comp_name] = comp\n return out", "def getResultDefs(self, type=None):\n results = 
self.results.values()\n\n if type:\n results = filter(lambda result: result.type == type, results)\n\n return results", "def dependency_filter(dependencies,start=0,end=-1,filter_val=None,filter_vals=[],field=None,filter_range='dependent'):\n return [getattr(i, field) if field else i for i in dependencies if \n (start == 0 or getattr(i, filter_range).idx >= start) and \n (end == -1 or getattr(i, filter_range).idx < end) and \n ((filter_val == None and not filter_vals) or i.type in filter_vals + [filter_val] or (filter_val[-1]=='*' and i.type.startswith(filter_val[0:-1])))\n ]", "def get_contract_filters(*contracts):\n return [generate_filter(filter_text) for filter_text in contracts]", "def ListMatchingComponents(self, policy_type):\n base_name = self.GetBaseFilename(policy_type)\n files = glob.glob('%s_*.*' % base_name)\n len_base_name = len(base_name) + 1\n return [ file[len_base_name:file.rfind('.')] for file in files ]", "def types_query(owner_name):\n query = Products.query.with_entities(Products.type_name.label('Type'))\\\n .filter_by(owner_name=owner_name)\\\n .distinct()\n return query", "def test_get_component_descriptors_by_type_using_get(self):\n pass", "def servicemanage_type_get_all(context, inactive=False, filters=None):\n filters = filters or {}\n\n read_deleted = \"yes\" if inactive else \"no\"\n rows = model_query(context, models.ServiceManageTypes,\n read_deleted=read_deleted).\\\n options(joinedload('extra_specs')).\\\n order_by(\"name\").\\\n all()\n\n # TODO(sirp): this patern of converting rows to a result with extra_specs\n # is repeated quite a bit, might be worth creating a method for it\n result = {}\n for row in rows:\n result[row['name']] = _dict_with_extra_specs(row)\n\n return result", "def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)", "def _filter(self, filter_condition):\n def _inner_filter(item: list):\n return self._default_filter(item, filter_condition)\n\n self._result = list(filter(_inner_filter, self._data))", "def _filter_types_index(self, types_index, resource_type_id):\n entities = []\n if resource_type_id in types_index:\n entities.extend(types_index[resource_type_id])\n return entities", "def get_fields_by_type(self, field_type):\n for field in self.fields:\n if field.get_field_type() == field_type:\n yield field", "def search(self, filter):\n return [note for note in self.notes if note.match(filter)]", "def get_estimators(type_filter='all'):\n\n if type_filter not in ['all', 'classifier', 'transformer', 'cluster']:\n # TODO: make this exception more specific\n raise Exception(\"type_filter should be element of \"\n \"['all', 'classifier', 'transformer', 'cluster']\")\n\n all_classes = _get_all_classes()\n\n # Filter out those that are not a subclass of `sklearn.BaseEstimator`\n all_classes = [c for c in set(all_classes)\n if issubclass(c[1], BaseEstimator)]\n\n # get rid of abstract base classes\n all_classes = filter(lambda c: not is_abstract(c[1]), all_classes)\n\n # only keep those that are from tslearn\n all_classes = filter(lambda c: not is_sklearn(c[1]), all_classes)\n\n # Now filter out the estimators that are not of the specified type\n filters = {\n 'all': [ClassifierMixin, RegressorMixin,\n TransformerMixin, ClusterMixin],\n 'classifier': [ClassifierMixin],\n 'transformer': [TransformerMixin],\n 'cluster': [ClusterMixin]\n }[type_filter]\n filtered_classes = []\n for _class in all_classes:\n if any([issubclass(_class[1], mixin) for mixin in filters]):\n filtered_classes.append(_class)\n\n # Remove duplicates 
and return the list of remaining estimators\n return sorted(set(filtered_classes), key=itemgetter(0))", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "def __init__(self, type: int, filter: int):\n ...", "def sol_type_filter(opps, types):\n global _total_skipped, _total_kept\n filtered_opps = []\n for opp in opps:\n if opp['type']['value'] in types:\n filtered_opps.append(opp)\n _total_kept += 1\n else:\n _total_skipped += 1\n\n logger.debug(\"Total skip stats so far: Skipping {} out of {} due to solicitation type\".format(_total_skipped, _total_kept + _total_skipped))\n return filtered_opps", "def filter_by_query_params(self, request):\n items = self\n company = request.GET.get('company', None)\n main_contractor = request.GET.get('main_contractor', None)\n main_sub_contractor = request.GET.get('main_sub_contractor', None)\n client = request.GET.get('client', None)\n q = request.GET.get('q', None)\n sort_by = request.GET.get('sort_by', None)\n str = request.GET.get('str', None)\n\n # filter\n if main_contractor:\n items = items.filter(main_contractor=main_contractor).distinct()\n if main_sub_contractor:\n items = items.filter(main_sub_contractor=main_sub_contractor).distinct()\n if client:\n items = items.filter(client=client).distinct()\n if company:\n items = items.filter(companies_linked__in=[company]).distinct()\n # sort\n if q == 'asc' and sort_by:\n items = items.order_by(sort_by).distinct()\n\n if q == 'des' and sort_by:\n items = items.order_by('-' + sort_by).distinct()\n\n if str:\n # str = str.strip().lower()\n items = items.filter(Q(reference_no__icontains=str) |\n Q(name__icontains=str)).distinct()\n return items", "def filter(self, filter_dict):\n pass", "def test_filter_services_by_components(self):\n\n service1 = sample_services(user=self.user, title='Mechanical Work 1')\n service2 = sample_services(user=self.user, title='Mechanical Work 2')\n service3 = sample_services(user=self.user, title='Mechanical Work 3')\n\n component1 = sample_componenets(user=self.user, name='Mech1')\n component2 = sample_componenets(user=self.user, name='Mech2')\n component3 = sample_componenets(user=self.user, name='mech3')\n\n service1.components.add(component1)\n service2.components.add(component2)\n service3.components.add(component3)\n\n res = self.client.get(\n SERVICES_URL,\n {'tags': f'{component1.id},{component2.id},{component3.id}'}\n )\n\n serializer1 = ServiceSerializer(service1)\n serializer2 = ServiceSerializer(service2)\n serializer3 = ServiceSerializer(service3)\n\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def filt(rec):\n return True # Show everything", "def filter(self, viewer, parent, elements):\n\n return [e for e in elements if self.select(viewer, parent, e)]", "def filter(self, filters):\r\n # because http.Request needs params to be a dict of strings to strings\r\n # (roughly) and since BitBucket wants repeated parameters to express\r\n # OR, we'll do the quoting by hand ourselves\r\n def flatten_conditions(filters):\r\n for key, val in filters.items():\r\n if isinstance(val, (list, tuple)):\r\n for v in val:\r\n yield (port.to_b(key), port.to_b(v))\r\n else:\r\n yield (port.to_b(key), port.to_b(val))\r\n\r\n to_encode = tuple(flatten_conditions(filters))\r\n qs = port.urlencode(to_encode)\r\n\r\n url = '{0}/?{1}'.format(self.get_url(), qs)\r\n return http.Request('GET', url), parsers.parse_json", "def doFiltering(self, searchfunc, filters=None):\n F=[]\n 
for f in self.filters:\n F.append(f.getFilter())\n #print F\n sets = []\n for f in F:\n col, val, op, boolean = f\n names = searchfunc(col, val, op)\n sets.append((set(names), boolean))\n names = sets[0][0]\n for s in sets[1:]:\n b=s[1]\n if b == 'AND':\n names = names & s[0]\n elif b == 'OR':\n names = names | s[0]\n elif b == 'NOT':\n names = names - s[0]\n names = list(names)\n self.updateResults(len(names))\n return names", "def test_get_component_descriptors_by_types_using_get(self):\n pass", "def filter_inspection_type(data, inspection_type):\n return [row for row in data if row['inspection_type'] == inspection_type]", "def find(cls, **filters):\n return cls.query.filter_by(**filters).all()", "def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items" ]
[ "0.7252907", "0.6754801", "0.6730381", "0.66894734", "0.6564939", "0.6288751", "0.6273336", "0.62642753", "0.6115402", "0.5971226", "0.59191287", "0.5848749", "0.58480895", "0.5832433", "0.5759755", "0.5755212", "0.56894547", "0.56826574", "0.5639281", "0.56076664", "0.5592761", "0.55915564", "0.5583345", "0.54409814", "0.5439224", "0.5420898", "0.5412727", "0.5404295", "0.53887784", "0.53765273", "0.5369036", "0.5367627", "0.5361758", "0.53568834", "0.5348402", "0.53368133", "0.5330973", "0.53249484", "0.53220046", "0.5321912", "0.5321202", "0.53184074", "0.531246", "0.5274514", "0.52638537", "0.52604777", "0.5258013", "0.52559274", "0.5255614", "0.52424014", "0.52345604", "0.5222513", "0.5203605", "0.52017033", "0.5200359", "0.51820076", "0.5176559", "0.5176214", "0.5170003", "0.5165079", "0.5160561", "0.5159679", "0.5156709", "0.51528263", "0.51487833", "0.5141232", "0.51384294", "0.5137739", "0.5137098", "0.51285505", "0.51283485", "0.51208675", "0.50999403", "0.50944513", "0.5088926", "0.5088843", "0.50855887", "0.50756794", "0.5074656", "0.50723374", "0.50682783", "0.5067265", "0.50521946", "0.505053", "0.50497985", "0.5049078", "0.50474995", "0.50459003", "0.5045165", "0.50380456", "0.5037114", "0.50220513", "0.5018538", "0.50116086", "0.5011279", "0.5009123", "0.49976364", "0.49874136", "0.49773327", "0.4975905" ]
0.74276376
0
Inform a service component that it is providing a service. Called when an immediately-containing service manager binds this object to perform the named service.
def bound(name):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service(self, service):\n \n self._service = service", "def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, \"registering agent's service on the SOEF.\")", "def service(self):\n pass", "def register_service(service, iface, name):", "def _hs_service(self, service_name, address, port, properties):\n identifier = service_name.split(\".\")[0]\n name = properties.get(\"Name\")\n hsgid = properties.get(\"hG\")\n service = conf.DmapService(identifier, hsgid, port=port, properties=properties)\n self._handle_service(address, name, service)", "def on_service_arrival(self, svc_ref):\n with self._lock:\n new_ranking = svc_ref.get_property(SERVICE_RANKING)\n if self._current_ranking is not None:\n if new_ranking > self._current_ranking:\n # New service with better ranking: use it\n self._pending_ref = svc_ref\n old_ref = self.reference\n old_value = self._value\n\n # Clean up like for a departure\n self._current_ranking = None\n self._value = None\n self.reference = None\n\n # Unbind (new binding will be done afterwards)\n self._ipopo_instance.unbind(self, old_value, old_ref)\n else:\n # No ranking yet: inject the service\n self.reference = svc_ref\n self._value = self._context.get_service(svc_ref)\n self._current_ranking = new_ranking\n self._pending_ref = None\n\n self._ipopo_instance.bind(self, self._value, self.reference)", "def service(self) -> BaseService:", "def add_service(self, service):\n self.app.add_service(service)", "def find_service(iface, context, name):", "def register_service(self, service, iface, name=''):\n key = 'component' if not callable(service) else 'factory'\n kwargs = {'provided': iface, key: service, 'name': name}\n self._component.registerUtility(**kwargs)", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def service_handler(service):\n entity_id = ENTITY_ID_FORMAT.format(service.service)\n script = component.entities.get(entity_id)\n if script:\n script.turn_on()", "def _bind_to_service(self):\n if self._service_dn:\n # bind with the service_dn\n self._server.simple_bind_s(self._service_dn, self._service_password)\n else:\n # force a connection without binding\n self._server.whoami_s()", "def offerService(self, serviceName):\n service = Service(serviceName)\n self.serviceTable[serviceName] = service\n return service", "def get_service(self):", "def set_service(service_name, reference):\n Container.services[service_name] = reference", "def servicesChanged(self) -> None:\n ...", "def service(self, block, service_name):\n declaration = block.service_declaration(service_name)\n if declaration is None:\n raise NoSuchServiceError(f\"Service {service_name!r} was not requested.\")\n service = self._services.get(service_name)\n if service is None and declaration == \"need\":\n raise NoSuchServiceError(f\"Service {service_name!r} is not available.\")\n return service", "def _remember_service_name(self, event):\n service_name = event[\"arguments\"][\"service_name\"]\n # We've added logging of the service_handle to the API signature in\n # the Monitor, but for backwards compatibility we'll keep it as\n # follows for now.\n service_handle = \"0x%08x\" % event[\"return_value\"]\n self.services[service_handle] = service_name", "def __init__(self, service_name):\n self.service_name = service_name", "def set_service_name(name):\n emit(UPDATE_SERVICE_SIGNAL, 
BREADCRUMB_SENDER, name=name)", "def register_service(self, service, name):\n assert service._remote_service, \"Services should be decorated correctly.\"\n \n prepare_remote_service(service)\n self._services[name] = service", "def _bind_agent(self, field, service, svc_ref):\n # Tell it to handle remaining components\n service.handle(self._remaining)\n self._remaining.clear()", "def future_supported_service(service_name):\n print('Service {} linked.'.format(service_name))\n pass", "def register_service(self, service: str, cb: Callable, **kwargs: Optional[Any]) -> None:\n self._check_service(service)\n d, s = service.split(\"/\")\n self.logger.debug(\"register_service: %s/%s, %s\", d, s, kwargs)\n\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n kwargs[\"__name\"] = self.name\n\n self.AD.services.register_service(namespace, d, s, cb, __async=\"auto\", **kwargs)", "def register_service(self, name, command):\n service_name = command['service_name']\n try:\n service_type = self.get_interface_type(command['interface_type'], '.srv')\n self.srv_clients[service_name] = self.AsyncServiceProxy(\n self,\n service_name,\n service_type)\n\n if service_name in self.offline_services:\n self.offline_services.remove(service_name)\n except JoyTeleopException:\n if service_name not in self.offline_services:\n self.offline_services.append(service_name)", "def _registerService(self, callerId, service, serviceApi, callerApi):\n if service not in self.FilterServices:\n # The type of the service is not included in the XMLRPC call\n self.__docWriter.addService(callerId, service, \"TODO: type\")", "def startService(self):\n super(MasterService, self).startService()\n self.dispatcher.startDispatching()", "def bind(self, svc, svc_ref):\n with self._lock:\n if ORDER_HANDLER in svc_ref.get_property(pelix.OBJECTCLASS):\n targets = svc_ref.get_property(ORDER_TARGETS)\n if isinstance(targets, (list, tuple)):\n for target in targets:\n self._target_handlers.setdefault(target, []).append(svc)\n\n else:\n self._target_handlers.setdefault(str(targets), []).append(svc)", "def namingService(self):\n return _libSALOME_LifeCycleCORBA.SALOME_LifeCycleCORBA_namingService(self)", "def _call_service(hass, entity, service):\n _LOGGER.debug('Command: %s %s', entity, service)\n hass.services.call(\n 'homeassistant', service, {'entity_id': entity})", "def add_service(self, zeroconf, service_type, name):\n self.pending.add(\n asyncio.ensure_future(self._internal_add(zeroconf, service_type, name))\n )", "def initService(self):", "def add(self, service: AbstractService):\n self.services.append(service)", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def register_service_agent(cm, sc, conf, rpcmgr):\n\n service_type = lb_const.SERVICE_TYPE\n cm.register_service_agent(service_type, rpcmgr)", "def addService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "def _service_task(self):\n pass", "def service(self, service: IBMExperimentService) -> None:\n self._set_service(service)", "def setService(self, service):\n self.__service = service\n self.__buttons.setDisabled(False)\n self.name.setText(service.data.name)\n self.threadable.setChecked(service.data.threadable)\n self.min_cores.setValue(service.data.min_cores)\n self.max_cores.setValue(service.data.max_cores)\n self.min_memory.setValue(service.data.min_memory // 1024)\n self.min_gpu_memory.setValue(service.data.min_gpu_memory // 1024)\n 
self._tags_w.set_tags(service.data.tags)\n self.timeout.setValue(service.data.timeout)\n self.timeout_llu.setValue(service.data.timeout_llu)\n self.min_memory_increase.setValue(service.data.min_memory_increase // 1024)\n self.__service = service.data", "def fastlyservice(args):\n pprint(api.service(service_id).attrs)", "async def on_terncy_svc_add(event):\n dev_id = event.data[\"dev_id\"]\n _LOGGER.info(\"found terncy service: %s %s\", dev_id, event.data)\n host = event.data[\"ip\"]\n if dev_id == tern.dev_id and not tern.is_connected():\n tern.host = host\n _LOGGER.info(\"start connection to %s %s\", dev_id, tern.host)\n\n hass.async_create_task(setup_terncy_loop())", "def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = res.resource\n return self._service", "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def _non_hs_service(self, service_name, address, port, properties):\n identifier = service_name.split(\".\")[0]\n name = properties.get(\"CtlN\")\n service = conf.DmapService(identifier, None, port=port, properties=properties)\n self._handle_service(address, name, service)", "def update_service(self, service_id, service_ref):\n raise exception.NotImplemented() # pragma: no cover", "def save(self):\n if len(str(self.name.text())) < 3:\n QtWidgets.QMessageBox.critical(self, \"Error\",\n \"The service name must be at least 3 characters.\")\n return\n\n if not str(self.name.text()).isalnum():\n QtWidgets.QMessageBox.critical(self, \"Error\", \"The service name must alphanumeric.\")\n return\n\n if self.min_memory_increase.value() <= 0:\n QtWidgets.QMessageBox.critical(self, \"Error\",\n \"The minimum memory increase must be more than 0 MB\")\n return\n\n service = opencue.wrappers.service.Service()\n if self.__service:\n service.data.id = self.__service.data.id\n service.setName(str(self.name.text()))\n service.setThreadable(self.threadable.isChecked())\n service.setMinCores(self.min_cores.value())\n service.setMaxCores(self.max_cores.value())\n service.setMinMemory(self.min_memory.value() * 1024)\n service.setMinGpuMemory(self.min_gpu_memory.value() * 1024)\n service.setTimeout(self.timeout.value())\n service.setTimeoutLLU(self.timeout_llu.value())\n service.setMinMemoryIncrease(self.min_memory_increase.value() * 1024)\n service.setTags(self._tags_w.get_tags())\n\n self.saved.emit(service)", "def enable_service(self, service):\n svc = self.service_path % service\n ret = self.rclient.put(svc)\n if ret.status != restclient.Status.ACCEPTED:\n exception_msg = (_(\"Cannot enable %s service.\") % service)\n raise exception.ShareBackendException(msg=exception_msg)", "def register_service(self, service):\n for message_handler in service.iter_message_handlers():\n self.message_handlers[message_handler.name] = message_handler", "def _register(service, notifier=None):\n\n full_name = service.iden\n slot = service_store[full_name]\n try:\n slot['msg'] = 'Async image creation started'\n slot['stage'] = 2\n service_store[full_name] = slot\n\n service.make_image()\n\n slot['msg'] = 'Image for service created'\n slot['stage'] = 3\n service_store[full_name] = slot\n\n service.start_workers()\n\n slot['msg'] = 'Workers started'\n slot['stage'] = 4\n service_store[full_name] = slot\n\n service.check_health()\n\n slot['msg'] = 'Service ready'\n slot['stage'] = 5\n slot['slot'] = 'ready'\n slot['service'] = service\n service_store[full_name] = slot\n\n result = 
ok\n data = service\n except Exception as exc:\n slot['msg'] = 'Error: {}'.format(exc)\n slot['slot'] = 'error'\n service_store[full_name] = slot\n\n result = error\n data = str(exc)\n\n if service.notify and notifier is not None:\n notifier(service.notify, result, data)", "def turn_on_service(service):\n # We could turn on script directly here, but we only want to offer\n # one way to do it. Otherwise no easy way to call invocations.\n for script in component.extract_from_service(service):\n turn_on(hass, script.entity_id)", "def service(self):\n return self._service", "def service(self):\n return self._service", "def definition_of_services(self):\r\n return True", "def start_as_service(self):\n from ..program_manager import ProgramManager\n send_action(ProgramManager.NAME, 'start', self.name)", "def hostsplit_service(self):\n self.which_owner()\n self.which_security()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)", "def setContextAsService(self, contextAsService):\n pass", "def __getitem__(self, name):\r\n return Service(self, name)", "def service_status(self, service_status):\n\n self._service_status = service_status", "def service(self, service_name: str, **kw):\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n if name == service_name:\n log.debug(\"Importing service %s\", name)\n module = import_module(\"..services.{}\".format(name), __name__)\n svc = module.Service(client=self, **kw) # type: ignore\n return svc\n raise ServiceNotFound()", "def svc_provider(self, svc_provider):\n\n self._svc_provider = svc_provider", "def _mrp_service(self, _, address, port, properties):\n identifier = properties.get(\"UniqueIdentifier\")\n name = properties.get(\"Name\")\n service = conf.MrpService(identifier, port, properties=properties)\n self._handle_service(address, name, service)", "def publishService(self, name, stype, port, domain=\"\", host=\"\"):\n if name in self.published:\n return\n\n if not self.bus:\n self.bus = dbus.SystemBus()\n\n server = dbus.Interface(\n self.bus.get_object(\n avahi.DBUS_NAME,\n avahi.DBUS_PATH_SERVER),\n avahi.DBUS_INTERFACE_SERVER)\n\n g = dbus.Interface(\n self.bus.get_object(avahi.DBUS_NAME,\n server.EntryGroupNew()),\n avahi.DBUS_INTERFACE_ENTRY_GROUP)\n\n g.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC,dbus.UInt32(0),\n name, stype, domain, host,\n dbus.UInt16(port), \"\")\n\n g.Commit()\n self.published[name] = g", "def get_service(self):\n return self.__service", "def service_mange(self, room, service):\n self.room[room] = service\n return True", "async def async_service_handler(service):\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)", "def custom_service_endpoint(self) -> 
global___Snippet.ClientInitialization.ServiceEndpoint:", "def service_code(self, service_code):\n \n self._service_code = service_code", "def register_service(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def get_service():\n if not hasattr(g, 'service'):\n g.service = Service()\n return g.service", "def test_startService(self):\n port = self.port(description=u'foo')\n port.privilegedStartService()\n self.assertTrue(self._service.privilegedStarted)\n port.startService()\n self.assertTrue(self._service.started)", "def service(self):\n self.serviceConnects()\n self.serviceQueries()", "def on_service_departure(self, svc_ref):\n with self._lock:\n if svc_ref is self.reference:\n # Injected service going away...\n service = self._value\n\n # Clear the instance values\n self._current_ranking = None\n self._value = None\n self.reference = None\n\n if self.requirement.immediate_rebind:\n # Look for a replacement\n self._pending_ref = self._context.get_service_reference(\n self.requirement.specification, self.requirement.filter\n )\n else:\n self._pending_ref = None\n\n self._ipopo_instance.unbind(self, service, svc_ref)", "def test_add_virtual_service(self):\n pass", "def register(service_class, args, namespace, user_code, notifier=None):\n try:\n user = g.user\n except RuntimeError:\n user = 'anonymous'\n service = service_class(\n namespace=namespace, code_dir=user_code,\n users={user: ['POST', 'PUT', 'DELETE']},\n **dict(args))\n try:\n slot = service_store[service.iden]['slot']\n except KeyError:\n slot = 'free'\n\n # make sure to only use free or errored out slots\n if slot not in ('free', 'error'):\n raise APIException(\"service slot not available: {}\\n\"\n \"Current state: {}\"\n .format(service.iden, slot), 400)\n\n service_store[service.iden] = {\n 'slot': 'busy',\n 'msg': 'Empty service created',\n 'stage': 1,\n 'total_stages': 5,\n 'service': None\n }\n\n _async_register(service, notifier)\n return service", "def ServiceRequest(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "def service_instance(self):\n return self.service_class(self)", "def _spawn_service_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n\n listen_name = get_safe(config, \"process.listen_name\") or process_instance.name\n log.debug(\"Service Process (%s) listen_name: %s\", name, listen_name)\n process_instance._proc_listen_name = listen_name\n\n # Service RPC endpoint\n rsvc1 = self._create_listening_endpoint(node=self.container.node,\n from_name=listen_name,\n process=process_instance)\n # Named local RPC endpoint\n rsvc2 = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n # cleanup method to delete process queue\n cleanup = lambda _: self._cleanup_method(process_instance.id, rsvc2)\n\n # Start an ION process with the right kind of endpoint factory\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=[rsvc1, rsvc2],\n proc_name=process_instance._proc_name,\n cleanup_method=cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_service_process for %s\" % \",\".join((listen_name, process_instance.id)))\n\n # map gproc to process_instance\n 
self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n self._process_init(process_instance)\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n return process_instance", "def get_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def saved(self, service):\n if not self.__show:\n msg = QtWidgets.QMessageBox()\n msg.setText(\"You are about to modify a facility wide service configuration. \"\n \"Are you in PSR-Resources?\")\n msg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\n msg.setDefaultButton(QtWidgets.QMessageBox.No)\n if msg.exec_() == QtWidgets.QMessageBox.No:\n return\n\n if self.__new_service:\n if self.__show:\n self.__show.createServiceOverride(service.data)\n else:\n opencue.api.createService(service.data)\n else:\n service.update()\n\n self.refresh()\n self.__new_service = False\n\n for i in range(0, self.__service_list.count()):\n item = self.__service_list.item(i)\n if item:\n if str(item.text()) == service.name():\n self.__service_list.setCurrentRow(i, QtCore.QItemSelectionModel.Select)\n break", "def _call_service(self, action):\n conf_service = action.get(CONF_SERVICE, action.get(CONF_SERVICE_OLD))\n self._last_action = action.get(CONF_ALIAS, conf_service)\n _LOGGER.info(\"Executing script %s step %s\", self._name,\n self._last_action)\n domain, service = split_entity_id(conf_service)\n data = action.get(CONF_SERVICE_DATA, {})\n self.hass.services.call(domain, service, data)", "def _patch(self, _) -> None:\n if not self.charm.unit.is_leader():\n return\n\n client = Client()\n try:\n if self.service_name != self._app:\n self._delete_and_create_service(client)\n client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)\n except ApiError as e:\n if e.status.code == 403:\n logger.error(\"Kubernetes service patch failed: `juju trust` this application.\")\n else:\n logger.error(\"Kubernetes service patch failed: %s\", str(e))\n else:\n logger.info(\"Kubernetes service '%s' patched successfully\", self._app)", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def test_modify_virtual_service(self):\n pass", "def services(**kwargs):\n pass", "def register(self, service_name, service_addr, service_ttl):\n raise NotImplementedError", "def service_bus_server():\n pass", "def __init__(self, task_id: int) -> None:\n BaseModifierHandler.__init__(self, task_id, HandlerNames.enable_service)", "async def reload_service_handler(service: ServiceCall) -> None:\n auto = [e for e in component.entities if not e.user_defined]\n\n if (conf := await component.async_prepare_reload()) is None:\n return\n await _async_process_config(hass, conf)\n\n await component.async_add_entities(auto)\n\n await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)", "def addServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def with_service_binding(self, alias: str, service: \"Container\") -> \"Container\":\n _args = [\n Arg(\"alias\", alias),\n Arg(\"service\", service),\n ]\n _ctx = self._select(\"withServiceBinding\", _args)\n return Container(_ctx)", 
"async def async_service_handler(service: ServiceCall) -> None:\n api_endpoint = MAP_SERVICE_API[service.service]\n\n data = service.data.copy()\n addon = data.pop(ATTR_ADDON, None)\n slug = data.pop(ATTR_SLUG, None)\n payload = None\n\n # Pass data to Hass.io API\n if service.service == SERVICE_ADDON_STDIN:\n payload = data[ATTR_INPUT]\n elif api_endpoint.pass_data:\n payload = data\n\n # Call API\n # The exceptions are logged properly in hassio.send_command\n with suppress(HassioAPIError):\n await hassio.send_command(\n api_endpoint.command.format(addon=addon, slug=slug),\n payload=payload,\n timeout=api_endpoint.timeout,\n )", "def resolve_service(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def service():\n conf = template('remote/addok.service', **config)\n put(conf, '/etc/systemd/system/addok.service')\n systemctl('enable addok.service')", "def pre_service_instance_update(self, resource_id, resource_dict):\n pass", "def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True", "def startService(self):\n self.world.start()", "async def async_service_handler(service):\n api_command = MAP_SERVICE_API[service.service][0]\n data = service.data.copy()\n addon = data.pop(ATTR_ADDON, None)\n snapshot = data.pop(ATTR_SNAPSHOT, None)\n payload = None\n\n # Pass data to Opp.io API\n if service.service == SERVICE_ADDON_STDIN:\n payload = data[ATTR_INPUT]\n elif MAP_SERVICE_API[service.service][3]:\n payload = data\n\n # Call API\n try:\n await oppio.send_command(\n api_command.format(addon=addon, snapshot=snapshot),\n payload=payload,\n timeout=MAP_SERVICE_API[service.service][2],\n )\n except OppioAPIError as err:\n _LOGGER.error(\"Error on Opp.io API: %s\", err)", "def test_service(self):\n port = self.port(store=self.store)\n installOn(port, self.store)\n\n self.assertEqual(\n list(self.store.powerupsFor(IService)),\n [port])" ]
[ "0.6836905", "0.6561478", "0.63746643", "0.62869096", "0.61396533", "0.61276996", "0.6079517", "0.6064858", "0.60322297", "0.60286254", "0.6019076", "0.6012391", "0.60076535", "0.5998224", "0.5997815", "0.5986478", "0.5975217", "0.59336156", "0.59249896", "0.5881597", "0.58615196", "0.58580655", "0.58483875", "0.5814194", "0.581376", "0.5797374", "0.57731545", "0.5772075", "0.5763532", "0.575385", "0.573631", "0.57260245", "0.57047886", "0.56880826", "0.5669847", "0.56584334", "0.5648049", "0.5631191", "0.5621488", "0.55993927", "0.55987304", "0.55738884", "0.55668145", "0.5555359", "0.55510336", "0.5521098", "0.5519171", "0.55183053", "0.5516355", "0.5514428", "0.5505173", "0.54962254", "0.54962254", "0.54943997", "0.54929405", "0.5469701", "0.54659134", "0.5460516", "0.5458643", "0.5457313", "0.54508376", "0.54308", "0.5421372", "0.54053247", "0.53941655", "0.5388375", "0.53879446", "0.5384783", "0.5382371", "0.5381731", "0.53777194", "0.53759074", "0.53754103", "0.53684586", "0.5359594", "0.5352799", "0.53446585", "0.5341798", "0.53377676", "0.53373003", "0.53293157", "0.53075236", "0.5304401", "0.52977943", "0.52977943", "0.528649", "0.52824956", "0.5282316", "0.52672535", "0.52660793", "0.5265604", "0.52650285", "0.52617615", "0.52342385", "0.5232075", "0.522601", "0.5205359", "0.52045465", "0.51994085", "0.51990664", "0.519834" ]
0.0
-1
Inform a service component that it is no longer providing a service. Called when an immediately-containing service manager unbinds this object from performing the named service.
def unbound(name):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_service_departure(self, svc_ref):\n with self._lock:\n if svc_ref is self.reference:\n # Injected service going away...\n service = self._value\n\n # Clear the instance values\n self._current_ranking = None\n self._value = None\n self.reference = None\n\n if self.requirement.immediate_rebind:\n # Look for a replacement\n self._pending_ref = self._context.get_service_reference(\n self.requirement.specification, self.requirement.filter\n )\n else:\n self._pending_ref = None\n\n self._ipopo_instance.unbind(self, service, svc_ref)", "def _unregister_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_unregister_service_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"unregistering service from SOEF.\")", "def unregister(self, service_name, service_addr):\n raise NotImplementedError", "def remove_service(self, zeroconf, service_type, name):", "def disownService(self, name):\r\n _service = self.hendrix.getServiceNamed(name)\r\n _service.disownServiceParent()\r\n return _service.factory", "def unregister_service(self, name):\n self._services.remove(name)", "def unregister_service(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def stop_service(self):\n\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Tearing down service\")\n\n try:\n self.dkr_service.remove()\n except:\n logging.warning(\"Failed to stop service {}\".format(self.dkr_name))\n pass", "def unpublishService(self, name):\n self.published[name].Reset()\n del self.published[name]", "def deregisterService(self, serviceName):\n res = internals.blpapi_ProviderSession_deregisterService(\n self.__handle, serviceName)\n return res == 0", "def unbind(self, svc, svc_ref):\n with self._lock:\n if ORDER_HANDLER in svc_ref.get_property(pelix.OBJECTCLASS):\n targets = svc_ref.get_property(ORDER_TARGETS)\n if isinstance(targets, (list, tuple)):\n for target in targets:\n associates = self._target_handlers.get(target, None)\n if svc in associates:\n del associates[svc]\n\n else:\n associates = self._target_handlers.get(str(targets), None)\n if svc in associates:\n del associates[svc]", "def disable(self):\n self.registrar.unregister_service(\"say\", namespace=__name__)", "async def async_will_remove_from_hass(self):\n await super().async_will_remove_from_hass()\n for service in self._device.device_services:\n service.unsubscribe_callback(self.entity_id)", "def unregister(self, name):\r\n\r\n if name in self.components:\r\n logger.debug(\"Unregistering Component: %s\", name)\r\n self.stop([name])\r\n del self.components[name]", "def removeServiceListener(self, listener: ghidra.framework.plugintool.util.ServiceListener) -> None:\n ...", "def unregister(self, name):\r\n raise NotImplementedError", "def stopService(self):\n connectionService = self.getServiceNamed(self.connectionServiceName)\n # Note: removeService() also calls stopService()\n yield self.removeService(connectionService)\n # At this point, all outstanding requests have been responded to\n yield super(CalDAVService, self).stopService()\n 
self.logObserver.stop()", "def stopService(self):\n self.world.stop()", "def deregister(self, service_id):\n return self.agent.http.get(\n lambda x: x.code == 200,\n '/v1/agent/service/deregister/%s' % service_id)", "async def async_will_remove_from_hass(self) -> None:\n self._nobo.deregister_callback(self._after_update)", "def stop(self, context):\n # Unregister the service\n self.__registration.unregister()\n self.__registration = None", "def unbind(cls, name: str):\n if cls.instance() is None:\n return\n\n if not name in cls.instance().m_axis_bindings and not name in cls.instance().m_button_bindings:\n print( 'Unable to unbind: {}. Name not bound to axis or button.'.format( name ) )\n return\n\n if name in cls.instance().m_axis_bindings:\n axis = cls.instance().m_axis_bindings[ name ].axis\n del cls.instance().m_axis_bindings[ name ]\n del cls.instance().m_axis_name_table[ axis ]\n if name in cls.instance().m_button_bindings:\n button = cls.instance().m_button_bindings[ name ].button\n del cls.instance().m_button_bindings[ name ]\n del cls.instance().m_button_name_table[ button ]", "async def _async_unsubscribe_service(self, sid: str) -> None:\n assert self._event_handler\n\n try:\n await self._event_handler.async_unsubscribe(sid)\n except UpnpError as err:\n _LOGGER.debug(\"Failed unsubscribing from: %s, reason: %r\", sid, err)\n except KeyError:\n _LOGGER.warning(\n \"%s was already unsubscribed. AiohttpNotifyServer was \"\n \"probably stopped before we could unsubscribe.\",\n sid,\n )", "def removeService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...", "def unregister(self, target, hostname, listener_type):", "def __del__(self):\n\n if hasattr(self, '_socket') and self._socket is not None:\n try:\n self.unbind()\n except (exceptions.PDUError, exceptions.ConnectionError) as error:\n if len(getattr(error, 'args', tuple())) > 1:\n logging.warning('({0}) {1}. Ignored'.format(error.args[1], error.args[0]))\n else:\n logging.warning('{error}. 
Ignored'.format(error=error))\n self.disconnect()", "async def unlistened(self, value=None):\n pass", "def unsubscribe(self):\n pass # pragma: no cover", "def at_removed(self, host):\n if host != self.host:\n raise ComponentRegisterError(\"Component attempted to remove from the wrong host.\")\n self.host = None", "async def on_terncy_svc_remove(event):\n dev_id = event.data[\"dev_id\"]\n _LOGGER.info(\"terncy svc remove %s\", dev_id)\n if not tern.is_connected():\n await tern.stop()", "def unbind_receive_notification(self, mtype, declare=True):\n if self._callable:\n del self._notification_bindings[mtype]\n if declare:\n self._declare_subscriptions()\n else:\n raise SAMPClientError(\"Client not callable.\")", "def deregister(self):\n self.callback = None", "def unlisten(cls, name: str):\r\n cls.Unlisten(name)", "def unsubscribe(self):\r\n self._unregister()", "def unbind_receive_response(self, msg_tag):\n if self._callable:\n del self._response_bindings[msg_tag]\n else:\n raise SAMPClientError(\"Client not callable.\")", "def stopService(self):\n super(_SiteScheduler, self).stopService()\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None", "def stop(self):\n rospy.wait_for_service(self._service_name)\n try:\n trigger = rospy.ServiceProxy(self._service_name, Trigger)\n resp = trigger(on=False)\n return resp.success, resp.message\n except rospy.ServiceException as e:\n rospy.logerr('%s service call failed: %s' % (self, e))", "def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)", "def unpause_physics_service(self):\n return self._unpause_physics", "def unsubscribe(self, name, callback_function):\n # Remove the callback from _callbacks.\n if self._callbacks.has_key(name):\n if callback_function in self._callbacks[name]:\n self._callbacks[name].remove(callback_function)\n if len(self._callbacks[name]) == 0:\n self._callbacks.pop(name)\n else:\n raise PresenceException('This function is not registered to receive callbacks.')\n else:\n raise PresenceException('Unknown service name. 
No callback handler exists.')", "def _async_device_unavailable(\n _service_info: bluetooth.BluetoothServiceInfoBleak,\n ) -> None:\n push_lock.reset_advertisement_state()", "def unregister(self, service_name, service_addr, addr_cls=None):\n addr_cls = addr_cls or PlainAddress\n etcd_delete = True\n if addr_cls != PlainAddress:\n etcd_delete = False\n\n for service_name in service_name:\n key = self._form_service_key(service_name, service_addr)\n if etcd_delete:\n self._client.delete(key)\n else:\n self._client.put(addr_cls(service_addr).delete_value())\n\n self._services.get(service_addr, {}).discard(service_name)", "def _unregister(self):\r\n if hasattr(self, '_registered') and self._registered:\r\n self._conn.unregisterInterface(self._iTag, self)\r\n self._registered = False", "def stop_notify(self):\n raise NotImplementedError", "def StopControlService(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def unsubscribe(self, item_name):\n self.subscribed = None", "def unbind_receive_call(self, mtype, declare=True):\n if self._callable:\n del self._call_bindings[mtype]\n if declare:\n self._declare_subscriptions()\n else:\n raise SAMPClientError(\"Client not callable.\")", "def deregister_service(self, service: str, **kwargs: Optional[Any]) -> bool:\n self._check_service(service)\n d, s = service.split(\"/\")\n self.logger.debug(\"deregister_service: %s/%s, %s\", d, s, kwargs)\n\n namespace = self._get_namespace(**kwargs)\n\n if \"namespace\" in kwargs:\n del kwargs[\"namespace\"]\n\n kwargs[\"__name\"] = self.name\n\n return self.AD.services.deregister_service(namespace, d, s, **kwargs)", "def stop_service(dauth_directory: DauthDirectoryConnection) -> None:\n print(dauth_directory.stop_service())", "def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]", "def _non_hs_service(self, service_name, address, port, properties):\n identifier = service_name.split(\".\")[0]\n name = properties.get(\"CtlN\")\n service = conf.DmapService(identifier, None, port=port, properties=properties)\n self._handle_service(address, name, service)", "def __del__(self):\n self.unsubscribe()", "def stop(self):\n for service_id in self.keys():\n self[service_id].stop()\n del self[service_id]\n\n self._stopped = True", "def unregister(self, old):\n raise NotImplementedError", "def unregister(self):\r\n self._unregister()", "def _unregister_agent(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_location_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"unregistering agent from SOEF.\")", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def unsubscribe(receiver):", "def service_absent(name, profile=None, **connection_args):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": 'Service \"{}\" is already absent'.format(name),\n }\n\n # Check if service is present\n role = __salt__[\"keystone.service_get\"](\n name=name, profile=profile, **connection_args\n )\n if 
\"Error\" not in role:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Service \"{}\" will be deleted'.format(name)\n return ret\n # Delete service\n __salt__[\"keystone.service_delete\"](\n name=name, profile=profile, **connection_args\n )\n ret[\"comment\"] = 'Service \"{}\" has been deleted'.format(name)\n ret[\"changes\"][\"Service\"] = \"Deleted\"\n\n return ret", "def _unbind_event(\r\n self, handler: Handler, event_type: EventType,\r\n handlers_dict: Dict[str, HandlerData]) -> None:\r\n from apysc.event.handler import append_unbinding_expression\r\n from apysc.event.handler import get_handler_name\r\n self_instance: VariableNameInterface = \\\r\n self._validate_self_is_variable_name_interface()\r\n name: str = get_handler_name(handler=handler, instance=self)\r\n if name in handlers_dict:\r\n del handlers_dict[name]\r\n append_unbinding_expression(\r\n this=self_instance, handler_name=name,\r\n event_type=event_type)", "def disable(self):\n self.registrar.unregister_service(\"map\", namespace=__name__)\n self.registrar.unregister_service(\"directions\", namespace=__name__)", "def stop_subscription(self, event):\r\n _LOGGER.info(\"Shutting down subscriptions\")\r\n asyncio.ensure_future(self.service_panel_stop(event), loop=self.hass.loop)", "def unregister(target: str) -> bool:\n ...", "def unsubscribe(observer):", "def unsubscribe(observer):", "def stop_subscription(self, event):\r\n _LOGGER.debug(\"Shutting down subscriptions\")\r\n asyncio.ensure_future(self.service_panel_stop(event), loop=self.hass.loop)", "def unsubscribe(self, observer, name=None):\n if name is None:\n name = 'default'\n if observer in self._observers:\n del self._observers[observer][name]", "def unregister(self, old):\n if old is not None and old is not Uninitialized:\n try:\n active = self.active.pop(old, None)\n if active is not None:\n for name, type in active:\n getattr(self, type)(old, name, True)\n except TypeError:\n # An error can occur if 'old' is a list or other object for\n # which a weakref cannot be created and used an a key for\n # 'self.active':\n pass", "async def on_disconnected(self):\n self._connected = False\n self._connectedToBroker = False", "def consul_deregister(self):\n try:\n if self.svc_name not in self.consul.agent.services():\n return\n self.log.info(\"consul-deregister\")\n self.consul.agent.service.deregister(\"qemu-{}\".format(self.name))\n except requests.exceptions.ConnectionError:\n pass\n except Exception:\n self.log.exception(\"consul-deregister-failed\", exc_info=True)", "def Stop(self):\n self.stopping = True\n service_names = umpire_service.GetAllServiceNames()\n deferred = self.StopServices(service_names)\n deferred.addBoth(lambda _: reactor.stop())\n return deferred", "def unbind_class(self, className, sequence):\n return super().unbind_class(className, sequence)", "def remote_destroy(self):\r\n # TODO: WHY ???\r\n if not self._owner:\r\n return\r\n\r\n self.stop()\r\n\r\n if self._owner:\r\n self._owner.unregisterInterface(self)\r\n self._owner = None", "async def async_will_remove_from_hass(self) -> None:\n self._disconnect_dispatcher()", "def unregisterStatusListener(self, cb):\r\n self._statusListener.discard(cb)", "def unlock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def unregisterSimulationEvent(self, handle):\r\n raise NotImplementedError()", "def removeStatusChangeListener(self, handle):\n pass", "def test_disownServiceParent(self):\n port = self.port(store=self.store)\n 
port.setServiceParent(self.store)\n port.disownServiceParent()\n self.failIfIn(port, list(IService(self.store)))", "def destroy_service(\n self,\n service_name,\n manager_name,\n ):\n # Gets the node IP address.\n ip = self.get_node_ip(manager_name)\n\n ssh_username = self.get_ssh_username(manager_name)\n ssh_private_key_file = self.get_ssh_private_key_file(manager_name)\n\n # Creates the service.\n docker_utils.service_destroy(\n name=service_name,\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=ssh_username,\n ssh_private_key_file=ssh_private_key_file,\n executor=manager_name,\n logger=self._logger,\n )\n\n # Waits until all the replicas are not running anymore.\n while True:\n count = docker_utils.service_count_running(\n name=service_name,\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=ssh_username,\n ssh_private_key_file=ssh_private_key_file,\n executor=manager_name,\n logger=self._logger,\n )\n if count == 0:\n break\n time.sleep(1)", "async def async_will_remove_from_hass(self):\n self._unsub_dispatcher()", "def deregister(self, srvurl, callback = None, cbdata = None):\n cb = callback\n if not callback:\n cb = self.__errcb\n cbdata = [ SLPError.SLP_OK ]\n err = self.slph.deregister(srvurl, cb, cbdata)\n if err != SLPError.SLP_OK:\n raise SLPError(err)\n if not callback:\n if cbdata[0] != SLPError.SLP_OK:\n raise SLPError(cbdata[0])", "def InterfaceRemoved(self, interface_name):\n pass", "def unsetComponent(self):\n return _libsbml.OutwardBindingSite_unsetComponent(self)", "def off(self) -> bool:\n off_cmd = HomeAssistantPlugin.service_map[self.domain.lower()][\"off\"]\n return self.send(off_cmd)", "def __del__(self):\n\t\trospy.logdebug('MAVROSListener destruction')\n\t\t\n\t\tfor sub in self.__subs.values():\n\t\t\tsub.unregister()", "def stop_app(self, name, stateless):\n raise NotImplementedError", "def remove(self, service):\n os.remove(os.path.join(self.directory, service))", "def destroy(self, name):\n self._assert_space()\n\n service_instance = self._get_service_instance(name)\n if service_instance:\n lastop = service_instance.last_operation\n if 'delete' == lastop['type']:\n return service_instance\n return self._cc \\\n .service_instances(service_instance.guid) \\\n .set_query(accepts_incomplete='true') \\\n .delete()\n return None", "def __del__(self):\r\n self.debug(\"%s unloaded\" % self.name)", "async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)", "def command_stop_tracking(self, active_command):\n\n # Stop the service if it is running\n if self.driver._aprs_service._tracking_update_loop is not None and self.driver._aprs_service._tracking_update_loop.running:\n self.driver._aprs_service._tracking_update_loop.stop()\n return {'message': \"The balloon tracker has been stopped.\"}\n else:\n raise command.CommandError(\"The balloon tracker is not currently running.\")", "def stop_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"stop\", service_name])", "def unbind(self):\n if not self.isBound():\n return defer.fail(SMPPClientSessionStateError('unbind called with illegal session state: %s' % self.sessionState))\n\n self.cancelEnquireLinkTimer()\n \n self.log.info('Waiting for in-progress transactions to finish...')\n \n #Signal that\n # - no new data requests should be sent\n # - no new incoming data requests should be accepted\n self.sessionState = SMPPSessionStates.UNBIND_PENDING\n \n unbindDeferred = defer.Deferred()\n #Wait for any in-progress txns to finish\n 
self.finishTxns().addCallback(self.unbindAfterInProgressTxnsFinished, unbindDeferred)\n #Result is the deferred for the unbind txn\n return unbindDeferred", "def stopService(self):\n self.stopping = True\n self.deferreds = {}\n for name in self.processes:\n self.deferreds[name] = Deferred()\n super(DelayedStartupProcessMonitor, self).stopService()\n\n # Cancel any outstanding restarts\n for name, delayedCall in self.restart.items():\n if delayedCall.active():\n delayedCall.cancel()\n\n # Stop processes in the reverse order from which they were added and\n # started\n for name in reversed(self.processes):\n self.stopProcess(name)\n return gatherResults(self.deferreds.values())", "def __del__(self):\n try:\n pybullet.disconnect(physicsClientId=self._client)\n except pybullet.error:\n pass", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def removeOutputBinding(self, factory, product):\n # remove the {product} monitor from my pile of observers\n self.removeObserver(observer=product.pyre_status)\n # and chain up\n return super().removeOutputBinding(factory=factory, product=product)" ]
[ "0.770065", "0.717073", "0.69855356", "0.6912879", "0.6827076", "0.67656344", "0.66443497", "0.64091897", "0.6407706", "0.63915473", "0.6383925", "0.63309795", "0.6218311", "0.61347353", "0.612612", "0.6044134", "0.60432863", "0.60367554", "0.60173887", "0.60062134", "0.6004882", "0.5990229", "0.59433055", "0.59324473", "0.5892025", "0.5891285", "0.5877299", "0.58352846", "0.5835083", "0.5834169", "0.5763959", "0.5755869", "0.57513165", "0.57462406", "0.57456535", "0.57456404", "0.57309043", "0.57172596", "0.57052815", "0.5688197", "0.5667816", "0.5651024", "0.5632087", "0.5625519", "0.5610389", "0.56067765", "0.5557667", "0.5552902", "0.55526894", "0.5552331", "0.5539991", "0.5533126", "0.55148786", "0.55121666", "0.55112004", "0.5496601", "0.5489712", "0.5489712", "0.5489712", "0.5489712", "0.5489712", "0.54795724", "0.5472149", "0.54655147", "0.54620486", "0.54611266", "0.54554844", "0.54554844", "0.54489565", "0.5437285", "0.5429683", "0.54286236", "0.5417148", "0.54151756", "0.5405261", "0.5401539", "0.5398673", "0.5396145", "0.5388218", "0.53864074", "0.53797966", "0.53764904", "0.53750396", "0.5370836", "0.5367572", "0.536714", "0.5365801", "0.5363025", "0.535839", "0.53568864", "0.5355594", "0.5341265", "0.5340231", "0.533058", "0.53282213", "0.5324742", "0.5317473", "0.53165543", "0.5309545", "0.53087085", "0.5306934" ]
0.0
-1
checkKey is used to check for authentication
def checkKey(self): # TO DO for checking API authentication if self.apikey is None: return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth_publickey(self, username, key):\n return AUTH_FAILED", "def _check_key(self, key):\n raise NotImplementedError", "def api_key_check():\n req_path = request.path\n method_type = request.method\n app.logger.info(\">>> path = {}, method = {}\".format(req_path, method_type))\n\n if not app_props.api_key_check:\n app.logger.debug('>>> api key check closed')\n return None\n\n if req_path in app_props.api_key_white_list:\n app.logger.info('>>> {} in white list, pass'.format(req_path))\n return None\n headers = request.headers\n api_key_from_req = headers.get('x-api-key')\n if not api_key_from_req:\n app.logger.debug('>>> enter api-key error')\n return resp_json(BaseResp.err('no x-api-key header'))\n\n key_obj = Key.query.filter_by(api_key=api_key_from_req).first()\n if key_obj:\n app.logger.debug('>>> consumer_id = {}, secret_key = {}'.format(key_obj.consumer_id, key_obj.secret_key))\n g.consumer_id = key_obj.consumer_id\n g.secret_key = key_obj.secret_key\n return None\n else:\n return resp_json(BaseResp.err('Err api key'))", "def isValidKey(key):\n return True", "def check_auth():", "def check_ssh_key(self):\n return True", "def check_keys(self):", "def check_key(request):\n try:\n access_key = request.session.get('access_key_tw', None)\n if not access_key:\n return False\n except KeyError:\n return False\n return True\n\n\t# User info", "def check_key(cb):\n\n def funcn(*args, **kwargs):\n if 'key' not in kwargs:\n fail(REASON_NO_PASSKEY)\n key = kwargs['key']\n del kwargs['key']\n kwargs['user'] = STORAGE.lookup_user(key)\n if kwargs['user'].is_anonymous:\n fail(REASON_BAD_PASSKEY)\n return cb(*args, **kwargs)\n\n return funcn", "def get_key(self, user, api_key):\n return True", "def remote_verifyKey(self, key, protocol):\r\n if self._authenticated.called:\r\n return Failure(InvalidKey('Only one guess is possible.'))\r\n\r\n if isinstance(protocol, Failure):\r\n self._authenticated.errback(protocol)\r\n else:\r\n if self._key != key:\r\n e = Failure(InvalidKey('Wrong key supplied.'))\r\n self._authenticated.errback(e)\r\n return e\r\n\r\n self._authenticated.callback(protocol)", "def check_key(request):\n\ttry:\n\t\taccess_key = request.session.get('access_key_tw', None)\n\t\tif not access_key:\n\t\t\treturn False\n\texcept KeyError:\n\t\treturn False\n\treturn True", "def get_key(self, user, api_key):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n ApiKey.objects.get(user=user, key=api_key)\r\n except ApiKey.DoesNotExist:\r\n return self._unauthorized()\r\n\r\n return True", "def test_validate_api_key(app, seed_data, key, result):\n user_id, api_key = seed_data\n if key == 'use-valid-key':\n key = api_key\n with app.app_context():\n assert auth.validate_api_key(user_id, key) == result", "def verify_key(self, providerkey = None):\n h = Https(API_DOMAIN)\n\n data = {'apikey' : self.apikey}\n\n if providerkey is not None:\n data['providerkey'] = providerkey\n\n h.request( \"GET\",\n \"/publicapi/verify\"+ urlencode(data),\n headers=self.headers)\n\n request_status = h.getresponse().status\n\n if request_status != 200:\n raise Exception(\"Invalid API Key %s\" % self.apikey)", "def check_api_key(x_api_key: str = Security(api_key_header_auth)):\n\n if x_api_key != API_KEY:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid API Key\",\n )", "def verify_auth_key(cls, auth_key):\n key = ObjectId(auth_key)\n db = cls.mongo_cli.get_database(collection=\"users\")\n if db.count({\"_id\": key}) > 0:\n return True\n return False", "def 
check_empty_key(self, key):\n if key is None or key == \"\" or key == self.empty_api_key:\n print(\"ERROR, A KEY IS EMPTY - CHECK YOUR FILE\")\n return False\n return True", "def check_api(submitted_key, users_key):\r\n if users_key != submitted_key:\r\n return False\r\n else:\r\n return True", "def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid", "def checkKeys( ):\n\n if (HMACKey is None) or (AESKey is None):\n loadKeys()\n\n if (int(time.time()) - creationTime) > const.KEY_ROTATION_TIME:\n rotateKeys()", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def test_api_key (key):\n\tdb = getattr(g,'db', None)\n\n\tif isinstance(key, unicode):\n\t\tkey = key.encode('utf-8')\n\n\tqry = \"SELECT apikey FROM api_keys WHERE apikey=%s;\"\n\twith db as cur:\n\t\treturn 0 < cur.execute(qry, (key,))", "def valid_key(self): \n self.so.ValidKey.restype = c_bool\n result = self.so.ValidKey()\n return result", "def test_validate_yubikey(self):\n from_key = self.yk_rnd.from_key(self.yk_public_id, self.yk_key)\n self.assertTrue(pyhsm.yubikey.validate_yubikey_with_aead( \\\n self.hsm, from_key, self.aead.data, self.kh_validate))", "def _check_auth(self, group_id):\n return", "def execute_request(self, request: Request) -> bool:\r\n print(\"Handler is validating key\")\r\n if request.key is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Key is not valid\")\r\n return False", "def check_key(key):\n # Get config\n line = getfromconfig()\n\t\n # Open a new connection\n conn = my.MySQLConnection(line[0], line[1], line[2], line[3])\n set_of_tables = ['A']\n\n # Go through each table\n for table in set_of_tables:\n c = my.radiogaga_db_get(conn, table, {'ukey': key})\n if len(c) > 0:\n return(1)\n\n # End connection and return answer\n conn.end_connection()\n return(0)", "def _key_to_key_verify(self):\n params = {\n 'host': self._redis_host,\n 'port': self._redis_port,\n 'db': self._from_db,\n 'password': self._password\n }\n client = RedisPool(**params)\n proxy_list = client.get_all()\n if proxy_list:\n self._loop.run_until_complete(self.verify(proxy_list))\n self._update(client)", "def verify_request_session_key(self, key, uuid):\n return self.compute_request_session_key(uuid) == key", "def get_key_input():\n return get_input(message='Please enter your master key:',\n secure=True, check_timer=False)", "def check_key(self, key, key_pkl):\r\n start_time = time.time()\r\n # Verify that when we reload the KeyData from the pickled file, the\r\n # same key can be found in it, and is not equal to more than one\r\n # other key.\r\n key_data = cPickle.load(open(key_pkl, 'rb'))\r\n found = sum(key == other_key for other_key in key_data.keys)\r\n msg = ''\r\n if found == 0:\r\n msg = 'Key not found in unpickled KeyData file'\r\n if key_data.keys:\r\n # This is to make debugging in pdb easier, by providing\r\n # the offending keys in the local context.\r\n # key_data_keys = list(key_data.keys)\r\n ## import pdb; pdb.set_trace()\r\n pass\r\n elif found > 1:\r\n msg = 'Multiple equal keys found in unpickled KeyData file'\r\n if msg:\r\n raise AssertionError(\r\n \"%s. Verify the __eq__ and __hash__ functions of your \"\r\n \"Ops. The file is: %s. 
The key is: %s\" %\r\n (msg, key_pkl, key))\r\n # Also verify that there exists no other loaded key that would be equal\r\n # to this key. In order to speed things up, we only compare to keys\r\n # with the same version part and config md5, since we can assume this\r\n # part of the key is not broken.\r\n for other in self.similar_keys.get(get_safe_part(key), []):\r\n if other is not key and other == key and hash(other) != hash(key):\r\n raise AssertionError(\r\n \"Found two keys that are equal but have a different hash. \"\r\n \"Verify the __eq__ and __hash__ functions of your Ops. \"\r\n \"The keys are:\\n %s\\nand\\n %s\\n(found in %s).\" %\r\n (other, key, key_pkl))\r\n\r\n self.time_spent_in_check_key += time.time() - start_time", "def test_valid_keys(client):\n response=client.post(\"/signin\",data=dict(username=TestSignin.email, password=TestSignin.password), content_type=\"multipart/form-data\")\n data=json.loads(response.data)\n assert response.status_code==400\n assert data[\"error\"] == \"Please provide email and password as keys\"", "def test_api_key(self):\n self.assertEqual(self.route4me.key, '11111111111111111111111111111111')", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def private_key(self):", "def validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)", "def __contains__(self, key):\n return self.cli.passwords.contains(key)", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def authenticate(self, request=None):\r\n try:\r\n token = request.META.get('HTTP_AUTHORIZATION') or request.REQUEST['key']\r\n accesskey = AccessKey.objects.select_related('user').get(key=token)\r\n request.user = accesskey.user\r\n return request.user and request.user.is_active\r\n\r\n except(KeyError, AccessKey.DoesNotExist):\r\n return False", "def chk_key(chkname):\n return CHK_PREFIX + chkname", "def match_api_keys(key, ip):", "def is_valid_key(key, crypt_method):\n logger.info(f\"key: {key}, crypt_method: {crypt_method}\")\n if crypt_method == 'C':\n while type(key) is not int or key not in range(0, 95):\n try:\n key = Check.is_integer(key)[1]\n if key not in range(0, 95):\n raise ValueError\n except (TypeError, ValueError):\n print(\"You must enter an integer between 1 and 95!\")\n key = input(\"Enter an encryption key\\n>> \")\n elif crypt_method in ('M', 'P'):\n pass\n else:\n return False\n return True, key", "def userkey(hash):\n user = hl.getUser(\"Name\",session['name'])\n flagCheck = hl.checkDistributeFlag(user[\"Name\"])\n if flagCheck == False:\n return getKeys()\n elif flagCheck == True:\n flash(\"You have been logged out. Please contact your system administrator\")\n return redirect(url_for('logout'))", "def handle_key(self, k):\n\t\treturn False", "def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' 
in key:\n raise ValueError('Key must not contain dots.')", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def key_is_present(host):\n if(config.HOST_TYPE == 'linux'):\n status, stdout, stderr = host.conn.execute_command('ls /root/.ssh')\n if status:\n return False\n if 'id_rsa.pub' in stdout[0]:\n return True\n return False\n else:\n status, stdout, stderr = host.conn.execute_command('cmd /c dir \"C:\\\\Program Files (x86)\\\\freeSSHd\"')\n if status:\n return False\n for value in stdout:\n if 'RSAKey.cfg' in value:\n return True\n return False", "def getAuthKey( self ):\n d = {\n \"frob\" : FLICKR[ \"frob\" ],\n \"perms\" : \"delete\"\n }\n sig = self.signCall( d )\n url = self.urlGen( api.auth, d, sig )\n ans = \"\"\n try:\n webbrowser.open( url )\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n ans = raw_input(\"Have you authenticated this application? (Y/N): \")\n except:\n print(str(sys.exc_info()))\n if ( ans.lower() == \"n\" ):\n print(\"You need to allow this program to access your Flickr site.\")\n print(\"Copy-paste following URL into a web browser and follow instructions:\")\n print(url)\n print(\"After you have allowed access restart uploadr.py\")\n sys.exit()", "def req_CHECKPRESENT(self, key):\n # TODO: so we need to maintain mapping from urls to keys. Then\n # we could even store the filename within archive\n # Otherwise it is unrealistic to even require to recompute key if we\n # knew the backend etc\n lgr.debug(\"VERIFYING key %s\" % key)\n akey, afile = self._get_akey_afile(key)\n if self.get_contentlocation(akey):\n self.send(\"CHECKPRESENT-SUCCESS\", key)\n else:\n # TODO: proxy the same to annex itself to verify check for archive.\n # If archive is no longer available -- then CHECKPRESENT-FAILURE\n self.send(\"CHECKPRESENT-UNKNOWN\", key)", "def api_auth_validate(request, access_key):\n if not request.is_json:\n return {'error' : 'Bad request, payload must be JSON', 'code' : 400}\n if not 'working_repo' in session:\n return {'error' : 'Operation requires authentication', 'code': 401}\n if session['working_repo'] != access_key:\n return {'error' : 'Not authorized for this operation', 'code' : 403}\n \n return True", "def test_check_privatekey_valid(self):\n key = load_privatekey(FILETYPE_PEM, client_key_pem)\n cert = load_certificate(FILETYPE_PEM, client_cert_pem)\n context = Context(SSLv23_METHOD)\n context.use_privatekey(key)\n context.use_certificate(cert)\n assert None is context.check_privatekey()", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def check_api_keys(self, request):\n app_id, api_obj = request.META.get(\"HTTP_APP_ID\"), None\n api_secret_key = request.META.get(\"HTTP_API_SECRET_KEY\")\n if app_id and api_secret_key:\n # validate app_id and api_secret_key\n app_id_bool = self._validate_app_id(app_id)\n if not app_id_bool:\n return False, self.app_id_message\n api_secret_key_bool = self._validate_api_secret_key(api_secret_key)\n if not api_secret_key:\n return False, self.api_secret_key_message\n try:\n api_obj = ApiApp.objects.get(app_id=app_id, api_secret_key=api_secret_key, active=True)\n if api_obj:\n self.app(request, api_obj)\n return True, ''\n except ApiApp.DoesNotExist:\n self.app(request, api_obj)\n return False, self.message\n else:\n self.app(request, api_obj)\n return False, self.message", "def try_api_keys(self, request, **kwargs):\n email = request.META.get('HTTP_PANDA_EMAIL') or request.GET.get('email')\n 
api_key = request.META.get('HTTP_PANDA_API_KEY') or request.GET.get('api_key')\n\n if email:\n email = unquote(email)\n\n if not email or not api_key:\n return False\n\n try:\n user = UserProxy.objects.get(username=email.lower())\n except (UserProxy.DoesNotExist, UserProxy.MultipleObjectsReturned):\n return False \n\n if not user.is_active:\n return False\n \n request.user = user\n\n return self.get_key(user, api_key)", "def test_getKey_keyexists(self):\n filename = self.mktemp()\n with open(filename, 'wb') as fh:\n fh.write(SEKRIT_KEY)\n fh.flush()\n\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))\n self.assertEqual(SEKRIT_KEY, key,\n \"\"\"The example key and the one read from file differ!\n key (in hex): %s\n SEKRIT_KEY (in hex): %s\"\"\"\n % (key.encode('hex'), SEKRIT_KEY.encode('hex')))", "def test_check_keys_exist_for_provider_string(self):\n\n secret_key = None\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def check_auth_gssapi_keyex(self, username,\n gss_authenticated=AUTH_FAILED,\n cc_file=None):\n if gss_authenticated == AUTH_SUCCESSFUL:\n return AUTH_SUCCESSFUL\n return AUTH_FAILED", "def test_good_with_no_prior_key(self):\n # config seems to be shared across tests, so we have to specifically set\n # it to None.\n config.set(xsrf_token_key=None)\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def key_valid(key_data):\n\n gpg = gnupg.GPG(gnupghome=\"gnupg\")\n import_result = gpg.import_keys(key_data)\n\n if import_result.count == 1:\n gpg.delete_keys(import_result.fingerprints[0])\n return True\n else:\n return False", "def validate_keystore_key(args):\n expected_name = 'keyRings/%s/cryptoKeys/%s'\n expected_name %= (args.kms_keyring, args.kms_key)\n describe_output = ''\n try:\n describe_output = subprocess.check_output(\n ['gcloud', 'kms', 'keys', 'describe', args.kms_key,\n '--project', args.kms_project,\n '--location', args.kms_location,\n '--keyring', args.kms_keyring,\n '--format', 'value(name)'])\n except subprocess.CalledProcessError:\n pass\n if expected_name in describe_output:\n return\n # Print warning and exit if output did not include the key.\n warning = 'KMS key \"%s\" not found in keyring=%s project=%s location=%s'\n warning %= (args.kms_key,\n args.kms_keyring,\n args.kms_project,\n args.kms_location)\n Print.YL(warning)\n sys.exit(1)", "def verify_decrypt_key(self):\r\n\t\tpercent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif percent_english > 50:\r\n\t\t\tself.right_key = False\r\n\t\t#If the key does not pass, the program will give you a warning and prompt you for another key. 
\r\n\t\telse: \r\n\t\t\tprint(f\"After decryption, it looks like only {percent_english}% of your words are english, you may have entered the wrong key?\")", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n .filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def test_blank_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def key():", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))", "def key_request(self, user):\n\t\tclient_log.debug(f'Запрос публичного ключа для {user}')\n\t\treq = {\n\t\t\tACTION: PUBLIC_KEY_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: user\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 511:\n\t\t\treturn ans[DATA]\n\t\telse:\n\t\t\tclient_log.error(f'Не удалось получить ключ собеседника{user}.')", "def test_no_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._no_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def authenticate_header(self, request):\n return \"Api key authentication failed.\"", "def check_key(self):\n\n if self.type == \"RSA\" and self.size < 1024:\n raise HostkeyError(\"RSA keys must at least be 1024 bits.\")\n elif self.type == \"DSA\" and self.size != 1024:\n raise HostkeyError(\"DSA keys can only be 1024 bits.\")\n elif self.type == \"ECDSA\" and self.size not in [256, 384, 521]: # yes, that is *really* 521 bits, not a typo!\n raise HostkeyError(\"ECDSA key must be either 256, 384 or 521 bits (yes, 521 not 512!)\")\n elif self.type ==\"ED25519\" and self.size != 128:\n raise HostkeyError(\"ED25519 keys have a fixed size, which cannot be altered.\") # can't really happen, size is ignored for ED25519\n\n # if privkey is already there check size\n self.key_exists = False\n self.key_current_size = 0\n if os.path.exists(self.fullpath):\n self.key_exists = True\n if self.type == \"ED25519\":\n self.curve = \"EC25519\"\n self.key_current_size = 128 # somewhat erbitrary, attack complexity on ED25519 is larger that brute forcing a 128bit key\n self.key_exists = True\n elif self.type == \"RSA1\":\n self.key_exists = True\n self.key_current_size = 1024\n else:\n try:\n with open(self.fullpath, \"rb\") as key_file:\n self.privkey = crypto_serialization.load_pem_private_key(key_file.read(), password=None, backend=crypto_default_backend())\n except IOError:\n raise HostkeyError(get_exception())\n\n if self.type == \"DSA\" or self.type == \"RSA\":\n self.key_current_size = self.privkey.key_size\n elif self.type == \"ED25519\":\n self.key_current_size = 128\n elif self.type == \"ECDSA\":\n self.pubkey = self.privkey.public_key()\n if self.pubkey.curve.name == \"secp256r1\":\n self.key_current_size = 256\n elif self.pubkey.curve.name == \"secp384r1\":\n self.key_current_size = 384\n elif self.pubkey.curve.name == \"secp521r1\":\n self.key_current_size = 521\n else:\n self.curve = self.pubkey.curve.name", "def patched_paramiko_transport_verify_key(self, host_key, sig): # pylint: disable=W0613\n key = 
self._key_info[self.host_key_type](Message(host_key)) # pylint: disable=W0212\n if key is None:\n raise SSHException(\"Unknown host key type\")\n # Patched: no more checks are done here\n self.host_key = key", "def is_authenticated(self, request, **kwargs):\n\n try:\n username, api_key = self.extract_credentials(request)\n except ValueError:\n return self._unauthorized()\n\n if not username or not api_key:\n return self._unauthorized()\n\n # cache key=api_key, value=User-object\n user = self._cache.get(api_key)\n logging.error(\"cached ApiKey: %r value: %r==%r\" % (api_key, user and user.username, user))\n if not user:\n try:\n lookup_kwargs = {username_field: username}\n user = User.objects.get(**lookup_kwargs)\n ApiKey.objects.get(user=user, key=api_key)\n self._cache.set(api_key, user, self._ttl_seconds)\n except (User.DoesNotExist, User.MultipleObjectsReturned,\n ApiKey.DoesNotExist, ApiKey.MultipleObjectsReturned):\n return self._unauthorized()\n\n if user.username != username:\n return self._unauthorized()\n\n if not self.check_active(user):\n return False\n\n key_auth_check = self.get_key(user, api_key)\n if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):\n request.user = user\n\n return key_auth_check", "def _check_api_key(self):\n try:\n self.maps.places_nearby(\n location=(53.909804, 27.580184),\n radius=650,\n open_now=False,\n language=config.LANGUAGE,\n type='cafe',\n # rank_by='distance', # IMPORTANT: cannot use rank_by and radius options together\n page_token=None,\n )\n except Exception as e:\n\n with self.__writelock:\n self.print(f'ERROR: bad API key \"{self.maps.key}\" (tracker={self.stats.previous_requests})\\n')\n raise e", "def test_get_user_api_keys(self):\n pass", "def validate_api_key(self) -> tuple[bool, str]:\n response = self._api_query('wallets')\n\n if response.status_code != HTTPStatus.OK:\n result, msg = self._process_unsuccessful_response(\n response=response,\n case='validate_api_key',\n )\n return result, msg\n\n return True, ''", "def test_create_api_key(self):\n pass", "def test_validate_credentials(self):\n pass", "def _check(self, key):\n if not self.contains(key):\n raise KeyError(\"ConfigManager does not contain key '%s'\" % key)", "def test_missingKey(self):\n self.assertIsNone(self.users.key(\"mystery domain\", \"mystery user\"))", "def checkkey(self, k):\r\n if k == self.shortcut:\r\n return True\r\n return False", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def load_key():", "def authenticate_key(api_key):\n global db\n if db is None:\n init_db()\n try:\n if not api_key:\n return False\n user_model = Query()\n user = db.search(user_model.api_key == api_key)[0]\n if user:\n return user['username']\n except Exception as e:\n LOGGER.exception(e)\n LOGGER.error(\"Cannot retrieve user for this authentication key {}\".format(api_key))\n return False", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. 
\r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def _check_transform_key(key: Hashable) -> None:\n _test_hashable = hash(key) # The only 'real' way to make sure is hashable\n # if not isinstance(key, Hashable):\n # raise TypeError((type(key), \"transformation lookup key is not hashable\"))", "def apicheck():\n\n async def predicate(ctx: commands.Context):\n travitia_keys = await ctx.bot.get_shared_api_tokens(\"travitia\")\n key = travitia_keys.get(\"api_key\") is None\n if ctx.invoked_with == \"help\" and key:\n return False\n if key:\n await ctx.send(\"The API key is not registered, the command is unavailable.\")\n return False\n return True\n\n return commands.check(predicate)", "def test_key_use() -> None:\n # check key usage method\n # don't test if all keys are translated, crowdin will monitor it\n lib_folder = Path(__file__).parents[1] / \"sepal_ui\"\n\n assert \"test_key\" in ms.key_use(lib_folder, \"ms\")\n\n return", "def test_incorrect_api_key(self):\n with self.subTest(\"Missing API key\"):\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"ApiKey {self.web_user.username}:\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Missing username\"):\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"ApiKey :{self.web_user_api_key.key}\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Missing header\"):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Incorrect API key\"):\n response = self.client.get(\n self.url,\n HTTP_AUTHORIZATION=f\"ApiKey {self.web_user.username}:Incorrectkey\",\n )\n self.assertEqual(response.status_code, 401)", "def test_storeAndRetrieveKey(self):\n domain, username, password, key = \"domain\", \"user\", \"password\", \"key\"\n\n self.assertStored(domain, username, password, key)\n self.assertEqual(self.users.key(domain, username), key)", "def check_tokenterminal_key(show_output: bool = False) -> str:\n\n if show_output:\n console.print(\"Checking status...\")\n\n current_user = get_current_user()\n\n if current_user.credentials.API_TOKEN_TERMINAL_KEY == \"REPLACE_ME\":\n logger.info(\"Token Terminal key not defined\")\n status = KeyStatus.NOT_DEFINED\n else:\n token_terminal = TokenTerminal(\n key=current_user.credentials.API_TOKEN_TERMINAL_KEY\n )\n\n if \"message\" in token_terminal.get_all_projects():\n logger.warning(\"Token Terminal key defined, test failed\")\n status = KeyStatus.DEFINED_TEST_FAILED\n else:\n logger.info(\"Token Terminal key defined, test passed\")\n status = KeyStatus.DEFINED_TEST_PASSED\n\n if show_output:\n console.print(status.colorize())\n\n return str(status)", "def get_key(self, user):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n key = ApiKey.objects.get(user=user)\r\n except ApiKey.DoesNotExist:\r\n return False\r\n\r\n return key.key", "def getAuthKey(self):\r\n auth_key = 'Que despierte la Red'\r\n assert len(auth_key) == self.AUTH_KEY_LEN\r\n return auth_key", "def _verify_keystore(self):\n keystore_uid = FileUtil(self.keystore_file).uid()\n if keystore_uid not in (-1, HostInfo.uid):\n raise IOError(\"not owner of keystore: %s\" % self.keystore_file)\n keystore_dir = os.path.dirname(self.keystore_file)\n if FileUtil(keystore_dir).uid() != HostInfo.uid:\n raise IOError(\"keystore dir not found or not owner: %s\" % keystore_dir)\n if (keystore_uid != -1 and (os.stat(self.keystore_file).st_mode & 0o077)):\n raise IOError(\"keystore is accessible to 
group or others: %s\" % self.keystore_file)", "def attempt(chal, request):\n team = Teams.query.filter_by(id=session['id']).first()\n if locked(chal):\n return False, 'Challenge Locked. You need at least {} points.'.format(chal.unlock_at)\n \n provided_key = request.form['key'].strip()\n chal_keys = Keys.query.filter_by(chal=chal.id).all()\n for chal_key in chal_keys:\n if get_key_class(chal_key.type).compare(chal_key.flag, provided_key):\n return True, 'Correct'\n return False, 'Incorrect'", "def test_rsa(self):\n key = c.KEY_RSA\n usage = [\n c.KU_DIGITALSIGNATURE,\n c.KU_NONREPUDIATION,\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n ]\n self.assertTrue(utils.check_key_usage(key, usage))", "def _key_check(self, key_list, chk_dict=None):\n exists = False\n if chk_dict is None:\n chk_dict = self._e_dict\n for key in key_list:\n exists = key in chk_dict.keys()\n if exists:\n chk_dict = chk_dict[key]\n else:\n break\n return exists", "def key():\n pass", "def key():\n pass", "def test_get(self):\n key = self.key_gen.get()\n key2 = self.key_gen.get()\n\n self.assertEqual(key, key2 - 1)" ]
[ "0.7483722", "0.7159838", "0.7102654", "0.70746124", "0.70519286", "0.69825", "0.69745266", "0.6936464", "0.6857674", "0.6834089", "0.67978585", "0.6719186", "0.661024", "0.65871054", "0.64848256", "0.6452751", "0.6445985", "0.644107", "0.6375559", "0.6362564", "0.6354385", "0.6344735", "0.62626284", "0.6254767", "0.6209742", "0.6201196", "0.6198189", "0.61824095", "0.61750257", "0.6174351", "0.61684465", "0.616279", "0.6139381", "0.6121935", "0.60924387", "0.6050223", "0.60475445", "0.604292", "0.6028092", "0.6017742", "0.60026824", "0.59959173", "0.5986703", "0.59849393", "0.59561914", "0.59494483", "0.5939697", "0.5939225", "0.59274983", "0.5924119", "0.5913804", "0.5906216", "0.58919203", "0.58790374", "0.58746624", "0.5870707", "0.58522415", "0.58434844", "0.5838719", "0.5819744", "0.58182645", "0.58168614", "0.58101964", "0.5808335", "0.57974637", "0.57913667", "0.579019", "0.57851726", "0.57833344", "0.5781537", "0.57796746", "0.5776966", "0.57753944", "0.57679874", "0.57673377", "0.5749965", "0.5745414", "0.5738429", "0.5734095", "0.57312745", "0.57288444", "0.57214046", "0.5712755", "0.57090783", "0.57040954", "0.57022214", "0.5695404", "0.5685032", "0.5683717", "0.568106", "0.56735927", "0.5673216", "0.56708455", "0.5667672", "0.5662317", "0.56621665", "0.56575763", "0.56530577", "0.56530577", "0.5650476" ]
0.76604617
0
This function is used to update API endpoints
def update(self):
    # TO DO for updating urls if changed
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_api(self) -> None:\n LOG.debug(\"%sTrying to update RestAPI through client\", self.log_prefix)\n response_put = cast(\n Dict,\n self._api_client.put_rest_api(restApiId=self._api_physical_id, mode=\"overwrite\", body=self._swagger_body),\n )\n LOG.debug(\"%sPut RestApi Result: %s\", self.log_prefix, response_put)", "def _update_from_rest_data(self) -> None:", "def update():\n return 'update api in put'", "def endpoint_update(self, endpoint_name=None, config=None):\n if config is None:\n raise Exception(\"Config required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint', 'POST', body=config)\n else:\n self.request('/v1.1/endpoints/%s' % endpoint_name, 'POST', body=config)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def test_deprecated_update_bs(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.put(\n '/v1/sim/configs/ae',\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(data['message'], 'Method Not Allowed.')\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(res.status_code, 405)", "def update(self, params):", "def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass", "def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def update_api(self, ApiId: str, ApiKeySelectionExpression: str = None, Description: str = None, DisableSchemaValidation: bool = None, Name: str = None, RouteSelectionExpression: str = None, Version: str = None) -> Dict:\n pass", "def test_deprecated_update_ae(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.put(\n '/v1/sim/configs/ae',\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(data['message'], 'Method Not Allowed.')\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(res.status_code, 405)", "def update(self, *args, **kwargs):", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_patch_action(self):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def _register_api(app):\n\n app.add_url_rule('/shipping/',\n \"put_shipping_method\", put_shipping_method, methods=['PUT'])", "def updateResource(self, authenticationToken, resource):\r\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def update(self, *args, **kwargs): # real signature unknown\n pass", "def fusion_api_edit_switch(self, body, uri, api=None, headers=None):\n return self.switch.update(body, uri, api, headers)", "def fusion_api_patch_interconnect(self, body, uri, param='', api=None, headers=None):\n return self.ic.patch(body=body, uri=uri, api=api, headers=headers, param=param)", "def register_endpoints(api):\n api.add_resource(EventList, '/events')", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_update_entity_action(self):\n pass", 
"def process_resource_api(self, resources, resource, api, context):\n pass", "def fusion_api_update_task(self, body, uri, api=None, headers=None):\n return self.task.update(body, uri, api, headers)", "def update( ):\r\n pass", "def update_controller(self):", "def update(self, *args, **kw):\n pass", "def fusion_api_generic_patch(self, body, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.patch(uri=uri, headers=headers, body=json.dumps(body))", "def fusion_api_edit_rack(self, body, uri, api=None, headers=None):\n return self.rack.update(body, uri, api, headers)", "def update(*args):", "def put(self, endpoint: str, json: Any = None) -> Any:\n pass", "def test_mocked_api_update_value(self):\n c = Client()\n patch_url = \"/apimock/mocked/api/account/45/?format=json\"\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(self.patch_url, data={\"account\": 456})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 456}', response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"account\": 456}', response.content)\n response = c.patch(self.patch_url, data={\"account\": 654})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 654}', response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"account\": 654}', response.content)", "def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)", "def update(self) -> None:\n ...", "def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"", "def do_api_check(self):\n for charid in self.conn.get_api_characters():\n self.update_character(charid)\n for corpid in self.conn.get_api_corporations():\n self.update_corporation(corpid)\n for allyid in self.conn.get_api_alliances():\n self.update_alliance(allyid)", "def taco_test_post_param_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('POST', '/item/4', body=body)\n result = webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))\n # webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))\n debug.log('result', result)", "def reorder_api(apis, base):\n return", "def _load_api(self):\n self.app.add_url_rule('/scheduler', 'get_scheduler_info', self._apply_auth(api.get_scheduler_info))\n self.app.add_url_rule('/scheduler/jobs', 'add_job', self._apply_auth(api.add_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs', 'get_jobs', self._apply_auth(api.get_jobs))\n self.app.add_url_rule('/scheduler/jobs/reload_jobs', 'reload_jobs', self._apply_auth(api.reload_jobs), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'get_job', self._apply_auth(api.get_job))\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'delete_job', self._apply_auth(api.delete_job), methods=['DELETE'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>', 'update_job', self._apply_auth(api.update_job), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule', 'reschedule_job', self._apply_auth(api.reschedule_job), 
methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<id>/reschedule_once', 'reschedule_job_once', self._apply_auth(api.reschedule_job_once), methods=['PATCH'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/pause', 'pause_job', self._apply_auth(api.pause_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/resume', 'resume_job', self._apply_auth(api.resume_job), methods=['POST'])\n self.app.add_url_rule('/scheduler/jobs/<job_id>/run', 'run_job', self._apply_auth(api.run_job), methods=['POST'])", "def fusion_api_update_ls(self, body=None, uri=None, api=None, headers=None):\n return self.ls.put(body, uri, api, headers)", "def __update_data(self):\r\n # loop = asyncio.get_event_loop()\r\n api_base_info_req = self.loop.run_in_executor(None, self.__get_base_info_api)\r\n api_status_req = self.loop.run_in_executor(None, self.__get_status_api)\r\n api_status_res = yield from api_status_req\r\n api_base_info_res = yield from api_base_info_req\r\n\r\n self.__set_base_info_api(api_base_info_res)\r\n self.__set_status_api(api_status_res)", "def test_update(self):\n url_register = reverse('auth_register')\n resp = self.client.post(url_register, {\n \"username\": \"user\",\n \"password\": \"lol1lol1\",\n \"password2\": \"lol1lol1\",\n \"email\": \"lol@gmail.com\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"bio\": \"\"\n })\n print(resp.headers[\"Location\"])\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n url_auth = reverse('token_obtain_pair')\n resp = self.client.post(url_auth, {'username':'user', 'password':'lol1lol1'}, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n token = resp.data['access']\n\n url_upd = reverse('auth_update_profile', kwargs={'pk': 2})\n\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)\n resp = client.patch(url_upd, {\n \"username\": \"user3\",\n \"email\": \"lol@gmail.com\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"image\": \"\",\n \"bio\": \"\",\n \"city\": \"\",\n \"phone\": \"\"\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get().username, 'user3')", "def update():", "def update():", "def _update_params(self):\n pass", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def handle_update(self):\n try:\n for controller in self.controllers:\n self._handle_single_update(controller)\n except urllib3.exceptions.HTTPError as http_error:\n raise HttpError('Error talking to Kubernetes', http_error) from http_error", "def update_params(self):\n pass", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def fusion_api_patch_li(self, body=None, uri=None, api=None, headers=None):\n return self.li.patch(body, uri, api, headers)", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update_endpoints(user_id):\n\n if not request.json:\n abort(400)\n\n new_ips = request.json[\"ips\"]\n\n db_conn = sqlite3.connect(db_path)\n db = db_conn.cursor()\n db_ips = []\n try:\n for row in db.execute(\"SELECT ip FROM ips WHERE username=?;\", [user_id]):\n db_ips.append(row[0])\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n\n to_add = []\n to_delete = []\n\n # Put the ips not present in the database in the list of ips to add\n for new_ip in 
new_ips:\n if(new_ip not in db_ips):\n to_add.append((user_id, new_ip))\n # Put the ips not in the new list in the list of ips to delete\n for db_ip in db_ips:\n if(db_ip not in new_ips):\n to_delete.append((user_id, db_ip))\n\n try:\n db.executemany('INSERT INTO ips (username, ip) VALUES (?,?);', to_add)\n db.executemany('DELETE FROM ips WHERE username=? AND ip=?;', to_delete)\n db_conn.commit()\n db_conn.close()\n except sqlite3.IntegrityError:\n db_conn.close()\n abort(400)\n return jsonify({'status':True})", "def test_ipam_services_update(self):\n pass", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def create_api_endpoints(app):\n manager = APIManager(app, flask_sqlalchemy_db=models.database)\n manager.create_api(models.State, results_per_page=0)\n manager.create_api(models.Party, results_per_page=0)\n manager.create_api(models.Candidate, results_per_page=0)\n manager.create_api(models.Election, results_per_page=0)\n manager.create_api(models.ElectoralCollege,\n results_per_page=0, collection_name='electoralcollege')\n manager.create_api(models.PartiesInvolved,\n results_per_page=0, collection_name='partiesinvolved')\n manager.create_api(models.ElectionsToState,\n results_per_page=0, collection_name='electionstostate')", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def test_ipam_services_partial_update(self):\n pass", "def test_update():\n payload = {'age': 99}\n sample_uuid = get_sample_id()\n response = requests.put(f'http://localhost:5000/api/persons/{sample_uuid}', json=payload)\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def access_gemini_url_put_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.put(url)", "def _update(self, host):\n pass", "def put(self):\n try:\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('PUT returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n if agent_id is None:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n agent = self.db.get_agent(agent_id)\n\n if agent is not None:\n common.echo_json_response(self, 404, \"agent id not found\")\n logger.info('PUT returning 404 response. 
agent id: ' + agent_id + ' not found.')\n\n if \"reactivate\" in rest_params:\n agent['operational_state']=cloud_verifier_common.CloudAgent_Operational_State.START\n asyncio.ensure_future(self.process_agent(agent, cloud_verifier_common.CloudAgent_Operational_State.GET_QUOTE))\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n elif \"stop\" in rest_params:\n # do stuff for terminate\n logger.debug(\"Stopping polling on %s\"%agent_id)\n self.db.update_agent(agent_id,'operational_state',cloud_verifier_common.CloudAgent_Operational_State.TENANT_FAILED)\n\n common.echo_json_response(self, 200, \"Success\")\n logger.info('PUT returning 200 response for agent id: ' + agent_id)\n else:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning(\"PUT returning 400 response. uri not supported\")\n\n except Exception as e:\n common.echo_json_response(self, 400, \"Exception error: %s\"%e)\n logger.warning(\"PUT returning 400 response. Exception error: %s\"%e)\n logger.exception(e)\n self.finish()", "def updateResourceDef(url, user, pWd, resourceName, resJson):\n \n print(\"\\tupdating resource for catalog:-\" + url + \" resource=\" + \n resourceName + ' user=' + user)\n print(\"\\t\" + json.dumps(resJson))\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"} \n tResp = requests.put(apiURL, data=json.dumps(resJson), headers=header, \n auth=HTTPBasicAuth(user, pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n print(\"\\tyay - update resource worked...\")\n print(tResp)\n return tResp.status_code\n else:\n # not valid\n print(\"\\tdarn - update resource failed...\")\n print(tResp)\n return tResp.status_code", "def handle_task_enable(self, request):\n \"\"\"\n @api {post} /task/:id/enable Enable a task\n @apiName EnableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {post} /task/:id/disable Disable a task\n @apiName DisableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/(en|dis)able', request.uri_path)\n task = match.group(1)\n action = match.group(2)\n\n enabled = (action == 'en')\n\n tasks = self.cluster.config.get('tasks')\n\n if task in tasks:\n code = 200\n\n old = tasks[task].copy()\n tasks[task]['enabled'] = enabled\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskUpdated', task, old, tasks[task])\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"updated\": True})\n\n return HTTPReply(code = code, body = body, headers = headers)\n else:\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return HTTPReply(code = 404, headers = headers)", "def test_get_api_resources(self):\n pass", "def 
fusion_api_patch_fabric(self, uri, body, api=None, headers=None):\n return self.fabric.patch(uri, body, api, headers)", "def update_resolver_endpoint(ResolverEndpointId=None, Name=None):\n pass", "async def test_update_dispatch_route_by_id(client):\n update_dispatch_route_params = null\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = await client.request(\n method='PUT',\n path='/v1/fleet/dispatch/routes/{route_id}'.format(route_id=56),\n headers=headers,\n json=update_dispatch_route_params,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def update_endpoint_in_sipserver(self, endpoint: str, password: str) -> None:", "def fusion_api_edit_interconnect_ports(self, body, uri, api=None, param='', headers=None):\n param = '/update-ports%s' % param\n return self.ic.put(body=body, uri=uri, api=api, headers=headers, param=param)", "def endpoints(self, endpoints):\n\n self._endpoints = endpoints", "def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != 
internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret", "def update(self):\n self._client.patch(self)", "def _register_api(app):\n \n app.add_url_rule('/like/', \n \"new_like\", new_like, methods=['PUT'])\n app.add_url_rule('/like/', \n \"delete_like\", delete_like, methods=['DELETE'])", "def test_deprecated_update_ms(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.put(\n '/v1/sim/configs/ms',\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(data['message'], 'Method Not Allowed.')\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(res.status_code, 405)", "def rest(method, endpoint, access_token, data={}, id=None):\n headers = {\n 'Authorization': f\"Bearer {access_token}\",\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n base_url = \"https://api.intercom.io/\"\n\n # for creating and updating a companies api using POST method only\n if endpoint in ['contacts', 'companies', 'events', 'tags'] and method == 'POST':\n url = f\"{base_url}{endpoint}\"\n elif endpoint == 'add_tags' and method == \"POST\":\n url = f\"{base_url}contacts/{id}/tags\"\n elif (endpoint == 'contacts') and method == 'POST':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'contact_email' and method == 'POST':\n url = f\"{base_url}contacts/search\"\n elif endpoint == 'notes' and method == 'POST':\n url = f\"{base_url}contacts/{id}/notes\"\n elif endpoint == 'contacts' and method == 'PUT':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'tags' and method == 'DELETE':\n url = f\"{base_url}contacts/{id[0]}/tags/{id[1]}\"\n elif endpoint == 'contacts' and method == 'GET':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'companies' and method == 'GET':\n url = f\"{base_url}{endpoint}?company_id={id}\"\n elif endpoint == 'admin' and method == 'GET':\n url = f'{base_url}admins'\n\n response = requests.request(\n method, url, headers=headers, data=json.dumps(data))\n return response", "def update_api_consumer(self, host, port):\n\n APIConsumer.host = host\n APIConsumer.port = port\n APIConsumer.base_url = \"http://%s:%s\" % 
(host, port)", "def test_updateall():\n url = baseUrl + userurl + emailId\n payload = {'firstName': new_firstName, 'lastName': new_lastName, 'emailId': new_emailId}\n logging.info(\"Update a user's firstName to: %s, lastName to: %s and emailId to: %s\" % (new_firstName, new_lastName, new_emailId))\n r = requests.put(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 200\n resp = r.json()\n assert resp[\"userName\"] == emailId and resp[\"lastName\"] == new_lastName and resp[\"firstName\"] == new_firstName \\\n and resp[\"licenseType\"] == licensetype and resp[\"subscriptionIds\"][0] == subscriptionid and \\\n resp[\"isActive\"] is True and resp[\"source\"] == \"publicapi\" and resp[\"emailId\"] == new_emailId\n global user_id\n user_id = resp[\"id\"]\n assert user_id is not None", "def do_PUT(self):\n note_details = NoteDetails\n if self.path == '/note/api/update':\n response_data=note_details.update_data(self)\n Response(self).jsonResponse(status=200, data=response_data)", "def update_list(self, apikey):\n\n file = self.retrieve_update(apikey)\n self.parse_update(file)\n self.log.debug(self)", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def updateParameters(self):\n\n return", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update_api_access(info):\n try:\n file = open(PATH + \"/../DB/access.json\", 'r')\n accessData = json.load(file)\n except:\n raise\n\n try:\n accessData[info['application_name']] = {\n 'api_list': info['api_list'],\n 'timestamp': info['timestamp']\n }\n except Exception as e:\n print (e)\n raise\n\n try:\n with open(PATH + '/../DB/access.json', 'w') as f:\n f.write(json.dumps(accessData, indent=4, sort_keys=True))\n except:\n raise", "async def put(self):\r\n data = await self.request.json()\r\n agent_uuid = data[\"agent_uuid\"]\r\n ip_address = data[\"ip_address\"]\r\n agent_obj = Agent.filter(Agent.uuid == agent_uuid).first()\r\n if not agent_obj:\r\n response_obj = {\"status\": \"failed\"}\r\n logger.error(\"No agent found!!!\")\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n Agent.update(ip_address=ip_address).where(Agent.uuid == agent_uuid)\r\n logger.info(\"Agent updated!!!\")\r\n return web.Response(text=\"successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\"}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)", "def test_update_case(self):\n pass", "def taco_test_put_new(self):\n body = '{ \"id\": 400, \"name\": \"item_new\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def test_task_update(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=user)\r\n task = TaskFactory.create(app=app)\r\n root_task = TaskFactory.create(app=app)\r\n data = {'state': '1'}\r\n datajson = json.dumps(data)\r\n root_data = {'state': '4'}\r\n root_datajson = json.dumps(root_data)\r\n\r\n ## anonymous\r\n res = self.app.put('/api/task/%s' % task.id, data=data)\r\n assert_equal(res.status, '401 UNAUTHORIZED', res.status)\r\n ### real user but not allowed as not owner!\r\n url = '/api/task/%s?api_key=%s' % (task.id, non_owner.api_key)\r\n res = 
self.app.put(url, data=datajson)\r\n assert_equal(res.status, '403 FORBIDDEN', res.status)\r\n\r\n ### real user\r\n url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)\r\n res = self.app.put(url, data=datajson)\r\n out = json.loads(res.data)\r\n assert_equal(res.status, '200 OK', res.data)\r\n assert_equal(task.state, data['state'])\r\n assert task.id == out['id'], out\r\n\r\n ### root\r\n res = self.app.put('/api/task/%s?api_key=%s' % (root_task.id, admin.api_key),\r\n data=root_datajson)\r\n assert_equal(res.status, '200 OK', res.data)\r\n assert_equal(root_task.state, root_data['state'])\r\n\r\n # PUT with not JSON data\r\n res = self.app.put(url, data=data)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # PUT with not allowed args\r\n res = self.app.put(url + \"&foo=bar\", data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # PUT with fake data\r\n data['wrongfield'] = 13\r\n res = self.app.put(url, data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'TypeError', err", "async def test_update(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/system/status\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"system-status.json\"),\n ),\n )\n\n aresponses.add(\n MATCH_HOST,\n \"/api/diskspace\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"diskspace.json\"),\n ),\n )\n\n aresponses.add(\n MATCH_HOST,\n \"/api/diskspace\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"diskspace.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.update()\n\n assert response\n assert isinstance(response.info, models.Info)\n assert isinstance(response.disks, List)\n\n response = await client.update()\n\n assert response\n assert isinstance(response.info, models.Info)\n assert isinstance(response.disks, List)", "def update(self):\n\n pass", "def update(self, request, phone):\n try:\n attrs = self.flatten_dict(request.POST)\n #if self.exists(**attrs):\n #return rc.DUPLICATE_ENTRY\n #else:\n endpoint = Endpoint.objects.get(uid__exact=phone, site__name__exact=request.user)\n if attrs.get('effective_caller_id_name'):\n endpoint.effective_caller_id_name = attrs.get('effective_caller_id_name')\n if attrs.get('password'):\n endpoint.password = attrs.get('password')\n if attrs.get('description'):\n endpoint.description = attrs.get('description')\n if attrs.get(\"enabled\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enabled\") == \"true\":\n endpoint.enable = True\n if attrs.get(\"enable\") == \"false\":\n endpoint.enable = False\n elif attrs.get(\"enable\") == \"true\":\n endpoint.enable = True\n endpoint.save()\n return endpoint\n except:\n return rc.NOT_HERE", "def 
update_endpoint(self, endpoint_id, endpoint_ref):\n raise exception.NotImplemented() # pragma: no cover", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass" ]
[ "0.74739033", "0.7205048", "0.71335983", "0.66851205", "0.6498618", "0.6447019", "0.63703203", "0.62712914", "0.6245343", "0.6233306", "0.62273663", "0.6156109", "0.6128922", "0.6088179", "0.6088179", "0.6088179", "0.60405344", "0.60191697", "0.6019072", "0.6019072", "0.6019072", "0.6019072", "0.6019072", "0.6019072", "0.6014899", "0.6006903", "0.5988268", "0.59785986", "0.5977325", "0.59485656", "0.59320897", "0.5915641", "0.58869576", "0.5884221", "0.58752453", "0.5873093", "0.58729243", "0.58699656", "0.5868388", "0.586834", "0.5867963", "0.5866081", "0.58256406", "0.58210605", "0.5817012", "0.5808075", "0.5800019", "0.5785726", "0.5777291", "0.5777291", "0.5771738", "0.5771643", "0.576688", "0.57586944", "0.5755724", "0.5753125", "0.57470804", "0.57338405", "0.5719913", "0.5717219", "0.57041454", "0.5699729", "0.5699331", "0.5698777", "0.5693532", "0.56929725", "0.56795585", "0.567683", "0.56672615", "0.5665658", "0.56640786", "0.5654561", "0.565414", "0.56425935", "0.5631957", "0.56273633", "0.56248444", "0.5622899", "0.562086", "0.56207776", "0.5620757", "0.56184053", "0.56162125", "0.56111884", "0.5610986", "0.5608827", "0.5608384", "0.56070364", "0.55937636", "0.55897355", "0.55896974", "0.5588432", "0.5588011", "0.55850214", "0.5576846", "0.55727935", "0.5570621", "0.55669016", "0.55669016", "0.55669016" ]
0.6032268
17
getData is used to get satellite json data from the server.
def getSearch(self, satellite: str, startDate: str, endDate: str,
              latitude: float, longitude: float,
              minCloudCover=None, maxCloudCover=None,
              minCoverage=None, maxCoverage=None,
              ) -> list:
    if satellite.lower() == 'landsat8':
        satellite = 'l8'
    minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover
    maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover
    minCoverage = self.minCoverage if minCoverage is None else minCoverage
    maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage
    param = {
        'table_name': 'satellite_dataset_prod',
        'satellite': satellite.lower(),
        'start_date': startDate,
        'end_date': endDate,
        'min_cloudcover': int(minCloudCover),
        'max_cloudcover': int(maxCloudCover),
        'min_coverage': int(minCoverage),
        'max_coverage': int(maxCoverage),
        'x': float(longitude),
        'y': float(latitude)
    }
    try:
        response = requests.get(url=self.searchEndpoint, params=param)
    except Exception as e:
        raise exceptions(
            'Unable to reach to server. \
            Make sure url is correct, updated and \
            you are connected to Internet. Error : {}'.format(e))
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, data):\n pass", "def get_data(self):\r\n pass", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def get_data(self):", "def get_data(self):\n return self.data.to_json()", "def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload", "def get_json_data():\n return None", "def _get_data(self):\n raise NotImplementedError()", "def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_data(self): # TODO: add smooth possibility\n return self.data", "async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def getData(self, local_cache):", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def getData(language=None):", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def get_data_from_web():\n pass", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def data():\n return volumes_fetchers.get_json_data()", "def get_data(self):\n return self.parsed_data", "def get_data():\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()", "def data(self):\n return json.loads(self.data_json)", "def getData(self):\r\n return self._data", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def get(self):\r\n return self.data", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def getData(self):\n return self.__data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(self):\n return DataGatherer().get_rainfall_data()", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def GetGeData(self, *args, **kwargs):\n pass", "def getStockData():\n pass", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def download_data(self, format = 'srt'):\n resp, content = 
httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def data():\n return None", "def GetData(self):\r\n \r\n return self._data", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get(self):\n return self.data", "def get(self):\n return self.get_data()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(self):\n return self.topo_data_flattened", "def fetch_data(self):", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "async def get_data(self, endpoint):\n try:\n with async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _fetch_data(self):\n pass", "def data(self):\n self._get_latest_content()\n return self._data.get('data', 
{})", "def get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200", "def data(self):\n return self._data", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data", "def getAllData(self):\r\n return self.data", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data" ]
[ "0.7276632", "0.7044919", "0.7044919", "0.7044919", "0.6988719", "0.6904598", "0.67315245", "0.67315245", "0.6711421", "0.6699119", "0.66586465", "0.6641715", "0.65758157", "0.65712667", "0.6570018", "0.6539462", "0.6526975", "0.64456314", "0.64355475", "0.6282369", "0.6267849", "0.62539583", "0.62318444", "0.62056994", "0.6203443", "0.61973894", "0.61973894", "0.61973894", "0.61941123", "0.6193835", "0.6171375", "0.61706764", "0.61706764", "0.61699045", "0.613302", "0.6105646", "0.60901976", "0.60713613", "0.6064109", "0.60600907", "0.6059058", "0.60442257", "0.60441184", "0.60246354", "0.602411", "0.6003093", "0.59959054", "0.59833884", "0.5977262", "0.59436166", "0.5936724", "0.5918368", "0.5902448", "0.5889219", "0.5873394", "0.5867444", "0.58654165", "0.58654165", "0.5854547", "0.58541197", "0.584554", "0.584539", "0.5839185", "0.5818117", "0.578982", "0.57875663", "0.5783141", "0.5783141", "0.5783141", "0.5783141", "0.5777883", "0.57754195", "0.5767481", "0.5762121", "0.5746903", "0.5734854", "0.57335156", "0.5730804", "0.5727303", "0.5726277", "0.57260615", "0.57227606", "0.57018274", "0.56889516", "0.5685928", "0.56830907", "0.56817275", "0.56644094", "0.5659732", "0.56545985", "0.5647078", "0.56387955", "0.56344116", "0.5630541", "0.5630077", "0.56223285", "0.5616545", "0.561541", "0.5613653", "0.5613653", "0.5613653" ]
0.0
-1
getData is used to get satellite json data from the server.
def getValue(self, url: str, latitude: list, longitude: list,
             satellite='l8', index='ndvi'):
    if type(latitude) is not list:
        latitude = [str(latitude)]
    else:
        latitude = [str(l) for l in latitude]
    if type(longitude) is not list:
        longitude = [str(longitude)]
    else:
        longitude = [str(l) for l in longitude]
    param = {
        'url': url,
        'x': ','.join(longitude),
        'y': ','.join(latitude),
        'satellite': satellite,
        'index': index
    }
    try:
        response = requests.get(url=self.valueEndpoint, params=param)
    except Exception as e:
        raise exceptions(
            'Unable to reach to value endpoint. Error: {}'.format(e))
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, data):\n pass", "def get_data(self):\r\n pass", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def get_data(self):", "def get_data(self):\n return self.data.to_json()", "def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload", "def get_json_data():\n return None", "def _get_data(self):\n raise NotImplementedError()", "def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_data(self): # TODO: add smooth possibility\n return self.data", "async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def getData(self, local_cache):", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def getData(language=None):", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def get_data_from_web():\n pass", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def get_data(self):\n return self.parsed_data", "def data():\n return volumes_fetchers.get_json_data()", "def get_data():\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()", "def data(self):\n return json.loads(self.data_json)", "def getData(self):\r\n return self._data", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def get(self):\r\n return self.data", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def getData(self):\n return self.__data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(self):\n return DataGatherer().get_rainfall_data()", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def GetGeData(self, *args, **kwargs):\n pass", "def getStockData():\n pass", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = 
httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def data():\n return None", "def GetData(self):\r\n \r\n return self._data", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get(self):\n return self.data", "def get(self):\n return self.get_data()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(self):\n return self.topo_data_flattened", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def fetch_data(self):", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "async def get_data(self, endpoint):\n try:\n with async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _fetch_data(self):\n pass", "def data(self):\n self._get_latest_content()\n return self._data.get('data', {})", "def 
get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200", "def data(self):\n return self._data", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data", "def getAllData(self):\r\n return self.data", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)" ]
[ "0.72779673", "0.7046692", "0.7046692", "0.7046692", "0.6990083", "0.6905413", "0.67326444", "0.67326444", "0.67116404", "0.67000073", "0.6659789", "0.66423845", "0.657657", "0.65723705", "0.6571265", "0.6539489", "0.6527483", "0.64475584", "0.6434517", "0.6281322", "0.62673676", "0.6255409", "0.6231226", "0.6207303", "0.6203103", "0.61984956", "0.61984956", "0.61984956", "0.6195434", "0.61948794", "0.61720955", "0.61720955", "0.61705697", "0.61701906", "0.61327624", "0.61058617", "0.6089856", "0.60706997", "0.6064289", "0.60605884", "0.6060254", "0.6044793", "0.60443103", "0.6025632", "0.6024497", "0.6004558", "0.5996323", "0.59844005", "0.5978183", "0.5945097", "0.59371924", "0.59191114", "0.5902455", "0.58900607", "0.5874524", "0.5867122", "0.58668286", "0.58668286", "0.58553886", "0.58549976", "0.5845742", "0.58452636", "0.58392817", "0.5819086", "0.5790499", "0.5786983", "0.57841724", "0.57841724", "0.57841724", "0.57841724", "0.5778774", "0.57762915", "0.5768052", "0.57629234", "0.5745028", "0.57355803", "0.57341474", "0.5730678", "0.57276666", "0.5727472", "0.5726741", "0.5723196", "0.5700075", "0.5689483", "0.568717", "0.56825423", "0.5682135", "0.5665188", "0.56594765", "0.5654654", "0.5648013", "0.5639657", "0.56337845", "0.56318176", "0.563108", "0.5623548", "0.56176", "0.5614621", "0.5614621", "0.5614621", "0.5614105" ]
0.0
-1
getData is used to get satellite json data from the server.
def getStats(self, url: str, latitude: list, longitude: list, satellite='l8', index='ndvi'):
    if type(latitude) is not list:
        latitude = [str(latitude)]
    else:
        latitude = [str(l) for l in latitude]
    if type(longitude) is not list:
        longitude = [str(longitude)]
    else:
        longitude = [str(l) for l in longitude]
    param = {
        'url': url,
        'x': ','.join(longitude),
        'y': ','.join(latitude),
        'satellite': satellite,
        'index': index
    }
    try:
        response = requests.get(url=self.statsEndpoint, params=param)
    except Exception as e:
        raise exceptions(
            'Unable to reach to statistics endpoint. Error: {}'.format(e))
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, data):\n pass", "def get_data(self):\r\n pass", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def get_data(self):", "def get_data(self):\n return self.data.to_json()", "def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload", "def get_json_data():\n return None", "def _get_data(self):\n raise NotImplementedError()", "def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_data(self): # TODO: add smooth possibility\n return self.data", "async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def getData(self, local_cache):", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def getData(language=None):", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def get_data_from_web():\n pass", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def get_data(self):\n return self.parsed_data", "def data():\n return volumes_fetchers.get_json_data()", "def get_data():\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()", "def data(self):\n return json.loads(self.data_json)", "def getData(self):\r\n return self._data", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def get(self):\r\n return self.data", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def getData(self):\n return self.__data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(self):\n return DataGatherer().get_rainfall_data()", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def GetGeData(self, *args, **kwargs):\n pass", "def getStockData():\n pass", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = 
httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def data():\n return None", "def GetData(self):\r\n \r\n return self._data", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get(self):\n return self.data", "def get(self):\n return self.get_data()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(self):\n return self.topo_data_flattened", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def fetch_data(self):", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "async def get_data(self, endpoint):\n try:\n with async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _fetch_data(self):\n pass", "def data(self):\n self._get_latest_content()\n return self._data.get('data', {})", "def 
get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200", "def data(self):\n return self._data", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data", "def getAllData(self):\r\n return self.data", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)" ]
[ "0.72779673", "0.7046692", "0.7046692", "0.7046692", "0.6990083", "0.6905413", "0.67326444", "0.67326444", "0.67116404", "0.67000073", "0.6659789", "0.66423845", "0.657657", "0.65723705", "0.6571265", "0.6539489", "0.6527483", "0.64475584", "0.6434517", "0.6281322", "0.62673676", "0.6255409", "0.6231226", "0.6207303", "0.6203103", "0.61984956", "0.61984956", "0.61984956", "0.6195434", "0.61948794", "0.61720955", "0.61720955", "0.61705697", "0.61701906", "0.61327624", "0.61058617", "0.6089856", "0.60706997", "0.6064289", "0.60605884", "0.6060254", "0.6044793", "0.60443103", "0.6025632", "0.6024497", "0.6004558", "0.5996323", "0.59844005", "0.5978183", "0.5945097", "0.59371924", "0.59191114", "0.5902455", "0.58900607", "0.5874524", "0.5867122", "0.58668286", "0.58668286", "0.58553886", "0.58549976", "0.5845742", "0.58452636", "0.58392817", "0.5819086", "0.5790499", "0.5786983", "0.57841724", "0.57841724", "0.57841724", "0.57841724", "0.5778774", "0.57762915", "0.5768052", "0.57629234", "0.5745028", "0.57355803", "0.57341474", "0.5730678", "0.57276666", "0.5727472", "0.5726741", "0.5723196", "0.5700075", "0.5689483", "0.568717", "0.56825423", "0.5682135", "0.5665188", "0.56594765", "0.5654654", "0.5648013", "0.5639657", "0.56337845", "0.56318176", "0.563108", "0.5623548", "0.56176", "0.5614621", "0.5614621", "0.5614621", "0.5614105" ]
0.0
-1
getData is used to get satellite json data from the server.
def getTimelineValue(self, url: str, latitude: float, longitude: float, startDate: str, endDate: str, minCloudCover=None, maxCloudCover=None, minCoverage=None, maxCoverage=None, satellite='l8', index='ndvi'):
    latitude = str(latitude)
    longitude = str(longitude)
    minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover
    maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover
    minCoverage = self.minCoverage if minCoverage is None else minCoverage
    maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage
    param = {
        'url': url,
        'x': str(longitude),
        'y': str(latitude),
        'satellite': satellite.lower(),
        'index': index,
        'start_date': startDate,
        'end_date': endDate,
        'min_cloudcover': int(minCloudCover),
        'max_cloudcover': int(maxCloudCover),
        'min_coverage': int(minCoverage),
        'max_coverage': int(maxCoverage),
    }
    try:
        response = requests.get(url=self.timelineValueEndpoint, params=param)
    except Exception as e:
        raise exceptions(
            'Unable to reach to value endpoint. Error: {}'.format(e))
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, data):\n pass", "def get_data(self):\r\n pass", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def get_data(self):", "def get_data(self):\n return self.data.to_json()", "def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload", "def get_json_data():\n return None", "def _get_data(self):\n raise NotImplementedError()", "def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_data(self): # TODO: add smooth possibility\n return self.data", "async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def getData(self, local_cache):", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def getData(language=None):", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def get_data_from_web():\n pass", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def get_data(self):\n return self.parsed_data", "def data():\n return volumes_fetchers.get_json_data()", "def data(self):\n return json.loads(self.data_json)", "def get_data():\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()", "def getData(self):\r\n return self._data", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def get(self):\r\n return self.data", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def getData(self):\n return self.__data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(self):\n return DataGatherer().get_rainfall_data()", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def GetGeData(self, *args, **kwargs):\n pass", "def getStockData():\n pass", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = 
httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def data():\n return None", "def GetData(self):\r\n \r\n return self._data", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get(self):\n return self.data", "def get(self):\n return self.get_data()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(self):\n return self.topo_data_flattened", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def fetch_data(self):", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "async def get_data(self, endpoint):\n try:\n with async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def _fetch_data(self):\n pass", "def data(self):\n self._get_latest_content()\n return self._data.get('data', {})", "def 
get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200", "def data(self):\n return self._data", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data", "def getAllData(self):\r\n return self.data", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data" ]
[ "0.72748417", "0.7042909", "0.7042909", "0.7042909", "0.6986527", "0.69044787", "0.6730157", "0.6730157", "0.67095935", "0.6698002", "0.6657195", "0.66411364", "0.6575214", "0.65689933", "0.6568993", "0.6539387", "0.6526602", "0.64448017", "0.643305", "0.62808186", "0.6266045", "0.62530833", "0.6230977", "0.62047446", "0.62023747", "0.61962587", "0.61962587", "0.61962587", "0.6193218", "0.61930925", "0.6169693", "0.6169693", "0.6169271", "0.61692584", "0.6132015", "0.610331", "0.6089289", "0.60693574", "0.60641956", "0.6059549", "0.60585314", "0.60427296", "0.60426944", "0.6025194", "0.6023156", "0.60020524", "0.59958684", "0.59818995", "0.5976509", "0.594268", "0.593523", "0.59174246", "0.59016556", "0.58875465", "0.5873129", "0.58663595", "0.58644193", "0.58644193", "0.58530504", "0.5852787", "0.58446234", "0.584416", "0.5837671", "0.58164924", "0.5789456", "0.5785923", "0.57820135", "0.57820135", "0.57820135", "0.57820135", "0.5776453", "0.5773474", "0.5765753", "0.5760926", "0.5745279", "0.57353574", "0.5733556", "0.5729883", "0.5726364", "0.5724414", "0.5723797", "0.5719825", "0.5699225", "0.5688072", "0.5686226", "0.56823844", "0.56817037", "0.566298", "0.5658644", "0.56544554", "0.5645314", "0.56388634", "0.5633947", "0.5629257", "0.56289667", "0.562071", "0.5616985", "0.5613694", "0.5612528", "0.5612528", "0.5612528" ]
0.0
-1
getData is used to get satellite json data from the server.
def getTimelineStats(self, url: str, latitude: list, longitude: list, startDate: str, endDate: str, minCloudCover=None, maxCloudCover=None, minCoverage=None, maxCoverage=None, satellite='l8', index='ndvi'):
    if type(latitude) is not list:
        latitude = [str(latitude)]
    else:
        latitude = [str(l) for l in latitude]
    if type(longitude) is not list:
        longitude = [str(longitude)]
    else:
        longitude = [str(l) for l in longitude]
    minCloudCover = self.minCloudCover if minCloudCover is None else minCloudCover
    maxCloudCover = self.maxCloudCover if maxCloudCover is None else maxCloudCover
    minCoverage = self.minCoverage if minCoverage is None else minCoverage
    maxCoverage = self.maxCoverage if maxCoverage is None else maxCoverage
    param = {
        'url': url,
        'x': ','.join(longitude),
        'y': ','.join(latitude),
        'satellite': satellite.lower(),
        'index': index,
        'start_date': startDate,
        'end_date': endDate,
        'min_cloudcover': int(minCloudCover),
        'max_cloudcover': int(maxCloudCover),
        'min_coverage': int(minCoverage),
        'max_coverage': int(maxCoverage),
    }
    try:
        response = requests.get(url=self.timelineStatsEndpoint, params=param)
    except Exception as e:
        raise exceptions(
            'Unable to reach to value endpoint. Error: {}'.format(e))
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n return", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, data):\n pass", "def get_data(self):\r\n pass", "def get(self, data):\n ret = self._rest_call({}, 'GET')\n return json.loads(ret[2])", "def get_data():\n try:\n response = requests.get(uri)\n json_data = response.json()\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)\n return json_data", "def get_data(self):", "def get_data(self):\n return self.data.to_json()", "def get_sdata(self):\n payload = self.get('data_request?id=sdata&output_format=json')\n return payload", "def get_json_data():\n return None", "def _get_data(self):\n raise NotImplementedError()", "def get_velib_data():\n api_url = \"https://api.jcdecaux.com/vls/v1/\"\n query_string = \"stations?contract=Paris&apiKey=\"\n api_key = \"ec29d3b17e5162e1459aaad45cddfe74fe832379\"\n my_url = api_url + query_string + api_key\n\n urlobj = URL.urlopen(my_url)\n data = json.load(urlobj)\n# data = urlobj.read()\n# help(data)\n return data", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def get_data():\n string = open('Tinder/static/data/data.json').read()\n return flask.jsonify(json.loads(string))", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_data(self): # TODO: add smooth possibility\n return self.data", "async def get_data(self, path: str) -> Dict:\n # function accepts paths that start with / and also path that do not start with /\n if path.startswith(\"/\"):\n path = path[1:]\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"{self._opa_url}/data/{path}\") as opa_response:\n return await opa_response.json()\n except aiohttp.ClientError as e:\n logger.warning(\"Opa connection error: {err}\", err=e)\n raise", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def getData(self, local_cache):", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. 
Call help() for details.\n ''')", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def getData(self):\n return self.data", "def getData(self):\n return self.data", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def getTheData(self, dev):\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"getTheData FrontViewAPI method called.\")\n\n # dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Download\")\n try:\n url = 'http://' + dev.pluginProps['sourceXML'] + '/FrontView'\n r = requests.get(url,timeout=5)\n result = r.json()\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Result:\" + unicode(result))\n self.WaitInterval = 1\n dev.updateStateOnServer('deviceIsOnline', value=True, uiValue=\"Online\")\n dev.setErrorStateOnServer(None)\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n return result\n\n except Exception as error:\n\n indigo.server.log(u\"Error connecting to Device:\" + dev.name)\n self.WaitInterval = 60\n if self.debugLevel >= 2 and self.debug:\n self.debugLog(u\"Device is offline. No data to return. 
\")\n dev.updateStateOnServer('deviceIsOnline', value=False, uiValue=\"Offline\")\n # dev.updateStateOnServer('deviceTimestamp', value=t.time())\n dev.setErrorStateOnServer(u'Offline')\n result = \"\"\n return result", "def getData(language=None):", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def get_data_from_web():\n pass", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def get_data(self):\n\n self.set_query_string()\n self.realtime_data = super().get_data()\n self.set_coordinate()\n return self.realtime_data", "def get_data(self, out_format: str='json'):\n if self.data:\n return self.data\n self.data_ready = self.check_available()\n if not self.data_ready:\n raise DataNotReady(\"The run {} has not yet finished, data not available yet.\".format(self))\n resp = self.ph.conn.request(\n 'GET', self.ph.URLS['getdata'].format(self.run_token), dict(api_key=self.ph.api_key, format=out_format))\n data = resp.data.decode('utf-8')\n self.data = self.parse_json_data(data)\n return self.data", "def data():\n return volumes_fetchers.get_json_data()", "def get_data(self):\n return self.parsed_data", "def get_data():\n try:\n with open(CONS[\"OUTPUT_FILE\"], \"r\") as file:\n data = json.load(file)[1]\n return data\n except FileNotFoundError:\n print(\"Data file not found.\")\n exit()", "def data(self):\n return json.loads(self.data_json)", "def getData(self):\r\n return self._data", "def getData(url):\n data = ''\n request = urllib2.Request(url, headers={\"Accept\": \"application/json\"})\n try:\n data = json.loads(urllib2.urlopen(request).read())\n except urllib2.HTTPError, e:\n raise Exception(\"HTTP error: %d\" % e.code)\n except urllib2.URLError, e:\n raise Exception(\"Network error: %s\" % e.reason.args[1])\n\n return data", "def get(self):\r\n return self.data", "def get_data(self, state=None, request=None):\n raise NotImplementedError", "def getData(self):\n return self.__data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_data(self):\n return DataGatherer().get_rainfall_data()", "def getData(eUrl, eToken):\n if eUrl == '' or eToken == '':\n print(\"Enviroment is not setup\")\n exit(1)\n\n with urllib.request.urlopen(eUrl + eToken) as url:\n data = json.loads(url.read().decode())\n\n return(data)", "def get_data(self):\n return DataGatherer().get_wind_data()", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def GetGeData(self, *args, **kwargs):\n pass", "def getStockData():\n pass", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def download_data(self, format = 'srt'):\n resp, content = 
httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def data():\n return None", "def GetData(self):\r\n \r\n return self._data", "def getData():\n data = {\n \"name\": \"Kim\",\n \"message\": \"Hello there!\"\n }\n return jsonify(data) # respond to the API caller with a JSON representation of data. jsonify is important, as it sets response headers that indicate the respose is in JSON as well", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get(self):\n return self.data", "def get(self):\n return self.get_data()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def get_data(self):\n self._send_command(self._adapter.get_data())", "def stations():\n print(\"server received request for stations data...\")\n return jsonify(stations_data)", "def get_data(self, label: str) -> Any:\r\n return self._get_resource(label, self._data, \"data\")", "def get_data(self):\n with open(self.filepath, 'r') as openFile:\n data = json.load(openFile)\n logging.info('Loaded JSON data file.')\n return data", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def get_data(self):\n return self.topo_data_flattened", "def fetch_data(self):", "def get_data():\n if not hasattr(g, 'data'):\n g.data = load_data()\n return g.data", "def GetData():\r\n return _hiew.HiewGate_GetData()", "def get(self):\n city = str(request.args.get('city')) ## /?city=stockholm\n source = urllib.request.urlopen('http://127.0.0.1:5050/?city=' + city).read()\n data = json.loads(source)\n print(data)\n tempinc = {\"name\" : (str(data['name'])),\n \"country\" : (str(data['country'])),\n \"temp\" : (str(data['temp']))+' c'}\n return tempinc", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "async def get_data(self, endpoint):\n try:\n with async_timeout.timeout(5, loop=self._loop):\n response = await self._session.get(f\"{endpoint}\")\n\n _LOGGER.debug(\"Response from Dingz device: %s\", response.status)\n self.data = await response.json()\n _LOGGER.debug(self.data)\n except (asyncio.TimeoutError, aiohttp.ClientError):\n _LOGGER.error(\"Can not load data from Dingz device\")\n self.data = None\n raise exceptions.DingzConnectionError()", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _fetch_data(self):\n pass", "def data(self):\n self._get_latest_content()\n return self._data.get('data', 
{})", "def get(self):\n data = request.args.get('data')\n\n if not data:\n data = \"OK!\"\n\n return json.loads(dumps(data)), 200", "def data(self):\n return self._data", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def get_data(self, lat=53.3498, lon=-6.2603):\n r = requests.get(\n f\"https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid=92fb08f48a98e0f39b990060352ffebe\")\n return r.text", "def get_data(self):\n return DataGatherer().get_temperature_data()", "def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data", "def getAllData(self):\r\n return self.data", "def get_data(self, body):\n params = json.loads(body)\n logger.debug('New Data Format')\n return self._get_data(body)", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data", "def get_data(self):\n\n return self._data" ]
[ "0.7276632", "0.7044919", "0.7044919", "0.7044919", "0.6988719", "0.6904598", "0.67315245", "0.67315245", "0.6711421", "0.6699119", "0.66586465", "0.6641715", "0.65758157", "0.65712667", "0.6570018", "0.6539462", "0.6526975", "0.64456314", "0.64355475", "0.6282369", "0.6267849", "0.62539583", "0.62318444", "0.62056994", "0.6203443", "0.61973894", "0.61973894", "0.61973894", "0.61941123", "0.6193835", "0.6171375", "0.61706764", "0.61706764", "0.61699045", "0.613302", "0.6105646", "0.60901976", "0.60713613", "0.6064109", "0.60600907", "0.6059058", "0.60442257", "0.60441184", "0.60246354", "0.602411", "0.6003093", "0.59959054", "0.59833884", "0.5977262", "0.59436166", "0.5936724", "0.5918368", "0.5902448", "0.5889219", "0.5873394", "0.5867444", "0.58654165", "0.58654165", "0.5854547", "0.58541197", "0.584554", "0.584539", "0.5839185", "0.5818117", "0.578982", "0.57875663", "0.5783141", "0.5783141", "0.5783141", "0.5783141", "0.5777883", "0.57754195", "0.5767481", "0.5762121", "0.5746903", "0.5734854", "0.57335156", "0.5730804", "0.5727303", "0.5726277", "0.57260615", "0.57227606", "0.57018274", "0.56889516", "0.5685928", "0.56830907", "0.56817275", "0.56644094", "0.5659732", "0.56545985", "0.5647078", "0.56387955", "0.56344116", "0.5630541", "0.5630077", "0.56223285", "0.5616545", "0.561541", "0.5613653", "0.5613653", "0.5613653" ]
0.0
-1
make the cosmos and DES meds files
def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid):
    flist = files.get_cosmos_flist(tileid)
    cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i')

    print('making cosmos MEDS:',cosmos_meds)
    maker = CosmosMEDSMaker(
        config_path=cosmos_config,
        catname=catfile,
        flistname=flist,
    )
    maker.write(cosmos_meds)

    for band in ['u','g','r','i','z']:
        band_flist = files.get_des_flist(band)
        band_meds = files.get_meds_file(run, tileid, 'des',band)

        print('making DES MEDS:',band_meds)
        maker = CosmosMEDSMaker(
            config_path=des_config,
            catname=cosmos_meds,
            flistname=band_flist,
        )
        maker.write(band_meds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_data_raw (mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname,',',mdp.tag\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n # write first header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n # write another header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! 
flag_out_open\n save_data(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)", "def create_demo_dcm_data(dcm_dir):\n pet_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_osem.nii')\n mr_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_t1.nii')\n \n pet, pet_affine = flip_ras_lps(*load_nii_in_ras(pet_fname))\n mr, mr_affine = flip_ras_lps(*load_nii_in_ras(mr_fname))\n\n os.mkdir(dcm_dir)\n write_3d_static_dicom(pet, os.path.join(dcm_dir, 'PT'), pet_affine, modality = 'PT')\n write_3d_static_dicom(mr, os.path.join(dcm_dir, 'MR'), mr_affine, modality = 'MR')", "def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('number of atoms do not match')\n out = openFile(addext(filename, '.nmd'), 'w')\n\n #out.write('#!{0} -e\\n'.format(VMDPATH))\n out.write('nmwiz_load {0}\\n'.format(abspath(filename)))\n name = modes.getTitle()\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = str(atoms)\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = splitext(split(filename)[1])[0]\n out.write('name {0}\\n'.format(name))\n try:\n coords = atoms.getCoords()\n except:\n raise ValueError('coordinates could not be retrieved from atoms')\n if coords is None:\n raise ValueError('atom coordinates are not set')\n\n try:\n data = 
atoms.getNames()\n if data is not None:\n out.write('atomnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnames()\n if data is not None:\n out.write('resnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnums()\n if data is not None:\n out.write('resids ')\n data.tofile(out, ' ')\n out.write('\\n')\n except:\n pass\n try:\n data = atoms.getChids()\n if data is not None:\n out.write('chainids {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getSegnames()\n if data is not None:\n out.write('segnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n\n try:\n data = atoms.getBetas()\n if data is not None:\n out.write('bfactors ')\n data.tofile(out, ' ', '%.2f')\n out.write('\\n')\n except:\n pass\n\n format = '{0:.3f}'.format\n out.write('coordinates ')\n coords.tofile(out, ' ', '%.3f')\n out.write('\\n')\n count = 0\n if isinstance(modes, Vector):\n out.write('mode 1 {0:.2f} '.format(abs(modes)))\n modes.getNormed()._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n else:\n if isinstance(modes, Mode):\n modes = [modes]\n for mode in modes:\n if (mode.getEigval() < ZERO) and not zeros:\n continue\n elif (mode.getEigval() < ZERO) and zeros:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))\n else:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, mode.getVariance()**0.5))\n arr = mode._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n if count == 0:\n LOGGER.warning('No normal mode data was written. '\n 'Given modes might have 0 eigenvalues.')\n out.close()\n return filename", "def make_data_raw_fast(mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! 
flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def writeDataCards(opt,sigExp,bkgExp,shapesURL):\n\n #create a card per category\n dcList=[]\n for icat in range(len(opt.categs)):\n cat='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n dcTxt='%s/shapes-parametric.datacard_%s.dat'%(opt.output,cat)\n dcList.append(dcTxt)\n with open(dcTxt,'w') as dc:\n dc.write('#\\n')\n dc.write('# datacard was automatically generated with generateWorkspace.py\\n')\n dc.write('# the options passed are printed below\\n')\n dc.write('# %s\\n'%opt)\n dc.write('#\\n')\n dc.write('imax *\\n')\n dc.write('jmax *\\n')\n dc.write('kmax *\\n')\n dc.write('-'*50+'\\n')\n dc.write('shapes * * {0} $PROCESS_{1} $PROCESS_$SYSTEMATIC\\n'.format(shapesURL,cat))\n dc.write('shapes data_obs * {0} $PROCESS_{1}\\n'.format(shapesURL,cat))\n dc.write('-'*50+'\\n')\n dc.write('bin %s\\n'%cat)\n dc.write('observation -1\\n')\n dc.write('-'*50+'\\n')\n dc.write('%15s %15s %15s\\n'%('bin',cat,cat))\n dc.write('%15s %15s %15s\\n'%('process','sig','bkg'))\n dc.write('%15s %15s %15s\\n'%('process','0', '1'))\n dc.write('%15s %15s %15s\\n'%('rate','%3.2f'%sigExp[icat], '%3.2f'%bkgExp[icat]))\n dc.write('-'*50+'\\n')\n \n #float the background normalization as well as the signal\n dc.write('mu_bkg{0} rateParam {0} bkg 1\\n'.format(cat))\n\n #uncertainties\n dc.write('lumi %8s %15s %15s\\n'%('lnN','1.027','-'))\n dc.write('%s_sigShape %8s %15s %15s\\n'%(cat,'shape','1','-'))\n dc.write('%s_bkgShape %8s %15s %15s\\n'%(cat,'shape','-','1'))\n dc.write('{0} autoMCStats 0.0 1\\n'.format(cat))\n \n print '\\tshapes available @',shapesURL\n print '\\tgenerated the following datacards',dcList", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1", "def Construct3DMolToFile(fileName,writeFile):\r\n # Writing sets of molecules\r\n \r\n\r\n w = Chem.SDWriter(writeFile)\r\n suppl = Chem.SDMolSupplier(fileName)\r\n mols = [x for x in suppl]\r\n for mol in mols:\r\n \t# print(mol.GetProp(\"Solvent\"))\r\n \t# print(mol.GetPropNames)\r\n \tsignal.signal(signal.SIGALRM, handler)\r\n \tsignal.alarm(100)\r\n \ttry:\r\n \t\tmol3d = GetMolFromMol(mol,dimension=3)\r\n \t\tw.write(mol3d)\r\n \texcept Exception:\r\n \t\tmol3d = mol\r\n \t\tw.write(mol3d)\r\n \t\t# print(mol.GetPropsAsDict())\r\n\r\n\r\n w.close()", "def generate_epics_db(self):\n if (self.verbose):\n # Generate digital application related databases and configuration files\n print(\"==================================================\")\n print(\"== Generating EPICS DB and configuration files: ==\")\n print(\"==================================================\")\n \n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n for app in self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application 
prefix : {}\".format(app_prefix))\n \n self.__write_dig_app_id_confg(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n\n # Add the IOC name environmental variable for the Link Nodes\n self.__write_header_env(path=app_path, macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n if self.link_nodes[app[\"link_node_name\"]]['type'] == 'Digital':\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"1\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":\"0\"}) # If there are no analog cards, set ID to invalid\n\n has_virtual = False\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n for input in device[\"inputs\"]:\n\n if app[\"virtual\"]:\n has_virtual = True\n if (input[\"bit_position\"]>=32):\n scan = \".2 second\"\n if (input['name'] == 'WDOG'):\n if (\"MPSHEARTBEAT\" in input[\"input_pv\"]):\n scan = \".1 second\"\n channel = input[\"bit_position\"] - 32\n vmacros = { \"P\":input[\"input_pv\"]+'_THR',\n \"R\":input[\"name\"],\n \"N\":self.mps_name.getDeviceInputNameFromId(input[\"db_id\"]),\n \"INPV\":input[\"input_pv\"],\n \"ALSTATE\":str(input[\"alarm_state\"]),\n \"NALSTATE\":str(to_bool(not input[\"alarm_state\"])),\n \"ZSV\":input[\"zero_severity\"],\n \"OSV\":input[\"one_severity\"],\n \"BIT\":\"{:02d}\".format(channel).format,\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"], \n \"GID\":str(app[\"app_id\"]),\n \"SCAN\":scan}\n if (input['name'] == 'WDOG'):\n self.__write_virtual_wdog_db(path=app_path, macros=vmacros)\n else:\n self.__write_virtual_db(path=app_path, macros=vmacros)\n\n\n macros = { \"P\":device_prefix,\n \"R\":input[\"name\"],\n \"BIT\":input[\"bit_position\"],\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"] }\n\n if (self.verbose):\n print(\" Digital Input : {}\".format(input[\"name\"]))\n\n if (self.verbose):\n print(\"----------------------------\")\n\n print(\"==================================================\")\n print(\"\")\n\n # Generates analog application related databases and configuration files\n if (self.verbose):\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n for app in self.analog_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application prefix : {}\".format(app_prefix))\n\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"0\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n self.__write_thresholds_off_config(path=app_path)\n\n # Add the IOC name environmental variable for the Link Nodes\n if app[\"analog_link_node\"]:\n self.__write_header_env(path=app_path, 
macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n\n spare_channels = range(0,6)\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n if (device[\"type_name\"] not in self.non_link_node_types):\n macros = { \"P\": app_prefix,\n \"CH\":str(device[\"channel_index\"]),\n \"CH_NAME\":device[\"device_name\"],\n \"CH_PVNAME\":device_prefix,\n \"CH_SPARE\":\"0\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n processing = 0\n ch = device['channel_index']\n if (device[\"type_name\"] == \"CBLM\"):\n processing = 1\n if (device[\"type_name\"] == \"KICK\"):\n processing = 1\n int0 = device['channel_index']*4\n int1 = device['channel_index']*4 + 1\n macros = { \"CH\":format(device['channel_index']),\n \"PROC\":format(processing),\n \"INT0\":format(int0),\n \"INT1\":format(int1)\n }\n self.__write_ana_config(path=app_path, macros=macros)\n spare_channels[device[\"channel_index\"]] = -1\n for fault in device[\"faults\"].values():\n bsa_slot = fault['integrators'][0]*6 + device[\"channel_index\"]\n macros = { \"P\":app_prefix,\n \"R\":'ANA_BSA_DATA_{}'.format(bsa_slot),\n \"P_DEV\":device_prefix,\n \"R_DEV\":self.get_analog_type_name(device[\"type_name\"]),\n \"FAULT\":fault['name'],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"])\n }\n self.__write_analog_db(path=app_path, macros=macros)\n macros = { \"P\":device_prefix,\n \"BAY\":format(device[\"bay_number\"]),\n \"APP\":self.get_app_type_name(device[\"type_name\"]),\n \"FAULT\":fault[\"name\"],\n \"FAULT_INDEX\":self.get_fault_index(device[\"type_name\"], fault[\"name\"], device[\"channel_number\"]),\n \"DESC\":fault[\"description\"],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"]),\n \"SLOPE\":unicode(self.get_slope(device[\"type_name\"])),\n \"OFFSET\":unicode(self.get_offset(device[\"type_name\"]))}\n self.__write_thr_base_db(path=app_path, macros=macros)\n # Generate PV for all possible thresholds, even if not defined in database\n for bit in range(0,8):#fault[\"bit_positions\"]:\n fault_prefix = \"{}_T{}\".format(fault[\"name\"], bit)\n macros[\"BIT_POSITION\"] = str(bit)\n self.__write_thr_db(path=app_path, macros=macros)\n if (self.verbose):\n print(\" Fault prefix : {}\".format(fault_prefix))\n\n\n for ch in spare_channels:\n if ch > -1:\n macros = { \"P\": app_prefix,\n \"CH\":str(ch),\n \"CH_NAME\":\"Spare\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n #\n # Write db information about slots of each link node\n #\n for app in self.analog_apps + self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n link_node_info=self.link_nodes[app[\"link_node_name\"]]\n #print link_node_info\n if not 'exported' in link_node_info:\n for slot in range(2,8):\n if slot in link_node_info['slots']:\n macros = { \"P\": app[\"app_prefix\"],\n 
\"SLOT\": str(slot),\n \"SLOT_NAME\": link_node_info['slots'][slot]['type'],\n \"SLOT_PVNAME\": link_node_info['slots'][slot]['pv_base'],\n \"SLOT_SPARE\": \"0\"}\n else:\n macros = { \"P\": app[\"app_prefix\"],\n \"SLOT\": str(slot),\n \"SLOT_NAME\": \"Spare\",\n \"SLOT_PVNAME\": \"Spare\",\n \"SLOT_SPARE\": \"1\"}\n\n self.__write_link_node_slot_info_db(path=app_path, macros=macros)\n\n # Add CH_* PVs for digital-only link nodes. These are added before \n # only if the LN is Mixed or Analog\n if link_node_info['type'] == 'Digital':\n for ch in spare_channels:\n macros = { \"P\": app[\"app_prefix\"],\n \"CH\":str(ch),\n \"CH_NAME\":\"Not Available\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n link_node_info['exported']=True\n\n #\n # Add Link Node related information\n #\n #for ln_name,ln in self.link_nodes.items():\n # if \"lc1_node_id\" not in ln:\n # continue\n # if \"dig_app_id\" not in ln:\n # continue\n # print ln[\"lc1_node_id\"] + ' ' + ln[\"type\"] + ' ' + ln[\"dig_app_id\"]\n for ln_name,ln in self.link_nodes.items():\n self.__write_lc1_info_config(ln)\n self.__write_link_node_info_db(ln_name, ln)\n\n if (self.verbose):\n print(\"--------------------------\")", "def export_model_description(md: ModelDescription) -> bytes:\n\n # ---------------- write model description -------------------\n\n fmd = ET.Element(\"fmiModelDescription\")\n fmd.set(\"fmiVersion\", \"2.0\")\n fmd.set(\"modelName\", md.modelName)\n fmd.set(\"guid\", md.guid)\n fmd.set(\"author\", md.author)\n fmd.set(\"generationDateAndTime\", md.generationDateAndTime)\n fmd.set(\"variableNamingConvention\", md.variableNamingConvention)\n fmd.set(\"generationTool\", md.generationTool)\n fmd.set(\"description\", md.description)\n\n # CoSimulation\n cs = ET.SubElement(fmd, \"CoSimulation\")\n cs.set(\"modelIdentifier\", md.CoSimulation.modelIdentifier)\n cs.set(\n \"needsExecutionTool\", str(md.CoSimulation.needsExecutionTool).lower(),\n )\n cs.set(\n \"canHandleVariableCommunicationStepSize\",\n str(md.CoSimulation.canHandleVariableCommunicationStepSize).lower(),\n )\n cs.set(\n \"canInterpolateInputs\", str(md.CoSimulation.canInterpolateInputs).lower(),\n )\n\n cs.set(\n \"maxOutputDerivativeOrder\", str(md.CoSimulation.maxOutputDerivativeOrder),\n )\n cs.set(\n \"canRunAsynchronuously\", str(md.CoSimulation.canRunAsynchronuously).lower(),\n )\n cs.set(\n \"canBeInstantiatedOnlyOncePerProcess\",\n str(md.CoSimulation.canBeInstantiatedOnlyOncePerProcess).lower(),\n )\n cs.set(\n \"canNotUseMemoryManagementFunctions\",\n str(md.CoSimulation.canNotUseMemoryManagementFunctions).lower(),\n )\n cs.set(\n \"canGetAndSetFMUstate\", str(md.CoSimulation.canGetAndSetFMUstate).lower(),\n )\n cs.set(\n \"canSerializeFMUstate\", str(md.CoSimulation.canSerializeFMUstate).lower(),\n )\n cs.set(\n \"providesDirectionalDerivative\",\n str(md.CoSimulation.providesDirectionalDerivative).lower(),\n )\n\n # 2.2.4 p.42) Log categories:\n cs = ET.SubElement(fmd, \"LogCategories\")\n for ac in md.logCategories:\n c = ET.SubElement(cs, \"Category\")\n c.set(\"name\", ac)\n\n # 2.2.7 p.47) ModelVariables\n mvs = ET.SubElement(fmd, \"ModelVariables\")\n\n variable_index = 0\n\n for var in md.modelVariables:\n var.variability\n value_reference = str(var.value_reference)\n\n idx_comment = ET.Comment(f'Index of variable = \"{variable_index + 1}\"')\n mvs.append(idx_comment)\n sv = ET.SubElement(mvs, \"ScalarVariable\")\n sv.set(\"name\", var.name)\n 
sv.set(\"valueReference\", value_reference)\n sv.set(\"variability\", var.variability)\n sv.set(\"causality\", var.causality)\n\n if var.description:\n sv.set(\"description\", var.description)\n\n if var.initial:\n i = var.initial\n sv.set(\"initial\", i)\n\n val = ET.SubElement(sv, var.dataType)\n\n # 2.2.7. p.48) start values\n if var.initial in {\"exact\", \"approx\"} or var.causality == \"input\":\n assert (\n var.start != None\n ), \"a start value must be defined for intial ∈ {exact, approx}\"\n val.set(\"start\", var.start)\n\n variable_index += 1\n\n ms = ET.SubElement(fmd, \"ModelStructure\")\n\n # 2.2.8) For each output we must declare 'Outputs' and 'InitialUnknowns'\n outputs = [\n (idx + 1, o)\n for idx, o in enumerate(md.modelVariables)\n if o.causality == \"output\"\n ]\n\n if outputs:\n os = ET.SubElement(ms, \"Outputs\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n os = ET.SubElement(ms, \"InitialUnknowns\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n # FMI requires encoding to be encoded as UTF-8 and contain a header:\n #\n # See 2.2 p.28\n return ET.tostring(fmd, pretty_print=True, encoding=\"utf-8\", xml_declaration=True)", "def make_database(num_files=10):\n for i in range(num_files):\n print('\\n\\n\\nCreating set', str(i), '\\n\\n\\n')\n s_file = 'set' + str(i) + '.hdf5' \n play_dominoes(save_file=s_file)", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def generate_file(material_id):\n apr=get_doc_from_MP(material_id)\n mat_list=generate_matrix(apr)\n formu=POSCAR_title(apr)\n cell_for=generate_cell_formula(apr)\n needed_dos=generate_dos_str(material_id)\n revise_dos=dos_into_string(needed_dos)\n ordered_list=generate_ordered_list(revise_dos)\n my_ordered_elements=generate_ordered_elements(revise_dos,ordered_list)\n my_ordered_numbers=generate_ordered_numbers(revise_dos,ordered_list,cell_for)\n generate_POSCAR(formu,mat_list,my_ordered_elements,my_ordered_numbers,revise_dos)", "def createDataDescriptionTxtFile(pMassFile=[], pMassDcmpFile=[], pMassDcmpSpkesFile=[], pMassBrianFile=[]):\n\n if isinstance(pMassFile,str):\n if os.path.isfile(pMassFile):\n # Load in the data to see what the hell it is\n inputDataFile = open(pMassFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! 
[dataOut, xmin, xmax, vmin, vmax, amin, amax, dt, tmax]\n numbOfTrials = len(dataOut[0]) # This is the number of individual trails stored\n xmin = dataOut[1]\n xmax = dataOut[2]\n vmin = dataOut[3]\n vmax = dataOut[4]\n amin = dataOut[5]\n amax = dataOut[6]\n dt = dataOut[7]\n tmax = dataOut[8]\n descpFName = pMassFile[:-4]+'DESC.txt'\n file = open(descpFName,'w')\n file.write(\"1-D Moving Mass Data Description\\n\")\n file.write(\"\\nData here is generated from the genAndSaveMoving1DMassData() method\\n\")\n file.write(\"\\nFileName... \"+pMassFile)\n file.write(\"\\niterations. \"+str(numbOfTrials))\n file.write(\"\\nxmin....... \"+str(xmin))\n file.write(\"\\nxmax....... \"+str(xmax))\n file.write(\"\\nvmin........\"+str(vmin))\n file.write(\"\\nvmax........\"+str(vmax))\n file.write(\"\\namin........\"+str(amin))\n file.write(\"\\namax........\"+str(amax))\n file.write(\"\\ndt..........\"+str(dt))\n file.write(\"\\ntmax........\"+str(tmax))\n file.close\n if isinstance(pMassDcmpFile, str):\n if os.path.isfile(pMassDcmpFile):\n # Load in teh data to see what it has it there\n inputDataFile = open(pMassDcmpFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! [segmentedTrials, gCenters, b, dataFile, xStart, xStop, nGaus, bInitial, dt]\n numbOfTrials = len(dataOut[0])\n originatingDataFile = dataOut[3]\n numbGaus = dataOut[6]\n xStart = dataOut[4]\n xStop = dataOut[5]\n GausVarb = dataOut[2]\n dt = dataOut[8]\n descpFName = pMassDcmpFile[:-4]+\"DESC.txt\"\n file = open(descpFName,'w')\n file.write(\"1-D Moving Mass Decomposed Data Description\\n\")\n file.write(\"\\nData here is generated from the decomposeMoving1DMassData() method\\n\")\n file.write(\"\\nFileName.......\"+pMassDcmpFile)\n file.write(\"\\nInputDataFile..\"+originatingDataFile)\n file.write(\"\\niterations.....\"+str(numbOfTrials))\n file.write(\"\\nNumbOfGauss....\"+str(numbGaus))\n file.write(\"\\nxStart.........\"+str(xStart))\n file.write(\"\\nxStop..........\"+str(xStop))\n file.write(\"\\nGausVarbl_b....\"+str(GausVarb))\n file.write(\"\\ndt.............\"+str(dt))\n file.close()\n if isinstance(pMassDcmpSpkesFile, str):\n if os.path.isfile(pMassDcmpSpkesFile):\n # Load in teh data to see what it has it there\n inputDataFile = open(pMassDcmpSpkesFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! [segmentedSpikesList, dataFile, spikeGenType, analgSingnalScaling]\n numbOfTrials = len(dataOut[0])\n originatingDataFile = dataOut[1]\n spikeGenType = dataOut[2]\n analogScl = dataOut[3]\n descpFName = pMassDcmpSpkesFile[:-4]+\"DESC.txt\"\n file = open(descpFName,'w')\n file.write(\"1-D Moving mass Decomposed and Turned to Spikes Data Description\\n\")\n file.write(\"\\nData here is generated from the decompedToSpikes1DMassData\\n\")\n file.write(\"\\nFileName.........\"+pMassDcmpSpkesFile)\n file.write(\"\\nInputDataFile....\"+originatingDataFile)\n file.write(\"\\nIterations.......\"+str(numbOfTrials))\n file.write(\"\\nSpike-Gen-Type...\"+spikeGenType)\n file.write(\"\\nSignal-Scaling...\"+str(analogScl))\n file.close()\n if isinstance(pMassBrianFile, str):\n if os.path.isfile(pMassBrianFile):\n # Load in the data to see what it has in there\n inputDataFile = open(pMassBrianFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! 
[spikesInBrianForm, dataFile, origDataFile]\n descpFName = pMassBrianFile[:-4]+\"DESC.txt\"\n file = open(descpFName, 'w')\n file.write(\"1-D Moving mass data decomposed, turned into spikes, and then turned into a brain sim. compatible format\\n\")\n file.write(\"\\n Data here is generated from the convertSpikeseToBrainForm()\\n\")\n file.write(\"\\nFilename.............................\"+pMassBrianFile)\n file.write(\"\\nOriginating Decompled Data File .... \"+dataOut[1])\n file.close()", "def handle_store(self, event):\n\n \n mode_prefixes = {'CT Image Storage' : 'CT',\n 'Enhanced CT Image Storage' : 'CTE',\n 'MR Image Storage' : 'MR',\n 'Enhanced MR Image Storage' : 'MRE',\n 'Positron Emission Tomography Image Storage' : 'PT',\n 'RT Plan Storage' : 'RP',\n 'RT Structure Set Storage' : 'RS',\n 'Computed Radiography Image Storage' : 'CR',\n 'Ultrasound Image Storage' : 'US',\n 'Enhanced Ultrasound Image Storage' : 'USE',\n 'X-Ray Angiographic Image Storage' : 'XA',\n 'Enhanced XA Image Storage' : 'XAE',\n 'Nuclear Medicine Image Storage' : 'NM',\n 'Secondary Capture Image Storage' : 'SC'\n }\n\n ds = event.dataset\n # Because pydicom uses deferred reads for its decoding, decoding errors\n # are hidden until encountered by accessing a faulty element\n try:\n sop_class = ds.SOPClassUID\n sop_instance = ds.SOPInstanceUID\n except Exception as exc:\n # Unable to decode dataset\n return 0xC210\n\n try:\n # Get the elements we need\n mode_prefix = mode_prefixes[sop_class.name]\n except KeyError:\n mode_prefix = 'UN'\n\n filename = os.path.join(self.config['output']['directory'],'tmp/{0!s}.dcm'.format(uuid.uuid4()))\n\n # Presentation context\n cx = event.context\n\n meta = Dataset()\n meta.MediaStorageSOPClassUID = sop_class\n meta.MediaStorageSOPInstanceUID = sop_instance\n \n meta.TransferSyntaxUID = cx.transfer_syntax\n \n\n ds.file_meta = meta\n ds.is_little_endian = cx.transfer_syntax.is_little_endian\n ds.is_implicit_VR = cx.transfer_syntax.is_implicit_VR\n\n status_ds = Dataset()\n \n try:\n ds.save_as(filename, write_like_original=False)\n self.file_count += 1\n self.writing_queue.put((filename, ds))\n status_ds.Status = 0x0000 # Success\n except IOError:\n # Failed - Out of Resources - IOError\n status_ds.Status = 0xA700\n except:\n # Failed - Out of Resources - Miscellaneous error\n status_ds.Status = 0xA701\n\n\n return status_ds", "def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))", "def build(self):\n self.kwargs.pop('clobber', None)\n\n # Read in mock catalog with assigned photometric redshifts\n # and calculate the line-of-sight displacement between the \n 
# upweighted galaxy and the photometric redshift of the \n # collided galaxy \n photoz_cat_corr = {\n 'catalog': self.cat_corr['catalog'].copy(), \n 'correction': {'name': 'photoz'}\n }\n dataclass = Data('data', photoz_cat_corr) \n dataclass.read() \n\n cosmo = dataclass.cosmo()\n\n coll = np.where(dataclass.wfc == 0) \n \n dlos_actual = (cosmos.distance.comoving_distance(dataclass.z[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n dlos_photoz = (cosmos.distance.comoving_distance(dataclass.photoz[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n\n # each value of d_NN corresponds to a dLOS value \n # in dLOS file \n print self.file_name\n np.savetxt(self.file_name, \n np.c_[dlos_actual, dlos_photoz], \n fmt=['%10.5f', '%10.5f'],\n header='Columns : dLOS, dLOS_photoz'\n ) \n\n return None", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def dict2file(dict, filename, foldername):\n if foldername:\n if not os.path.exists(\"../Created_QD/\" + foldername):\n os.makedirs(\"../Created_QD/\" + foldername)\n file = open(\"../Created_QD/\" + foldername + \"/\" + filename + \".xyz\", \"w\")\n else:\n file = open(\"../Created_QD/\" + filename + \".xyz\", \"w\")\n file.write(\" \\n\\n\")\n for atom, values in dict.items():\n file.write(values['element'] + \"\\t\" + str(values['coor'][0]) + \"\\t\\t\" +\n str(values['coor'][1]) + \"\\t\\t\" + str(values['coor'][2]) + \"\\n\")\n file.seek(0)\n file.write(str(len(dict)))\n file.close()\n print(\"\\nQuantum Dot created :)\")", "def createckfk(self, observer, dbname, t0, field1, nfields, mk): \n\n observerint=self.mpc2internal(observer)\n instrumentint=observerint*1000\n\n with open(\"cksetupfile\", \"w\") as f:\n f.write(\"KPL/IK \\nComments describing the keywords and values \\nto follow, as well as any other pertinent \\ninformation.\\n\\\\begindata\\n\")\n f.write(\"LSK_FILE_NAME = '%s'\\n\" %(mk))\n f.write(\"\\n\")\n f.write(\"INTERNAL_FILE_NAME = 'Survey Sim Camera Orientation'\\n\")\n f.write(\"\\n\")\n f.write(\"MAKE_FAKE_SCLK = 'tmpsclk'\\n\")\n f.write(\"CK_TYPE = 3\\n\")\n f.write(\"CK_SEGMENT_ID = 'Instrument Orientation'\\n\")\n f.write(\"INSTRUMENT_ID = %i \\n\" 
%(instrumentint))\n f.write(\"REFERENCE_FRAME_NAME = 'J2000'\\n\")\n f.write(\"ANGULAR_RATE_PRESENT = 'NO'\\n\")\n f.write(\"\\n\")\n f.write(\"INPUT_DATA_TYPE = 'SPICE QUATERNIONS'\\n\")\n f.write(\"INPUT_TIME_TYPE = 'UTC'\\n\")\n f.write(\"MAXIMUM_VALID_INTERVAL = 60\\n\") \n f.write(\"\\n\")\n f.write(\"PRODUCER_ID = 'Survey Sim, JPL'\\n\")\n f.write(\"\\\\begintext\")\n f.close()\n\n\n self.readfields(dbname,field1,nfields, t0)\n with open(\"ckip\",\"w\") as f:\n\n for i in range(len(self.fieldRA)):\n quat=self.computerotmat(self.fieldRA[i], self.fieldDec[i], self.rotSkyPos[i])\n\n #This helps with duplicate entries. For example enigma_1189 can have same fieldID's under different propID's\n #Issue warning for duplicate time. Have a verbose mode for displaying that (true as default)\n if (self.fieldMJD[i] !=self.fieldMJD[i-1]):\n JD=self.fieldMJD[i]+shared.mjd2jd\n timestring= 'JD'+repr(JD)\n f.write(\"%s %f %f %f %f\\n\" %(timestring,quat[0],quat[1],quat[2],quat[3]))\n f.close()\n try:\n os.system('rm tmp.ck tmpsclk test.ck fakesclk >/dev/null')\n except:\n pass\n os.system('msopck cksetupfile ckip tmp.ck > /dev/null')\n\n os.system('rsync tmpsclk fakesclk > /dev/null')\n os.system('rsync tmp.ck test.ck > /dev/null')\n\n with open(\"tmp.fk\",\"w\") as f:\n f.write(\"\\\\begindata\\n\\n\")\n f.write(\"FRAME_CAMERA_FRAME = %i\\n\" %(instrumentint))\n f.write(\"FRAME_%i_NAME = 'CAMERA_FRAME'\\n\" %(instrumentint))\n f.write(\"FRAME_%i_CLASS = 3\\n\" %(instrumentint))\n f.write(\"FRAME_%i_CLASS_ID = %i\\n\" %(instrumentint, instrumentint))\n f.write(\"FRAME_%i_CENTER = %i\\n\" %(instrumentint, observerint))\n f.write(\"CK_%i_SCLK = %i\\n\" %(instrumentint, observerint))\n f.write(\"CK_%i_SPK = %i\\n\\n\" %(instrumentint, observerint))\n f.write(\"\\\\begintext\\n\")\n f.close()\n \n os.system('rsync tmp.fk test.fk')", "def create_sdxmetadata(sdx_dir, output_dir):\n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #segment and store metadata \n #define SDX files to be read\n for root, dirs, files in os.walk(sdx_dir):\n for idx, file in enumerate(files):\n if file.endswith(\".sdx\"):\n \n print(\"Reading File: \" + file)\n \n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #scan for pick info\n with open(root + file,\"r\") as f:\n searchlines = f.readlines()\n for i, line in enumerate(searchlines):\n #strip whitespace/end-of-line characters for exact text matching\n line = line.rstrip()\n #find pick info\n if \"pick\" == line:\n for l in searchlines[i:i+16]: \n #print(l)\n #assign pick info/instrument info to variables and store\n instrument_info = searchlines[i+1]\n pick_info = searchlines[i+2]\n phase_info = searchlines[i+9:i+13]\n instrument.append(instrument_info)\n picks.append(pick_info)\n phases.append(phase_info)\n \n #create a .txt file for each seperate event to store pick info\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n f = open(output_dir + os.path.splitext(file)[0] + \".txt\",'w')\n #header information...\n f.write('Data read from correpsonding SDX file:' + '\\n')\n f.write(file + '\\n\\n')\n f.write('Instrument/component' + '\\t\\t\\t' + 'Pick information' '\\t\\t\\t' + 'Phase information\\n')\n \n # print both instrument and pick information to the \n # associated event file\n for item in zip(instrument, picks, phases):\n \n #remove preceding whitespace/formatting characters\n item0 = item[0].rstrip()\n item1 = item[1].rstrip()\n item2 = list(map(str.strip, item[2]))\n \n 
#remove associated list formatting\n item2 = (\", \".join( str(e) for e in item2))\n\n #print...\n #format | instrument info | pick info | phase info\n f.write(\"%s\\t\\t%s\\t\\t%s\\n\" % (item0,item1,item2))\n \n f.close()", "def dump_to_disk(self, prefix):\n\n f = open(prefix + rpki.sundial.now().isoformat() + \"Z.cms\", \"wb\")\n f.write(self.get_DER())\n f.close()", "def writeCADFile(self, filename):\n valid_filetypes = [\"brep\", \"bstl\", \"egads\", \"egg\", \"iges\", \"igs\", \"sens\", \"step\", \"stl\", \"stp\", \"tess\", \"grid\"]\n file_extension = filename.split(\".\")[-1]\n if file_extension.lower() not in valid_filetypes:\n raise OSError(\n \"CAD filename \"\n + filename\n + \" must have a valid exension. \"\n + \"Consult the EngineeringSketchPad docs for the DUMP function\"\n )\n if self.comm.rank == 0:\n modelCopy = self.espModel.Copy()\n n_branches, _, _ = modelCopy.Info()\n modelCopy.NewBrch(\n n_branches, modelCopy.GetCode(\"dump\"), \"<none>\", 0, filename, \"0\", \"0\", \"0\", \"\", \"\", \"\", \"\", \"\"\n )\n modelCopy.Build(0, 0)", "def _generate_metadata_kind(filename, items, affidavit=None):\n store = appstream.Store('lvfs')\n for item in items:\n\n # add each component\n for md in item.mds:\n component = appstream.Component()\n component.id = md.cid\n component.kind = 'firmware'\n component.name = md.name\n component.summary = md.summary\n component.description = md.description\n if md.url_homepage:\n component.urls['homepage'] = md.url_homepage\n component.metadata_license = md.metadata_license\n component.project_license = md.project_license\n component.developer_name = md.developer_name\n\n # add provide\n for guid in md.guids:\n prov = appstream.Provide()\n prov.kind = 'firmware-flashed'\n prov.value = guid\n component.add_provide(prov)\n\n # add release\n if md.version:\n rel = appstream.Release()\n rel.version = md.version\n rel.description = md.release_description\n if md.release_timestamp:\n rel.timestamp = md.release_timestamp\n rel.checksums = []\n rel.location = app.config['FIRMWARE_BASEURL'] + item.filename\n rel.size_installed = md.release_installed_size\n rel.size_download = md.release_download_size\n rel.urgency = md.release_urgency\n component.add_release(rel)\n\n # add container checksum\n if md.checksum_container:\n csum = appstream.Checksum()\n csum.target = 'container'\n csum.value = md.checksum_container\n csum.filename = item.filename\n rel.add_checksum(csum)\n\n # add content checksum\n if md.checksum_contents:\n csum = appstream.Checksum()\n csum.target = 'content'\n csum.value = md.checksum_contents\n csum.filename = md.filename_contents\n rel.add_checksum(csum)\n\n # add screenshot\n if md.screenshot_caption:\n ss = appstream.Screenshot()\n ss.caption = md.screenshot_caption\n if md.screenshot_url:\n im = appstream.Image()\n im.url = md.screenshot_url\n ss.add_image(im)\n component.add_screenshot(ss)\n\n # add requires for each allowed vendor_ids\n group = db.groups.get_item(item.group_id)\n if group.vendor_ids:\n req = appstream.Require()\n req.kind = 'firmware'\n req.value = 'vendor-id'\n if len(group.vendor_ids) == 1:\n req.compare = 'eq'\n else:\n req.compare = 'regex'\n req.version = '|'.join(group.vendor_ids)\n component.add_require(req)\n\n # add manual firmware or fwupd version requires\n for req_txt in md.requirements:\n split = req_txt.split('/', 4)\n req = appstream.Require()\n req.kind = split[0]\n req.value = split[1]\n req.compare = split[2]\n req.version = split[3]\n component.add_require(req)\n\n # add component\n 
store.add(component)\n\n # dump to file\n download_dir = app.config['DOWNLOAD_DIR']\n if not os.path.exists(download_dir):\n os.mkdir(download_dir)\n filename = os.path.join(download_dir, filename)\n store.to_file(filename)\n\n # upload to the CDN\n blob = open(filename, 'rb').read()\n _upload_to_cdn(filename, blob)\n\n # generate and upload the detached signature\n if affidavit:\n blob_asc = affidavit.create(blob)\n _upload_to_cdn(filename + '.asc', blob_asc)", "def prepare_dataset(sdffile, dest=None, overwrite=False):\n root, name = op.split(sdffile)\n name = op.splitext(name)[0]\n\n if not dest: dest = root\n\n dest_sdf = op.join(dest, name + '-prepared.sdf')\n master_table = op.join(dest, name + '-master.csv')\n sali_table = op.join(dest, name + '-saliviewer.csv')\n\n if op.exists(dest_sdf) and not overwrite:\n print '%s is already there and not overwriting requested' % dest_sdf\n else:\n print 'Reading %s' % sdffile\n mols = list(pybel.readfile('sdf', sdffile))\n\n print '\\tCreating dataset root: %s' % dest\n if not op.exists(dest):\n os.makedirs(dest)\n\n print '\\tRenaming the compounds to keep track of the provenance'\n rename_mols_by_index(mols, name + '-')\n\n print '\\tGenerating conformations'\n for mol in mols:\n if not any(name in mol.title for name in ('train-3988', 'train-4205')):\n try:\n print 'Conformation for %s' % mol.title\n mol.make3D()\n except Exception:\n print 'Error computing a 3D conformation for %s' % mol.title\n\n print '\\tSaving compounds'\n save_mols(mols, dest_sdf)\n\n print '\\tCreating \\\"master\\\" table: %s' % master_table\n create_master_table(dest_sdf, master_table, fields=['Activity'])\n\n print '\\tCreating \\\"saliviewer\\\" table: %s' % sali_table\n create_saliviewer_input(master_table, sali_table)\n\n return dest_sdf, master_table", "def _build_meds_layout(self):\n\n\n nim = self.image_info.size\n nobj = self.obj_data.size\n\n trim_to_coadd = self.get('trim_to_coadd',False)\n if trim_to_coadd:\n print(' trimming to coadd')\n coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \\\n self._get_pos_and_bounds(self.obj_data, 0)\n in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol'])\n w_in_bnds, = np.where(in_bnds == True)\n assert w_in_bnds.size > 0,\"none found in coadd\"\n\n w_in_bnds = coadd_q[w_in_bnds]\n self.obj_data = self.obj_data[w_in_bnds]\n\n self._do_psf_setup()\n\n # box sizes are even\n half_box_size = self.obj_data['box_size']//2\n\n for file_id in range(nim):\n\n wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id)\n\n # do the test\n in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])\n q_rc, = np.where(in_bnds == True)\n print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))\n\n # now make sure everything is there\n if self['check_in_first_image']:\n if file_id == 0 and len(self.obj_data['ra']) != len(q_rc):\n raise MEDSCreationError('Not all objects were found in first image for '\n 'MEDS making (which is the coadd/detection '\n 'image by convention).')\n # compose them\n q = q[q_rc]\n\n # fill in the object_data structure\n\n # note q_rc since pos was created using obj_data[q]\n qrow = pos['zrow'][q_rc]\n qcol = pos['zcol'][q_rc]\n\n icut = self.obj_data['ncutout'][q]\n self.obj_data['file_id'][q,icut] = file_id\n self.obj_data['orig_row'][q,icut] = qrow\n self.obj_data['orig_col'][q,icut] = qcol\n\n # this results in the object center being close to\n # the natural center (dim-1.)/2.\n ostart_row = qrow.astype('i4') - half_box_size[q] + 1\n ostart_col = qcol.astype('i4') - 
half_box_size[q] + 1\n crow = qrow - ostart_row\n ccol = qcol - ostart_col\n\n self.obj_data['orig_start_row'][q,icut] = ostart_row\n self.obj_data['orig_start_col'][q,icut] = ostart_col\n self.obj_data['cutout_row'][q,icut] = crow\n self.obj_data['cutout_col'][q,icut] = ccol\n\n # do jacobian, in original, not-offset coords\n # note q_rc since pos was created using self.obj_data[q]\n jacob = wcs.get_jacobian(\n x=pos['wcs_col'][q_rc],\n y=pos['wcs_row'][q_rc])\n\n # jacob is a tuple of arrays\n self.obj_data['dudcol'][q,icut] = jacob[0]\n self.obj_data['dudrow'][q,icut] = jacob[1]\n self.obj_data['dvdcol'][q,icut] = jacob[2]\n self.obj_data['dvdrow'][q,icut] = jacob[3]\n\n # increment\n self.obj_data['ncutout'][q] += 1\n\n w,=np.where(self.obj_data['ncutout'] > 0)\n print('%d/%d had ncut > 0' % (w.size, self.obj_data.size))\n #self.obj_data = self.obj_data[w]\n\n self.obj_data = self._make_resized_data(self.obj_data)\n print('setting number field as sequential')\n self.obj_data['number'] = 1+np.arange(self.obj_data.size)\n\n\n self._set_start_rows_and_pixel_count()\n\n if self['survey']=='cosmos':\n self._set_psf_layout_hst()\n else:\n self._set_psf_layout_psfex()", "def export_ctsdg(cfg):\n generator = Generator(\n image_in_channels=config.image_in_channels,\n edge_in_channels=config.edge_in_channels,\n out_channels=config.out_channels\n )\n generator.set_train(False)\n load_checkpoint(cfg.checkpoint_path, generator)\n\n ckpt_path = Path(cfg.checkpoint_path)\n output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix()\n file_format = config.file_format\n\n img_dummy = mnp.zeros([1, config.image_in_channels, *cfg.image_load_size],\n dtype=mstype.float32)\n edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32)\n mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32)\n\n export(generator, img_dummy, edge_dummy, mask_dummy,\n file_name=output_file_name, file_format=file_format)\n\n print(f'{output_file_name}.mindir exported successfully!', flush=True)", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def create_pythia_cmnd_files(self):\n \n for higgsname, higgspid in {'H': 35, 'A': 36}.iteritems():\n \n # Get mass and width from 2HDMC LHA file\n lha = LHA(self.lhafile)\n mass = lha.get_block('MASS').get_entry_by_key(higgspid)\n width = lha.get_decay(higgspid).width \n \n outname = self.lhafile.replace('.lha', '_%s.cmnd' % higgsname)\n self.cmndfiles[higgsname] = outname\n \n # Write command file\n with open(outname, 'w') as outfile:\n \n outfile.write('Beams:eCM = 13000.\\n')\n outfile.write('Higgs:useBSM = on\\n')\n \n if higgspid == 36:\n #outfile.write('HiggsBSM:allA3 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2A3 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2A3 = on\\n') # gluon fusion\n elif higgspid == 35:\n #outfile.write('HiggsBSM:allH2 = on\\n') # All production modes\n outfile.write('HiggsBSM:ffbar2H2 = on\\n') # quark fusion\n outfile.write('HiggsBSM:gg2H2 = on\\n') # gluon fusion\n \n outfile.write('{}:all = A0 A0 1 0 0 {} {} 50.0 0.0\\n'.format(higgspid, mass, width))\n outfile.write('{}:onMode = off\\n'.format(higgspid))\n outfile.write('{}:onIfMatch = 15 -15\\n'.format(higgspid))\n \n outfile.write('15:onMode = off\\n')\n outfile.write('15:onIfMatch = 16 111 211\\n')\n outfile.write('\\n')\n outfile.write('Next:numberShowEvent = 0\\n')\n\n return 0", "def write_to_md(dictData, outputDirectory):\n\tdic = prepare_hw_dict(dictData)\n\tfor hw in dic:\n\t\tfileout = os.path.join(outputDirectory, hw+'.md')\n\t\t# Prepare the output file\n\t\tfout = codecs.open(fileout, 'w', 'utf-8')\n\t\t#‌ Write frontmatter\n\t\tfout.write('---\\ntitle: \"'+hw+'\"\\n---\\n\\n')\n\t\t# For each (headword, meanings, verseNumber, PageNum) tuples,\n\t\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dic[hw]:\n\t\t\tcommaed = ', '.join(meanings)\n\t\t\tverse = verse.replace('<BR>', '<br />')\n\t\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\t\tfout.write('# ' + hw + '\\n## ' + commaed + '\\n' + verse + '<br />verse ' + verseNumDetails + '<br />page ' + pageNumDetails +'\\n\\n')\n\t\tfout.close()\n\n\t# Give some summary to the user\n\tprint('MD files generated. 
Success!')\n\tprint('{} separate .md files written, one per headword.'.format(len(dic)))", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()", "def build_plasticc_metadata(fname_meta: str, snana_dir: str, out_fname,\n screen=False, extragal=True, field='DDF'):\n\n # map between zenodo and SNANA types\n SNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},\n 67:41, 52:43, 64:51, 95:60, 994:61, 992:62,\n 993:63, 15:64, 88:70, 92:80, 65:81, 16:83,\n 53:84, 991:90, 6:{1:91, 2:93}}\n \n extragal_zenodo_types = [15, 42, 52, 62, 64, 67, 88, 90, 95]\n \n # read zenodo metadata\n meta = pd.read_csv(fname_meta)\n\n if field == 'DDF':\n # identify only DDF objects\n ddf_flag = meta['ddf_bool'].values == 1\n elif field == 'WFD':\n ddf_flag = meta['ddf_bool'].values == 0\n else:\n ddf_flag = np.array([True for i in range(meta.shape[0])])\n\n # get ids\n ids = meta['object_id'].values[ddf_flag] \n\n names = get_SNR_headers()\n\n if not os.path.isfile(out_fname):\n op = open(out_fname, 'w+')\n for item in names[:-1]:\n op.write(item + ',')\n op.write(names[-1] + '\\n')\n\n else: \n op = open(out_fname, 'a+')\n \n # which group to search for\n if extragal:\n search_group = extragal_zenodo_types\n else:\n search_group = list(SNANA_types.keys())\n \n for code_zenodo in search_group:\n \n if screen:\n print('code_zenodo: ', code_zenodo)\n\n if code_zenodo not in [62, 42, 6]:\n code_snana = SNANA_types[code_zenodo]\n\n for n in range(1, 11):\n fname2 = snana_dir + 'LSST_DDF_MODEL' + str(code_snana).zfill(2) + \\\n '/LSST_DDF_NONIa-00' + str(n).zfill(2) + '_PHOT.FITS.gz'\n\n photo = read_fits(fname2)\n\n for indx in range(photo[0].shape[0]):\n # read data for 1 object\n snid_raw = photo[0]['SNID'].values[indx]\n snid = int(re.sub(\"[^0-9]\", \"\", str(snid_raw)))\n\n if snid in ids: \n line = calculate_SNR(snid=snid, \n code_zenodo=code_zenodo,\n photo_data=photo[1],\n head_data=photo[0],\n snana_file_index=n,\n code_snana=code_snana)\n \n if len(line) > 0:\n for item in line[:-1]:\n op.write(str(item) + ',')\n op.write(str(line[-1]) + '\\n')\n \n del photo\n \n else:\n for subtype in SNANA_types[code_zenodo].keys():\n code_snana = SNANA_types[code_zenodo][subtype]\n \n for n in range(1, 11): \n fname2 = snana_dir + 'LSST_DDF_MODEL' + str(code_snana).zfill(2) + \\\n '/LSST_DDF_NONIa-00' + str(n).zfill(2) + '_PHOT.FITS.gz'\n\n photo = read_fits(fname2)\n\n for indx in range(photo[0].shape[0]):\n\n # read data for 1 object\n snid_raw = photo[0]['SNID'].values[indx]\n snid = int(re.sub(\"[^0-9]\", \"\", str(snid_raw)))\n\n if snid in ids:\n line = calculate_SNR(snid=snid, \n code_snana=code_snana,\n code_zenodo=code_zenodo,\n photo_data=photo[1], \n head_data=photo[0],\n snana_file_index=n)\n \n if len(line) > 0:\n for item in line[:-1]:\n op.write(str(item) + ',')\n op.write(str(line[-1]) + '\\n')\n \n del photo\n \n op.close()", "def make_header_files():\n os.makedirs(DATA_DIR) if not os.path.exists(DATA_DIR) else None\n from dkistdataratemodel.units import frame\n from 
dkist_data_model.generator.dataproducts.visp import CalibratedVISP\n\n \"\"\"\n Generate VISP\n \"\"\"\n visp = CalibratedVISP(end_condition=20*frame)\n\n visp_files = visp.to_fits(\"sp_5_labelled\",\n path_template=os.path.join(DATA_DIR, 'visp_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"visp.zip\"), \"w\") as myzip:\n for fname in visp_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)\n\n \"\"\"\n Generate VTF\n \"\"\"\n from dkist_data_model.generator.dataproducts.vtf import CalibratedVTF\n vtf = CalibratedVTF(end_condition=96*frame)\n\n vtf_files = vtf.to_fits(\"5d_test\",\n path_template=os.path.join(DATA_DIR, 'vtf_5d_{i:02d}.fits'))\n\n with ZipFile(os.path.join(DATA_DIR, \"vtf.zip\"), \"w\") as myzip:\n for fname in vtf_files:\n myzip.write(fname, os.path.split(fname)[1])\n os.remove(fname)", "def Generar_Claves():\n salida=Keypp()\n savekey(salida)\n savecomp(salida)", "def generate():", "def write_db_tables(self, datadir='.', do_com=False, do_angmom=False, \n do_totalcom=False, do_totalangmom=False, do_normals=False,\n do_sigmas=False, do_relmotion=False):\n\n filepath = Path(datadir)\n\n db = DB()\n cur = db.get_cursor()\n\n # CoM data\n if do_com:\n colheads = ','.join(['gal','snap','t','x','y','z','vx','vy','vz'])\n query = f\"\"\"\n INSERT INTO centerofmass( {colheads} ) \n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n for gname in ('MW','M31','M33'):\n filename = f'com_{gname}.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [gname, snap,] + list(d)\n cur.execute(query, rec)\n\n # angular momentum data\n if do_angmom:\n colheads = ','.join(['gal','snap','t','x_hat','y_hat','z_hat','l_mag'])\n query = f\"\"\"\n INSERT INTO angmom( {colheads} ) \n VALUES (%s,%s,%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n for gname in ('MW','M31','M33'):\n filename = f'angmom_{gname}.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [gname, snap,] + list(d)\n cur.execute(query, rec)\n\n # total CoM data\n if do_totalcom:\n colheads = ','.join(['snap','t','x','y','z','vx','vy','vz'])\n query = f\"\"\"\n INSERT INTO totalcom( {colheads} ) \n VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n filename = 'total_com.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [snap,] + list(d)\n cur.execute(query, rec)\n\n # total angular momentum data\n if do_totalangmom:\n colheads = ','.join(['snap','t','Lx','Ly','Lz'])\n query = f\"\"\"\n INSERT INTO totalangmom( {colheads} ) \n VALUES (%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n filename = 'total_angmom.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [snap,] + list(d)\n cur.execute(query, rec)\n\n # 3-galaxy normals data\n if do_normals:\n colheads = ','.join(['snap','t','x_hat','y_hat','z_hat'])\n query = f\"\"\"\n INSERT INTO normals( {colheads} ) \n VALUES (%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n filename = 'normals.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [snap,] + list(d)\n cur.execute(query, rec)\n\n # velocity dispersions\n if do_sigmas:\n colheads = ','.join(['gal','snap','t','sigma'])\n query = f\"\"\"\n INSERT INTO sigmas( {colheads} ) \n VALUES (%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n 
\"\"\"\n\n for gname in ('MW','M31','M33'):\n filename = f'sigma_{gname}.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [gname, snap,] + list(d)\n cur.execute(query, rec)\n\n # relative motions\n if do_relmotion:\n colheads = ','.join(['snap','t','pos_MW_M31','pos_M33_M31','pos_M33_MW',\n 'vel_MW_M31','vel_M33_M31','vel_M33_MW'])\n query = f\"\"\"\n INSERT INTO relmotion( {colheads} ) \n VALUES (%s,%s,%s,%s,%s,%s,%s,%s)\n ON CONFLICT DO NOTHING\n \"\"\"\n\n filename = 'relmotion.txt'\n fullname = filepath / filename\n data = self.read_file(fullname)\n \n for snap, d in enumerate(data):\n rec = [snap,] + list(d)\n cur.execute(query, rec)", "def opensslCmsDataCreate( conveyedInfoFile ):\n opensslCmdArgs = [ \"openssl\", \"cms\", \"-data_create\", \"-in\", conveyedInfoFile,\n \"-outform\", \"der\" ]\n conveyedInfoCmsDerBase64 = runOpensslCmd( opensslCmdArgs, [ \"base64\" ] )\n return conveyedInfoCmsDerBase64", "def vacuum_cgmd(self):\n\n\t\texstring_dssp = 'except: cannot find dssp at '+gmxpaths['dssp']+\\\n\t\t\t'\\nconsider using the following syntax to download for 64-bit linux:'+\\\n\t\t\t'\\n\\twget ftp://ftp.cmbi.ru.nl/pub/software/dssp/dssp-2.0.4-linux-amd64'+\\\n\t\t\t'\\n\\tor navigate to ftp://ftp.cmbi.ru.nl/pub/software/dssp/'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\t\t\n\t\texstring_martinize = 'except: cannot find martinize at '+gmxpaths['martinize']+\\\n\t\t\t'\\nconsider using the following syntax to download:'+\\\n\t\t\t'\\n\\twget http://md.chem.rug.nl/cgmartini/images/tools/martinize/martinize-2.4/martinize.py'+\\\n\t\t\t'\\n\\tor navigate to http://md.chem.rug.nl/cgmartini/index.php/tools2/proteins-and-bilayers'+\\\n\t\t\t'\\n\\tand make sure you add execute permissions'\n\t\n\t\t#---first test to see if executables are available\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['dssp'])): raise Exception(exstring_dssp)\n\t\tif not os.path.isfile(os.path.expanduser(gmxpaths['martinize'])): raise Exception(exstring_martinize)\t\n\t\n\t\tcmd = [gmxpaths['martinize'],\n\t\t\t'-f system-input.pdb',\n\t\t\t'-o system-original.top',\n\t\t\t'-x protein-cg.pdb',\n\t\t\t'-ff martini22','-ed',\n\t\t\t'-dssp '+gmxpaths['dssp']]\n\t\tcall(cmd,logfile='log-martinize',cwd=self.rootdir)\n\t\t\n\t\twith open(self.rootdir+'system-original.top') as fp: lines = fp.readlines()\n\t\tself.itp_protein = [l.split()[0] for l in lines if l[:7] == 'Protein']\n\n\t\t#---note that this section leaves out lipids\n\t\tself.itp_lipid = []\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.nprots = [1]\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f protein-cg.pdb',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-convert',cwd=self.rootdir)\n\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t'-o vacuum.gro','-c']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir)\n\t\t\n\t\tself.minimization_method('vacuum')", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def writeNew(self,masters=[],mtime=0):\n 
tes3 = Tes3()\n tes3.hedr = Tes3_Hedr('HEDR',0)\n if self.isEsp(): tes3.hedr.fileType = 0\n elif self.isEsm(): tes3.hedr.fileType = 1\n elif self.isEss(): tes3.hedr.fileType = 32\n for master in masters:\n tes3.masters.append((master,modInfos[master].size))\n tes3.hedr.setChanged()\n tes3.setChanged()\n #--Write it\n path = os.path.join(self.dir,self.name)\n out = file(path,'wb')\n tes3.getSize()\n tes3.dump(out)\n out.close()\n self.setMTime(mtime)", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n 
fout.write('@ATTRIBUTE STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process <{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def make_libfile():\n # wfc3_obsmodes_uvis\n wfc3_uvis = [\n \"f218w\",\n \"f225w\",\n \"f275w\",\n \"f336w\",\n \"f390m\",\n \"f390w\",\n \"f410m\",\n \"f438w\",\n \"f467m\",\n \"f475w\",\n \"f547m\",\n \"f555w\",\n \"f606w\",\n \"f621m\",\n \"f625w\",\n \"f689m\",\n \"f763m\",\n \"f775w\",\n \"f814w\",\n \"f845m\",\n ]\n\n wfc3_ir = [\n \"f098m\",\n \"f105w\",\n \"f110w\",\n \"f125w\",\n \"f127m\",\n \"f139m\",\n \"f140w\",\n \"f153m\",\n \"f160w\",\n ]\n\n wfpc2 = [\n \"f122m\",\n \"f157w\",\n \"f336w\",\n \"f410m\",\n \"f467m\",\n \"f547m\",\n \"f439w\",\n \"f569w\",\n \"f675w\",\n \"f791w\",\n 
\"f170w\",\n \"f185w\",\n \"f218w\",\n \"f255w\",\n \"f300w\",\n \"f380w\",\n \"f555w\",\n \"f622w\",\n \"f450w\",\n \"f606w\",\n \"f702w\",\n \"f814w\",\n ]\n\n acs_wfc = [\n \"f435w\",\n \"f475w\",\n \"f550m\",\n \"f555w\",\n \"f606w\",\n \"f625w\",\n \"f775w\",\n \"f814w\",\n ]\n # galex\n galex = [\"fuv\", \"nuv\"]\n\n # Open hd5 file for writing\n hf = h5py.File(__ROOT__ + \"filters.hd5\", \"w\")\n\n # Create group for nice hierarchical structure\n f = hf.create_group(\"filters\")\n\n # Define arrays for \"contents\" / descriptive information\n tablenames = []\n observatories = []\n instruments = []\n names = []\n norms = []\n cwaves = []\n pwaves = []\n comments = []\n\n # Loop through WFC3_UVIS filters\n for filt in wfc3_uvis:\n\n # define uvis 1 and uvis2 modes\n mode_1 = \"wfc3, uvis1, \" + filt\n mode_2 = \"wfc3, uvis2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of uvis1 and uvis2\")\n\n # Loop through WFC3_IR filters\n for filt in wfc3_ir:\n\n # define ir mode\n mode = \"wfc3, ir, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"HST_WFC3_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFC3\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # Loop through WFPC2 filters\n for filt in wfpc2:\n\n # define chips 1, 2, 3, 4 modes\n mode_1 = \"wfpc2, 1, \" + filt\n mode_2 = \"wfpc2, 2, \" + filt\n mode_3 = \"wfpc2, 3, \" + filt\n mode_4 = \"wfpc2, 4, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n bp_3 = stsyn.band(mode_3)\n bp_4 = stsyn.band(mode_4)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = 
np.average([bp_1(wave), bp_2(wave), bp_3(wave), bp_4(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_WFPC2_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"WFPC2\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of 1, 2, 3, 4\")\n\n # Loop through ACS filters\n for filt in acs_wfc:\n\n # define wfc1, wfc2 modes\n mode_1 = \"acs, wfc1, \" + filt\n mode_2 = \"acs, wfc2, \" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp_1 = stsyn.band(mode_1)\n bp_2 = stsyn.band(mode_2)\n\n # extract the wavelength array\n wave = bp_1.waveset\n\n # compute the average bandpass between uvis1 and uvis2\n bp_avg = np.average([bp_1(wave), bp_2(wave)], axis=0)\n\n # define the filter name\n filter_name = \"HST_ACS_WFC_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp_avg.astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp_avg, name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"HST\")\n instruments.append(\"ACS_WFC\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"avg of wfc1 and wfc2\")\n\n # Loop through GALEX filters:\n for filt in galex:\n # define ir mode\n mode = \"galex,\" + filt\n\n # pull bandpasses from stsynphot for the two uvis modes\n bp = stsyn.band(mode)\n\n # extract the wavelength array\n wave = bp.waveset\n\n # define the filter name\n filter_name = \"GALEX_\" + filt.upper()\n\n # build array of wavelength and throughput\n arr = np.array(\n list(zip(wave.value.astype(np.float64), bp(wave).astype(np.float64))),\n dtype=[(\"WAVELENGTH\", \"float64\"), (\"THROUGHPUT\", \"float64\")],\n )\n\n # append dataset to the hdf5 filters group\n f.create_dataset(filter_name, data=arr)\n\n # generate filter instance to compute relevant info\n newfilt = phot.Filter(wave, bp(wave), name=filt.upper())\n\n # populate contents lists with relevant information\n tablenames.append(filter_name)\n observatories.append(\"GALEX\")\n instruments.append(\"GALEX\")\n names.append(newfilt.name)\n norms.append(newfilt.norm.value)\n cwaves.append(newfilt.cl.value)\n pwaves.append(newfilt.lpivot.value)\n comments.append(\"\")\n\n # smash the contents arrays together\n contents = np.array(\n list(\n zip(\n tablenames,\n observatories,\n instruments,\n names,\n norms,\n cwaves,\n pwaves,\n comments,\n )\n ),\n dtype=[\n (\"TABLENAME\", \"S40\"),\n (\"OBSERVATORY\", \"S30\"),\n (\"INSTRUMENT\", \"S30\"),\n (\"NAME\", \"S10\"),\n (\"NORM\", \"<f8\"),\n (\"CWAVE\", \"<f8\"),\n 
(\"PWAVE\", \"<f8\"),\n (\"COMMENT\", \"S100\"),\n ],\n )\n\n # add the contents array as an hd5 dataset\n hf.create_dataset(\"content\", data=contents)\n\n # close the file\n hf.close()", "def WriteDiary():\r\n from datetime import datetime\r\n\r\n diaryname = _getPOSCAR()\r\n diary = open(diaryname, \"w\")\r\n diary.write('***' + str(datetime.now()) + '***' + '\\n')\r\n diary.write('## ' + diaryname + '\\n')\r\n diary.close()\r\n _CopyWriteDiary('Readme', diaryname)\r\n _CopyWriteDiary('INCAR', diaryname)\r\n _CopyWriteDiary('KPOINTS', diaryname)\r\n _CopyWriteDiary('POSCAR', diaryname)\r\n _CopyWriteDiary('POTCAR', diaryname)\r\n os.rename(diaryname, diaryname + '.md')", "def gen_metars(obs, filename, convids=False):\n mtime = datetime.datetime.utcnow().strftime(\"%d%H%M\")\n thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)\n thres = thres.replace(tzinfo=pytz.UTC)\n fp = open(filename, 'w')\n fp.write(\"\\001\\015\\015\\012001\\n\")\n fp.write(\"SAUS43 KDMX %s\\015\\015\\012METAR\\015\\015\\012\" % (mtime, ))\n for sid in obs:\n ob = obs[sid]\n if ob['valid'] < thres:\n continue\n if sid in [\"RIOI4\", \"ROSI4\", \"RSMI4\", 'RMCI4']:\n continue\n metarid = sid[:4]\n remoteid = NT.sts[sid]['remote_id']\n if convids:\n metarid = RWIS2METAR.get(\"%02i\" % (remoteid,), 'XXXX')\n temptxt = \"\"\n t_temptxt = \"\"\n windtxt = \"\"\n if ob.get('sknt') is not None and ob.get('drct') is not None:\n windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))\n if obs.get('tmpf') is not None and obs.get('dwpf') is not None:\n m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))\n m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))\n temptxt = \"%s/%s\" % (m_tmpc, m_dwpc)\n t_temptxt = \"T%s%s \" % (t_tmpc, t_dwpc)\n fp.write((\"%s %s %s %s RMK AO2 %s%s\\015\\015\\012\"\n \"\") % (metarid, ob['valid'].strftime(\"%d%H%MZ\"),\n windtxt, temptxt, t_temptxt, \"=\"))\n\n fp.write(\"\\015\\015\\012\\003\")\n fp.close()", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def make_data(self, verbose=False):\n if self.h0:\n self.make_cff(verbose)\n else:\n logger.info(\"Got h0=0, not writing an injection .cff file.\")\n self.run_makefakedata()", "def create_CGfiles_using_martinizepy(Ctermini_type, set_charge, name):\n\n os.system('cp %s/%s ./'%(this_path,martini_itp))\n\n os.system('python2 %s/martinize.py -f %s_aa.pdb \\\n -o %s.top -x %s.pdb -name %s -ff martini22 \\\n -nt \\\n -ss CCCCCCCCCCCC '%(this_path,name,name,name,name))\n\n\n # Collect lines defining atoms\n lines_atoms = []\n break1,break2 = None,None\n with open('%s.itp'%name, 'r') as f:\n data = f.readlines()\n 
start = False\n for i,line in enumerate(data):\n if '[ atoms ]' in line:\n start = True\n break1 = i+1\n continue\n if start:\n if line.split()==[]:\n start = False\n break2 = i\n break\n lines_atoms = lines_atoms + [line]\n \n \n\n # Modify lines_atoms as per Ctermini\n charged_thusfar = 0\n if Ctermini_type.upper() == 'OH':\n for i in range(len(lines_atoms))[::-1]:\n if 'BB' in lines_atoms[i]:\n lines_atoms[i] = lines_atoms[i].replace(' 0.0', '-1.0')\n lines_atoms[i] = lines_atoms[i].replace('P5', 'Qa') \n charged_thusfar += -1\n break\n\n\n # modify charge of side chains,\n # CURRENTLY only neutralizes if Qd SC is found (deprotonation)\n neutralize_ahead = False\n if set_charge < 0: # deprotonation\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n \n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n if neutralize_ahead:\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n else:\n charged_thusfar += -1\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n\n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n elif set_charge == 0: # protonation-deprotonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n \n for i in range(len(lines_atoms))[::-1]:\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n\n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n \n elif set_charge > 0: # protonation\n if Ctermini_type == 'OH':\n raise ValueError('Protonation after deprotonation does not make sense')\n\n for i in range(len(lines_atoms))[::-1]:\n if charged_thusfar == set_charge:\n neutralize_ahead = True\n\n if ('SC' in lines_atoms[i]) and ('-1.0' in lines_atoms[i]):\n if 'Qa' not in lines_atoms[i]:\n raise RuntimeError('-1.0 charge without Qa bead is found')\n lines_atoms[i] = lines_atoms[i].replace('-1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qa', 'P1')\n \n if ('SC' in lines_atoms[i]) and (' 1.0' in lines_atoms[i]):\n if 'Qd' not in lines_atoms[i]:\n raise RuntimeError('1.0 charge without Qd bead is found')\n if neutralize_ahead:\n lines_atoms[i] = lines_atoms[i].replace('1.0', ' 0.0')\n lines_atoms[i] = lines_atoms[i].replace('Qd', 'P1')\n else:\n charged_thusfar += 1\n \n if charged_thusfar != set_charge:\n raise ValueError('Peptide sequence could not be used to achieve set_charge')\n\n\n data_new = ''\n for line in data[:break1]:\n data_new += line\n for line in lines_atoms:\n data_new += line\n for line in data[break2:]:\n data_new += line\n \n \n with open('%s.itp'%name, 'w') as f:\n f.write(data_new)", "def createCfg_prep_dcard(self, jobOptions):\n category_output = self.channel\n if 
jobOptions['label']:\n category_output += \"_%s\" % jobOptions['label']\n lines = []\n lines.append(\"process.fwliteInput.fileNames = cms.vstring('%s')\" % jobOptions['inputFile'])\n lines.append(\"process.fwliteOutput.fileName = cms.string('%s')\" % jobOptions['datacardFile'])\n lines.append(\"process.prepareDatacards.processesToCopy = cms.vstring(%s)\" % self.prep_dcard_processesToCopy)\n lines.append(\"process.prepareDatacards.signals = cms.vstring(%s)\" % self.prep_dcard_signals)\n lines.append(\"process.prepareDatacards.makeSubDir = cms.bool(True)\")\n lines.append(\"process.prepareDatacards.categories = cms.VPSet(\")\n for charge in [\"OS\", \"SS\"]:\n for ptEtaBin in [\n \"BB_LL\", \"BB_ML\", \"BB_MM\", \"BB_HL\", \"BB_HM\", \"BB_HH\",\n \"EE_LL\", \"EE_ML\", \"EE_MM\", \"EE_HL\", \"EE_HM\", \"EE_HH\",\n \"BE_LL\", \"BE_ML\", \"EB_ML\",\"BE_MM\", \"BE_HL\", \"EB_HL\",\n \"BE_HM\", \"EB_HM\", \"BE_HH\", \"total\",\n ]:\n lines.append(\" cms.PSet(\")\n lines.append(\" input = cms.string('%s/%s'),\" % (charge, ptEtaBin))\n lines.append(\" output = cms.string('ttH_%s_%s_%s')\" % (self.channel, charge, ptEtaBin))\n lines.append(\" ),\")\n lines.append(\")\")\n lines.append(\"process.prepareDatacards.histogramToFit = cms.string('%s')\" % jobOptions['histogramToFit'])\n lines.append(\"process.prepareDatacards.sysShifts = cms.vstring(%s)\" % systematics.muon_E)\n create_cfg(self.cfgFile_prep_dcard, jobOptions['cfgFile_modified'], lines)", "def write_data_card(spec, data_card, channels, path):\n with open(path, \"w\") as f:\n f.write(f\"imax {str(size(data_card.bins))}\" + \"\\n\")\n f.write(\n \"jmax \"\n + str(size(data_card.processes) - size(data_card.isSignal.keys()))\n + \"\\n\"\n )\n f.write(f\"kmax {str(size(data_card.systs, 0))}\" + \"\\n\")\n\n if data_card.hasShapes:\n for channel in data_card.shapeMap.keys():\n for sample in data_card.shapeMap[channel].keys():\n f.write(\n f\"shapes {sample} {channel} {data_card.shapeMap[channel][sample][0]} {data_card.shapeMap[channel][sample][1]}\"\n )\n if size(data_card.shapeMap[channel][sample]) > 2:\n f.write(f\" {data_card.shapeMap[channel][sample][2]}\" + \"\\n\")\n else:\n f.write(\"\\n\")\n\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for bin in data_card.obs.keys():\n f.write(f\"{bin} \")\n f.write(\"\\n\")\n f.write(\"observation \")\n for channel in data_card.obs.keys():\n f.write(f\"{str(data_card.obs[channel])} \")\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for channel in data_card.obs.keys():\n for sample in data_card.exp[channel].keys():\n f.write(f\"{channel} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n f.write(f\"{sample} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n if sample in data_card.signals:\n f.write(f\"{str(-1 * data_card.processes.index(sample))} \")\n else:\n f.write(f\"{str(data_card.processes.index(sample) + 1)} \")\n f.write(\"\\n\")\n f.write(\"rate \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n\n f.write(f\"{str(data_card.exp[channel][sample])} \")\n f.write(\"\\n---------------------------------\\n\")\n for syst in data_card.systs:\n f.write(f\"{syst[0]} {syst[2]} \")\n for bin in syst[4].keys():\n for sample in data_card.exp[bin].keys():\n if syst[4][bin][sample] != 0:\n f.write(f\"{str(syst[4][bin][sample])} \")\n else:\n f.write(\"- \")\n\n 
f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for cAp in data_card.rateParams.keys():\n _dir = cAp.split(\"AND\")\n for i in range(size(data_card.rateParams[cAp], 0)):\n if size(data_card.rateParams[cAp][i][0]) > 3:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])} {data_card.rateParams[cAp][i][0][3]}\"\n )\n else:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])}\"\n )\n f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for idxc, channel in enumerate(channels):\n if (\n channel in data_card.binParFlags.keys()\n and data_card.binParFlags[channel] == True\n ):\n # double check to be safe\n shapesys = False\n staterror = False\n for sample in spec[\"channels\"][idxc][\"samples\"]:\n mod_types = [mod[\"type\"] for mod in sample[\"modifiers\"]]\n if \"shapesys\" in mod_types:\n shapesys = True\n elif \"staterror\" in mod_types:\n staterror = True\n\n if shapesys:\n f.write(f\"{channel} autoMCStats 100000 0 2\" + \"\\n\")\n if staterror:\n f.write(f\"{channel} autoMCStats 0 0 2\" + \"\\n\")", "def get_data(output_dir: Path = Path(\"raw_data\")) -> None:\n\n files = [\n \"alignment.sorted.bam\",\n \"names-mdmg.dmp\",\n \"nodes-mdmg.dmp\",\n \"acc2taxid.map.gz\",\n ]\n\n for file in files:\n with resources.path(\"metaDMG.data\", file) as p:\n file_path = p\n\n target_path = Path(output_dir) / file\n target_path.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(file_path, target_path)", "def generate_setups(self,filename=DEFAULT_FILENAME):\n \n self._create_main_shape()\n self._create_margin_shape()\n\n for section, setup in self.setups.iteritems():\n self._generate_section_structures(setup['distance'],\n setup['radius'],\n setup['structure'],\n section)\n self.write(filename)", "def writeFiles(self, directory = \"./\"):\n self.mass = []\n self.zero = 0\n self.natoms = self.numMonomer\n self.nangles = 0\n self.ndihedrals = 0\n\n self.ntypes = 4\n\n # set masses of all beads to be 1\n # in principle, the mass of counterions and salt ions should be smaller\n # expect this difference will no matter in terms of complexation of polyelectrolytes\n for i in range(self.ntypes):\n self.mass.append(1)\n\n\n\n self.bdtypes = 1\n self.angtypes = 0\n self.dihtypes = 0\n self.improtypes = 0\n\n iFileLammpsName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.lammps\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileLammps = open(iFileLammpsName, 'w')\n\n iFileXYZName = directory + \"data.pe.la{0}.na{1}.lc{2}.nc{3}.rho{4}.r{5}.xyz\".\\\n format(self.lenPa, self.numPa, self.lenPc, self.numPc, self.volRatio, self.chargeRepeat)\n iFileXYZ = open(iFileXYZName, 'w' )\n\n iFileXYZ.write(\"{0}\\n\".format(self.natoms))\n iFileXYZ.write(\"data.polyelectrolyte.xyz\\n\")\n\n iFileLammpsHeader = \"data file for mixtures of charged polymer chains\\n\" + \\\n \"\\n\" + \\\n \"{0:10d} atoms\\n\".format(self.natoms) + \\\n \"{0:10d} bonds\\n\".format(self.numBonds) + \\\n \"{0:10d} angles\\n\".format(self.nangles) + \\\n \"{0:10d} dihedrals\\n\".format(self.ndihedrals) + \\\n \"{0:10d} impropers\\n\".format(self.zero) + \\\n \"\\n\" +\\\n \"{0:10d} atom types\\n\".format(self.ntypes) + \\\n \"{0:10d} bond types\\n\".format(self.bdtypes) + \\\n \"{0:10d} angle types\\n\".format(self.angtypes) + \\\n \"{0:10d} dihedral types\\n\".format(self.dihtypes) + 
\\\n \"{0:10d} improper types\\n\".format(self.improtypes) + \\\n \"\\n\" + \\\n \" {0:16.8f} {1:16.8f} xlo xhi\\n\".format(self.lx, self.hx) + \\\n \" {0:16.8f} {1:16.8f} ylo yhi\\n\".format(self.ly, self.hy) + \\\n \" {0:16.8f} {1:16.8f} zlo zhi\\n\".format(self.lz, self.hz) + \\\n \"\\n\" + \\\n \"Masses\\n\" + \\\n \"\\n\"\n\n iFileLammps.write(iFileLammpsHeader)\n for i in range(self.ntypes):\n iFileLammps.write( \"{0} {1:8.3f}\\n\".format(i+1, self.mass[i]))\n\n iFileLammps.write(\"\\nAtoms\\n\\n\")\n \n \n\n for i in range(self.natoms):\n if self.atomsType[i] == 1 or self.atomsType[i] == 3:\n iFileXYZ.write(\"S {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 2:\n iFileXYZ.write(\"P {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 4:\n iFileXYZ.write(\"N {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 5:\n iFileXYZ.write(\"A {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 6:\n iFileXYZ.write(\"C {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 7:\n iFileXYZ.write(\"I {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n elif self.atomsType[i] == 8:\n iFileXYZ.write(\"K {0} {1} {2}\\n\".format(self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"{0} {1} {2} {3} {4} {5} {6}\\n\".format(i+1, \\\n self.molId[i], \\\n self.atomsType[i], \\\n self.atomsCharge[i], \\\n self.atomsCoords[i][0], \\\n self.atomsCoords[i][1], \\\n self.atomsCoords[i][2]))\n\n iFileLammps.write(\"\\nBonds\\n\\n\")\n for i in range(self.numBonds):\n iFileLammps.write(\"{0} 1 {1} {2}\\n\".format(i+1, self.bondList[i][0], self.bondList[i][1]))\n\n iFileXYZ.close()\n iFileLammps.close()", "def dctCreateDD(pdct, dirName, msgBufSize):\n return _dctmcc.dctCreateDD(pdct, dirName, msgBufSize)", "def save(cartesian, charge, name, comment='',\n mop_dir=str(os.getcwd()), csv_dir=str(os.getcwd()),\n mop_file=True, csv_file=True):\n # Preparation\n del cartesian['num']\n coordinates = 'x', 'y', 'z'\n columns = {'1_1': 2, '1_2': 4, '1_3': 6}\n for coord in coordinates:\n cartesian[coord] = cartesian[coord].astype(float)\n\n # .mop block\n if mop_file is True:\n cartesian = cartesian.round(decimals=5)\n for column in columns:\n cartesian.insert(columns[column], column, 0, True)\n\n # .mop constructor\n with open(mop_dir + 'sp_' + name + '.mop', 'a') as mop:\n mop.writelines('AUX LARGE CHARGE=' + charge + ' SINGLET NOOPT PM6-DH2X' + '\\n')\n mop.writelines(comment + '\\n'*2)\n mop.writelines(cartesian.to_string(header=False, index=False))\n mop.writelines('\\n'*2)\n\n # .csv block\n if csv_file is True:\n cartesian = cartesian.round(decimals=4)\n for column in columns:\n if mop_file is True:\n del cartesian[column]\n cartesian.insert(columns[column], column, 1, True)\n cartesian.to_csv(csv_dir + name + '.csv')", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n 
dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def export_md(clips):\n for book in clips:\n lines = []\n for pos in sorted(clips[book]):\n lines.append((clips[book][pos] + ' **P%s**' %pos).encode('utf-8'))\n\n filename = os.path.join(OUTPUT_DIR, u\"%s.md\" % book)\n with open(filename, 'w') as f:\n f.write(\"\\n\\n\".join(lines))", "def main():\n\n classes = {\n \"rain\":0,\n \"rooster\":1,\n \"crying_baby\":2,\n \"sea_waves\":3,\n \"clock_tick\":4,\n \"sneezing\":5,\n \"dog\":6,\n \"crackling_fire\":7,\n \"helicopter\":8,\n \"chainsaw\":9,\n }\n\n with open(\"../data/audio/ESC-50-master/meta/esc50.csv\") as f:\n lines = [i[:-1] for i in f.readlines()]\n lines = lines[1:]\n\n os.system(\"rm -rf ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10/audio\")\n\n meta = []\n for line in lines:\n t = line.split(\",\")\n if (t[-3] == 'True'):\n meta.append(\"../data/audio/ESC-10/audio/%s %d\" % (t[0],classes[t[3]]))\n src = \"../data/audio/ESC-50-master/audio/\"+t[0]\n dst = \"../data/audio/ESC-10/audio/\"+t[0]\n shutil.copy(src,dst)\n\n with open(\"../data/audio/ESC-10/filelist.txt\",\"w\") as f:\n for m in meta:\n f.write(m+\"\\n\")", "def setup(self):\n\n if self.user is 'Daisy':\n import socket\n host = socket.gethostname()\n\n simName = self.name_prefix[:self.name_prefix.find('_')]\n\n if 'ursa' in host:\n self.raw_sim_dir = '/disk01/rad/sim/' + simName + '/' + self.feedback\n self.caesar_dir = '/disk01/rad/sim/' + simName + '/' + self.feedback + 'Groups/'\n self.redshiftFile = '/home/rad/gizmo-extra/outputs_boxspace50.info'\n self.d_data = '/home/dleung/Downloads/SIGAME_dev/sigame/temp/z' + str(int(self.zCloudy)) + '_data_files/'\n elif 'flatironinstitute.org' or 'worker' in host:\n self.raw_sim_dir = '/mnt/ceph/users/daisyleung/simba/sim/' + simName + '/' + self.feedback # dummy\n self.caesar_dir = '/mnt/ceph/users/daisyleung/simba/sim/' + simName + '/' + self.feedback + 'Groups/'\n self.redshiftFile = '/mnt/ceph/users/daisyleung/simba/gizmo-extra/outputs_boxspace50.info'\n self.d_data = '/mnt/home/daisyleung/Downloads/SIGAME_dev/sigame/temp/z' + str(int(self.zCloudy)) + '_data_files/'\n else:\n raise NotImplementedError", "def save_data(dfin, outfile=\"./FPeng_prepped\"):\n dfin.to_csv(outfile+'.csv', sep='\\t', index=False)\n # s3.meta.client.upload_file(outfile+\".csv\", 'p3-engine', 'ETL/FPeng_prepped.csv')\n print(\"csv...\", end=\" \")\n\n dfin.to_pickle(outfile+'.pkl' ,protocol=4)\n # s3.meta.client.upload_file(outfile+'.pkl', 'p3-engine', 'ETL/FPeng_prepped.pkl')\n print(\"pkl...\", end=\" \")\n #dfin.to_msgpack(outfile+'.msg')\n #print(\"msg...\", end=\" \")\n\n #s3.meta.client.upload_file(outfile+\".msg\", 'p3-engine', 'ETL/FPeng_prepped.msg')\n\n # print(\"to s3 complete\", end=\" \")", "def setUpClass(cls):\n import os\n for root in cls.prod_s2_ssc:\n os.makedirs(root)\n metadata = root.split(\".\")[0] + \".HDR\"\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_mus:\n os.makedirs(root)\n metadata = os.path.join(root, root + \"_MTD_ALL.xml\")\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_nat:\n os.makedirs(root)\n metadata = os.path.join(root, \"MTD_MSIL1C.xml\")\n TestFunctions.touch(metadata)", "def make_master_flats(dc):\n\n\t## Make 
EXTcheck: is there always the same number of extensions in each file\n\tprint \"Making master flats\"\n\t\n\t## Choose extensions you are using\n\t\n\tfor flat_type in ['FFS']: # Currently FFD is unsupported. If you have FFDs, add them to the list but you must have ONLY FFDs or ONLY FFSs in the dir. Otherwise the first element in the list will get overwritten!\n\t\t#~ print \"\\n\", flat_type, \"\\n\"\n\t\tfor i in dc:\n\t\t\tTRIM, TRIM1, VR, PS, PS1, OS, OS1 = CCD_sections((i[0], i[1]))\n\t\t\tfilelist = []\n\t\t\tfor f in glob.glob(RAW+'*'+flat_type+'*fits'):\n\t\t\t\tccd_conf = []\n\t\t\t\theader0 = fits.getheader(f)\n\t\t\t\theader1 = fits.getheader(f, ext=1)\n\t\t\t\tif header0['OBSMODE']==flat_type:\n\t\t\t\t\tfor KW in ['BINX', 'BINY']:\n\t\t\t\t\t\tccd_conf.append(header0[KW])\n\t\t\t\t\tfor KW in ['NAXIS1', 'NAXIS2']:\n\t\t\t\t\t\tccd_conf.append(header1[KW])\n\t\t\t\t\t\tif tuple(ccd_conf)==i:\n\t\t\t\t\t\t\tfilelist.append(f)\n\t\t\tlfl = len(filelist)\n\t\t\tif lfl > 0:\n\t\t\t\tBIN=CD+'/'+str(i[0])+'x'+str(i[1])+'/'\n\t\t\t\tWD=BIN+str(i[-2])+'x'+str(i[-1])+'/' # Bottom level dir with calibrated and master frames\n\t\t\t\tB=check_exist(WD, 'MF.fits', i)\n\t\t\t\tif B=='n':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\thdul = fits.HDUList()\n\t\t\t\t\thdul.append(fits.ImageHDU())\n\t\t\t\t\t#~ MB = fits.open(WD+'MB.fits')\n\t\t\t\t\tx = np.array(range(0,i[-1]))\n\t\t\t\t\tfor EXT in (extensions):\n\t\t\t\t\t\tprint \"##################################################\"\n\t\t\t\t\t\tprint \"Stacking \"+`lfl`+' '+`i[-2]`+'x'+`i[-1]`+' channel '+`EXT`+' flat frames!'\n\t\t\t\t\t\tif EXT==1:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS1\n\t\t\t\t\t\t\tTR=TRIM1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS\n\t\t\t\t\t\t\tTR=TRIM\n\t\t\t\t\t\tsc = -1 # counts how many flats have mean>limit\n\t\t\t\t\t\tfor n, fn in enumerate(filelist):\n\t\t\t\t\t\t\tprint \"Files left:\",`lfl-n`+'/'+`lfl`\n\t\t\t\t\t\t\tim = fits.getdata(fn, ext=EXT)\n\t\t\t\t\t\t\tmeanval = np.mean(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t#~ maxval = np.max(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\tmaxval = stats.scoreatpercentile(im[VR[0]:VR[1], TR[0]:TR[1]], 90)\n\t\t\t\t\t\t\texptime = fits.getheader(fn)['EXPTIME']\n\t\t\t\t\t\t\t#~ if meanval > 15000. and meanval < 40000. and maxval < 50000. and exptime>5.:\n\t\t\t\t\t\t\tif meanval > 16000. and meanval < 40000. 
and exptime>=5.:\n\t\t\t\t\t\t\t\tsc+=1\n\t\t\t\t\t\t\t\t#~ im[im<1]=1\n\t\t\t\t\t\t\t\tmscrow, sigmarow = median_row(OSC, PSC, TR, im)\n\t\t\t\t\t\t\t\tsh = np.shape(im)\n\t\t\t\t\t\t\t\tfor y in range(0, sh[0]):\n\t\t\t\t\t\t\t\t\tim[y] = im[y]-mscrow[y]\n\t\t\t\t\t\t\t\tF=im\n\t\t\t\t\t\t\t\tnorm = np.median(F[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t\tF = F/norm #+np.min(F)+0.0001\n\t\t\t\t\t\t\t\tif sc==0:\n\t\t\t\t\t\t\t\t\tstack_arr = F\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tstack_arr = np.dstack((stack_arr, F))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\t\t\tprint \"Rejected\", fn, \"AVG =\", meanval, \"EXPTIME =\", exptime\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\tprint 'Will stack a total of', np.shape(stack_arr)[2], 'flats'\n\t\t\t\t\t\tMF = np.median(stack_arr, axis=2)\n\t\t\t\t\t\thdul.append(fits.ImageHDU(MF))\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS1\", np.shape(MF)[1])\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS2\", np.shape(MF)[0])\n\t\t\t\t\thdul[0].header.set(\"CALIBR\", \"T\")\n\t\t\t\t\thdul[0].header.set(\"INSTRUME\", \"MAIA\")\n\t\t\t\t\thdul[0].header.set(\"BINX\", i[0])\n\t\t\t\t\thdul[0].header.set(\"BINY\", i[1])\n\t\t\t\t\thdul[0].header.set(\"CALMODE\", \"MASTER FLAT\")\n\t\t\t\t\thdul.writeto(WD+\"MF.fits\", clobber=True)\n\t\t\t\t\tprint \"############################################################\"\n\tprint \"Completed master flats\"", "def creation_srcmdl(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,outputfile,emin,emax):\n\tf_liste_sour=\"a.txt\"\n\n\tlect_ca(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,f_liste_sour,name)\n\tXML_EC_PL(name, f_liste_sour, outputfile, emin,emax)\n\tos.system(\"rm -rf a.txt\")", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? 
OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 
1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def create_fits_file(fits, cols, cdata):\n dlist = []\n for k in range(0, len(cols)):\n aent = numpy.array(cdata[k])\n dcol = pyfits.Column(name=cols[k], format='F', array=aent)\n dlist.append(dcol)\n\n dcols = pyfits.ColDefs(dlist)\n tbhdu = pyfits.BinTableHDU.from_columns(dcols)\n\n mcf.rm_files(fits)\n tbhdu.writeto(fits)", "def _generate_metadata_kind(filename, targets=None, qa_group=None):\n db = LvfsDatabase(os.environ)\n db_firmware = LvfsDatabaseFirmware(db)\n items = db_firmware.get_items()\n store = appstream.Store('lvfs')\n for item in items:\n\n # filter\n if item.target == 'private':\n continue\n if targets and item.target not in targets:\n continue\n if qa_group and qa_group != item.qa_group:\n continue\n\n # add each component\n for md in item.mds:\n component = appstream.Component()\n component.id = md.cid\n component.kind = 'firmware'\n component.name = md.name\n component.summary = md.summary\n component.description = md.description\n if md.url_homepage:\n component.urls['homepage'] = md.url_homepage\n component.metadata_license = md.metadata_license\n component.project_license = md.project_license\n component.developer_name = md.developer_name\n\n # add provide\n if md.guid:\n prov = appstream.Provide()\n prov.kind = 'firmware-flashed'\n prov.value = md.guid\n component.add_provide(prov)\n\n # add release\n if md.version:\n rel = appstream.Release()\n rel.version = md.version\n rel.description = md.release_description\n if md.release_timestamp:\n rel.timestamp = md.release_timestamp\n rel.checksums = []\n rel.location = 'https://secure-lvfs.rhcloud.com/downloads/' + item.filename\n rel.size_installed = md.release_installed_size\n rel.size_download = md.release_download_size\n component.add_release(rel)\n\n # add container checksum\n if md.checksum_container:\n csum = appstream.Checksum()\n csum.target = 'container'\n csum.value = md.checksum_container\n csum.filename = item.filename\n rel.add_checksum(csum)\n\n # add content checksum\n if md.checksum_contents:\n csum = appstream.Checksum()\n csum.target = 'content'\n csum.value = md.checksum_contents\n csum.filename = md.filename_contents\n rel.add_checksum(csum)\n\n # add component\n store.add(component)\n\n # dump to file\n if not os.path.exists(DOWNLOAD_DIR):\n os.mkdir(DOWNLOAD_DIR)\n filename = os.path.join(DOWNLOAD_DIR, filename)\n store.to_file(filename)\n return filename", "def dsinfomaker(compath, backpath, mwb, tcfs, SR=\"SR\"):#yrs, ves,\r\n\tdsinfo = OrderedDict()\r\n\t# ==========\r\n\tdsinfo[\"GFED\"] = ({\"alias\":\"GFED4.1\",\"long_name\":\"FRI\", \"units\":\"yrs\"})\r\n\tdsinfo[\"MODIS\"] = ({\"alias\":\"MCD64A1\", \"long_name\":\"FRI\",\"units\":\"yrs\", \"version\":\"v006\"})\r\n\tdsinfo[\"esacci\"] = ({\"alias\":\"FireCCI5.1\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"COPERN_BA\"] = ({\"alias\":\"CGLS\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN_AFmask\"] = ({\"alias\":\"Hansen GFC & MCD14ML\", \"long_name\":f'FRI$_{{{SR}}}$',\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN\"] = 
({\"alias\":\"Hansen GFC\", \"long_name\":\"DRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"Risk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\t# dsinfo[\"FutureRisk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\tdsinfo[\"SRfrac\"] = ({\"alias\":\"Stand Replacing Fire Percentage\", \"long_name\":f'FRI$_{{{\"SR\"}}}$ %'})\r\n\r\n\tfor dsnm in dsinfo:\r\n\t\tif dsnm.startswith(\"H\"):\r\n\t\t\t# +++++ make a path +++++\r\n\t\t\tppath = compath + \"/BurntArea/HANSEN/FRI/\"\r\n\t\t\tfname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, tcfs, mwb)\r\n\t\t\t# fname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\telif dsnm == \"Risk\":\r\n\t\t\tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t\tcf.pymkdir(ppath)\r\n\t\t# elif dsnm == \"FutureRisk\":\r\n\t\t# \tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t# \tfname = f\"{dsnm}_annual_burns_MW_{mwb}degreeBox_{yrs}yrs_{ves}.nc\" \r\n\t\t# \tcf.pymkdir(ppath)\r\n\t\telse:\r\n\t\t\t# fname = \"Hansen_GFC-2018-v1.6_regrided_esacci_FRI_%ddegMW_SIBERIA\" % (mwb)\r\n\t\t\tppath = compath + \"/BurntArea/%s/FRI/\" % dsnm\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t# +++++ open the datasets +++++\r\n\t\tdsinfo[dsnm][\"fname\"] = ppath+fname\r\n\r\n\r\n\treturn dsinfo", "def loader(filename,wdm=0,verbose=0,kmpers=1):\n with open(filename, 'rb') as f:\n if wdm == False:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n infoBytes = f.tell()\n if verbose>2:\n print(infoBytes)\n #skip darkmatter\n #read the first dm line\n if verbose>2:\n print(f.tell())\n catd = np.fromfile(f,dtype= dmdtype, count=1) \n #get the bytes location and subtract off the bytes location after loading info to get n bytes a line for dm\n if verbose>2:\n print(f.tell())\n current = f.tell()\n dmBytes = current-infoBytes\n f.seek(dmBytes*(info['nd'][0]-1)+current)\n if verbose>2:\n print(f.tell())\n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n else:\n if verbose>1:\n print(filename)\n #file info\n info= np.fromfile(f,dtype=infodtype,count=1)\n if verbose>2:\n print(f.tell())\n # #dark matter setup count is reading the number of ?rows? 
\n catd= np.fromfile(f,dmdtype, count=info['nd'][0]) \n if verbose>2:\n print(f.tell()) \n # stars setup \n cats= np.fromfile(f,dtype=stellardtype, count=info['ns'][0])\n if verbose>2:\n print('done')\n \n \n #convert to physical units as found in README.md\n if wdm == True:\n catd['mass']*=2.324876e9\n if kmpers == 1:\n catd['vx']*=100.\n catd['vy']*=100.\n catd['vz']*=100.\n cats['mass']*=2.324876e9\n if kmpers == 1:\n cats['vx']*=100.\n cats['vy']*=100.\n cats['vz']*=100.\n \n if wdm == True:\n return(catd,cats,info)\n else:\n return(cats,info)", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def dctCreateD(pdct, dirName, msgBufSize):\n return _dctmcc.dctCreateD(pdct, dirName, msgBufSize)", "def file(self):\n\n corrdict = (self.cat_corr)['correction']\n specdict = (self.cat_corr)['spec'] \n \n fft_dir = direc('fft', self.cat_corr)\n \n if self.type == 'data': \n galdata = Data(self.type, self.cat_corr, **self.kwargs) # data class \n elif self.type == 'random': \n galdata = Random(self.type, self.cat_corr, **self.kwargs) # data class \n\n self.data_file = galdata.file_name # galaxy data file\n\n # FFT label \n fft_str = 'FFT_'\n if specdict['ell'] != 0: \n fft_str += 'Q_'\n \n fft_corr_str = ''\n if (corrdict['name'].lower() in ('floriansn', 'hectorsn')) & (self.type != 'random'):\n fft_corr_str = ''.join(['.', corrdict['name'].lower()])\n\n # FFTs from data file \n fft_file = ''.join([\n fft_dir, \n fft_str, (self.data_file).rsplit('/')[-1], \n fft_corr_str,\n '.grid', str(specdict['Ngrid']), \n '.P0', str(specdict['P0']), \n '.box', str(specdict['Lbox'])\n ])\n\n return fft_file", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except 
IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! 
flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def write_catalog(format_list,study):\n\n o_format_list=sorted(format_list)\n\n fn=study.sas_file\n\n with open(fn, 'w') as f:\n f.write(f\"*{'-'*73}*\\n\")\n f.write(f\"| SAS Formats Catalog for: {study.display_name}\\n\")\n f.write(f\"| Path: {fn}\\n\")\n f.write(f\"| Autogenerated by Candid on: {datetime.datetime.now().strftime('%d-%b-%Y')}\\n\")\n f.write(f\"| {'-' * 10} Post-testing modifications {'-' * 10}\\n\")\n f.write(\"| Date User Modification\\n|\\n\")\n f.write(f\"*{'-'*73}*;\\n\")\n f.write(\"%include 'init.sas';\\n\\n\")\n\n # delete existing catalogs\n # f.write(\"proc datasets library=out memtype=catalog;\\n\\tdelete formats;\\nrun;\\n\\n\")\n\n f.write(\"proc format library=out;\\n\")\n\n for i in o_format_list:\n\n # Write the invalue statement\n f.write(Format.write_comment(i))\n\n # Write the invalue statement\n if i.fmt_value==Format_val.Invalue:\n f.write(\"\\tinvalue\")\n if i.fmt_type==Format_type.Numeric:\n f.write(f\" ${i.name + '_' + i.category}\\n\")\n else:\n f.write(f\" {i.name+ '_' + i.category}\\n\")\n\n # Write the value statement\n else:\n if i.fmt_type==Format_type.Char:\n f.write(f\"\\tvalue ${i.name + '_' + i.category}\\n\")\n else:\n f.write(f\"\\tvalue {i.name + '_' + i.category}\\n\")\n\n # Loop for each member of format catalog\n for j in i.listvals:\n # start\n if i.fmt_type==Format_type.Numeric:\n f.write(f\"\\t\\t{remove_cc(j[0])} = \")\n else:\n f.write(f\"\\t\\t\\\"{remove_cc(j[0])}\\\" = \")\n # label\n if i.fmt_value == Format_val.Invalue:\n f.write(f\"{remove_cc(j[1])}\\n\")\n else:\n f.write(f'\"{remove_cc(j[1])}\"\\n')\n f.write(\"\\t\\t;\\n\")\n f.write(\"run;\")\n\n return fn", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def generate_database():\n database = {}\n\n for name in images:\n descriptors = []\n for path in images[name]:\n descriptors.append(ConvertToDescriptor.jpeg_to_descriptors(myPath + path))\n database[name] = person.Person(name, descriptors)\n\n output = open('database.p', 'wb')\n pickle.dump(database, output)\n output.close()", "def main() :\n #fname = '/reg/d/psdm/CXI/cxi35711/hdf5/cxi35711-r0009.h5'\n #dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDs1.0:Cspad.0/data'\n #event = 1\n\n fname = '/reg/d/psdm/CXI/cxi37411/hdf5/cxi37411-r0039.h5'\n dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDsd.0:Cspad.0/data'\n event = 1\n\n print 'Default CSPad configuration pars:'\n cspadconfig.printCSPadConfigPars()\n\n print '\\nCSPad configuration pars: for fname, dsname, event =\\n', fname, '\\n', dsname, '\\n', event\n 
cspadconfig.setCSPadConfiguration( fname, dsname, event ) # This will set current CSPad configuration\n cspadconfig.printCSPadConfigPars()", "def convert_to_dicom(file_name):\n\tpath = get_testdata_file(\"CT_small.dcm\")\n\tds = pydicom.dcmread(path)\n\timg = Image.open(file_name+\".bmp\")\n\tnpa = np.asarray(img)\n\tds.PixelData = img.tobytes()\n\tname = update_destination_file_name(file_name)\n\tds.save_as(name+'.dcm')\n\tprint(\"DONE\\t \"+name+\".dcm\")", "def save(self, dest_dir):\n try:\n makedirs(dest_dir)\n except FileExistsError:\n pass\n suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)\n gen_path = path.join(dest_dir, 'gen' + suffix)\n disc_path = path.join(dest_dir, 'disc' + suffix)\n self.discriminator.save(disc_path)\n self.generator.save(gen_path)", "def save(self, dest_dir):\n try:\n makedirs(dest_dir)\n except FileExistsError:\n pass\n suffix = '-%d-%d.h5' % (self.num_gen_steps, self.num_disc_steps)\n gen_path = path.join(dest_dir, 'gen' + suffix)\n disc_path = path.join(dest_dir, 'disc' + suffix)\n self.discriminator.save(disc_path)\n self.generator.save(gen_path)", "def make_ctd_file():\n # Get the correct netCDF4 dataset\n __location__ = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__),\n 'output'))\n nc_file = os.path.join(__location__,'test_BM54.nc')\n\n # Be sure to start with the original, unedited CTD data\n test_ambient.test_from_ctd()\n\n # Load the data into a netCDF file\n nc = Dataset(nc_file, 'a')\n\n return nc", "def prepare_dibco(data_dir=DEFAULT_DATA_DIR,\n out_dir=None,\n force=False):\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n if out_dir is None:\n out_dir = data_dir\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n train_record = 'train.record'\n test_record = 'test.record'\n num_train = 'num_train'\n num_test = 'num_test'\n\n if not (os.path.exists(os.path.join(data_dir, train_record)) and\n os.path.exists(os.path.join(data_dir, test_record)) and\n os.path.exists(os.path.join(data_dir, num_train)) and\n os.path.exists(os.path.join(data_dir, num_test))) or force:\n maybe_download(get_dibco_meta_data(data_dir)['url'], data_dir, force=force)\n\n extracted_dir = os.path.join(data_dir, DATA_EXTRACTED_DIR)\n\n with open(os.path.join(extracted_dir, 'train.txt')) as i_f:\n with open(os.path.join(data_dir, num_train), mode='w') as o_f:\n o_f.write(str(len(i_f.readlines())))\n train_writer = tf.python_io.TFRecordWriter(\n os.path.join(out_dir, train_record))\n for data in get_label_map_dict(\n extracted_dir, os.path.join(extracted_dir, 'train.txt')):\n example = dict_to_example(data)\n train_writer.write(example.SerializeToString())\n train_writer.close()\n\n with open(os.path.join(extracted_dir, 'test.txt')) as i_f:\n with open(os.path.join(data_dir, num_test), mode='w') as o_f:\n o_f.write(str(len(i_f.readlines())))\n val_writer = tf.python_io.TFRecordWriter(os.path.join(out_dir, test_record))\n for data in get_label_map_dict(\n extracted_dir, os.path.join(extracted_dir, 'test.txt')):\n example = dict_to_example(data)\n val_writer.write(example.SerializeToString())\n val_writer.close()\n print()\n\n return get_dibco_meta_data(data_dir)", "def make_one_d(prefile, ffile, opath, moddict):\n import codecs\n\n # List of modules used in input file\n imods = used_mods(prefile)\n\n # Query dictionary for filenames of modules used in fortran file.\n # Remove own file name for circular dependencies if more than one\n # module in fortran file.\n if imods:\n imodfiles = list()\n for d in imods:\n if d in 
moddict: # otherwise external module such as netcdf\n if moddict[d] != ffile:\n imodfiles.append(moddict[d])\n else:\n imodfiles = []\n\n # Write output .d file\n dfile = f2d(ffile, opath)\n ofile = f2o(ffile, opath)\n df = codecs.open(dfile, 'w', encoding='utf-8')\n print(dfile, ':', ffile, file=df)\n print(ofile, ':', ffile + ' ' + dfile, end='', file=df)\n for im in imodfiles:\n print('', f2o(im, opath), end='', file=df)\n print('', file=df)\n df.close()\n\n return", "def create_samfile(self):", "def create_obj(destination,mtl_name):\r\n\tshutil.copyfile(\"file_cube.obj\",destination)\r\n\tf=open(destination,\"r\")\r\n\tlines=f.readlines()\r\n\tlines[0]=\"mtllib \"+mtl_name+\"\\n\"\r\n\tf.close()\r\n\tf=open(destination,\"w\")\r\n\tf.writelines(lines)\r\n\tf.close()", "def generate_data_set(args):\n if args.o is None:\n filename = \"test\"\n else:\n filename = args.o\n\n if args.b is None:\n args.b = 0.5\n if args.p is None:\n path = Path(\"\")\n else:\n path = Path(args.p)\n if args.nf is None:\n args.nf = 1\n\n for i in range(int(args.nf)):\n args.o = path / (filename + \"_t\"+str(args.t) + \"_k\" + str(args.k) + \"_n\"+str(args.n) + \"_m\"+str(args.m)\n + \"_c\"+str(args.c) + \"_\"+str(i) + \".dzn\")\n create_dnz_file(args)", "def dump_dc():\n network = Network(pickle=True)\n skus = {}\n for i in range(0, len(network.dcs)):\n for j in network.dcs[i].inventory.keys():\n if j not in skus:\n skus[j] = [0, 0, 0, 0, 0]\n skus[j][i] += network.dcs[i].inventory[j]\n else:\n skus[j][i] += network.dcs[i].inventory[j]\n arr = []\n for i in skus.keys():\n arr.append([i,skus[i][0], skus[i][1], skus[i][2], skus[i][3], skus[i][4]])\n with open('dc_inv.csv', 'wb') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(arr)", "def write_scram_toolfile(self, contents, filename):\n with open(self.spec.prefix.etc + '/scram.d/' + filename, 'w') as f:\n f.write(contents)\n f.close()", "def mkdirout():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n if os.path.exists(output_dir()):\n sys.exit(\"ERROR. Unable to create output directory. %s already exists. Please, make sure you choose an output path not containing former results.\" % output_dir() ) # LOGGING?\n else:\n try:\n os.mkdir(output_dir())\n except OSError:\n sys.exit(\"ERROR. 
Unable to create output directory %s.\" % output_dir() )\n os.mkdir(output_tmpdir())\n os.mkdir(output_tmpdir(\"pisacov\"))\n os.mkdir(output_tmpdir(\"pisa\"))\n os.mkdir(output_tmpdir(\"deepmetapsicov\"))", "def collectInitialeccnStatistics_onefile(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n data = loadtxt(path.join(folder, '%s_ecc_eccp_10.dat' %(ecc_type_name)))\n Npart = data[:, 36]\n Ncoll = data[:, 37]\n dSdy = data[:, 38]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:, 39]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), float(dSdy[event_id])))\n if(deformedNuclei):\n cosTheta1 = data[:, 40]\n phi1 = data[:, 41]\n cosTheta2 = data[:, 42]\n phi2 = data[:, 43]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n for iorder in range(1,10):\n eccReal = data[:, 4*iorder - 2]\n eccImag = data[:, 4*iorder - 1]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()", "def create_mock_files(self,\n project_id=\"DEV-test\",\n count=3,\n prefix=\"mock_data_file\",\n file_format=\"dcm\",\n outdir=\".\",\n msg = \"This is a mock data file for testing purposes. Delete me!\",\n write_tsv = True\n ):\n prog,proj = project_id.split(\"-\")\n authz = [\"/programs/{}/projects/{}\".format(prog,proj)]\n acl = [prog,proj]\n\n mfiles = {'file_name':[],'md5sum':[],\"file_size\":[],\"object_id\":[],\"storage_urls\":[],\"acl\":[],\"authz\":[]}\n for i in range(count):\n file_name = \"{}_{}.{}\".format(prefix,i+1,file_format)\n object_id = str(uuid.uuid4())\n mfiles['file_name'].append(file_name)\n mfiles['object_id'].append(object_id)\n mfiles['authz'].append(authz)\n mfiles['acl'].append(acl)\n\n\n output = \"{}/{}\".format(outdir,file_name)\n os.system(\"touch {}\".format(output))\n file_msg =\"{} File {} of {}. 
{} with object_id {}.\".format(msg,i+1,count,file_name,object_id)\n cmd = 'echo \"{}\" > {}'.format(file_msg,file_name)\n os.system(cmd)\n\n with open(output, 'rb') as file_to_check:\n file_contents = file_to_check.read()\n #cmd = \"!md5 mock_data_file_{}.{}\".format(i+1,file_format))\n md5 = hashlib.md5(file_contents).hexdigest() #check in shell: !md5 mock_data_file_3.dcm\n\n mfiles['md5sum'].append(md5)\n mfiles['file_size'].append(os.stat(output).st_size)\n urls=\"s3://this-is-a-fake-url-for:{}\".format(file_name)\n mfiles['storage_urls'].append([urls])\n\n return mfiles", "def write_all_patients():\n\n data_dir = sys.argv[1]\n output_dir = sys.argv[2]\n\n imgs, i_msks, o_msks = load_all_patients(data_dir=data_dir)\n\n for idx, array in enumerate(imgs):\n np.save(output_dir+'/img_'+str(idx), array)\n for idx, array in enumerate(i_msks):\n np.save(output_dir+'/i_msk_'+str(idx), array)\n for idx, array in enumerate(o_msks):\n np.save(output_dir + '/o_msk_' + str(idx), array)\n\n return None", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def write_all_data_tables( phasename, eos_prop_d, output_d ):\n\n dataio.write_data_table( 'temperature_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Tmesh_a')),\n ('GPa', 'eV', 1), output_d )\n\n dataio.write_data_table( 'density_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'rhomesh_a')),\n ('GPa', 'eV','g_cc'), output_d )\n dataio.write_data_table( 'heat_capacity_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'Cpmesh_a')),\n ('GPa','eV','eV'), output_d )\n\n dataio.write_data_table( 'thermal_exp_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'alphamesh_a')),\n ('GPa','eV',1), output_d )\n\n dataio.write_data_table( 'adiabat_temp_grad_' + phasename + '.dat',\n (eos_prop_d[key] for key in\n ('Pmesh_a', 'Smesh_a', 'dTdP_Smesh_a')),\n ('GPa','eV','GPa-1'), output_d )\n pass", "def create_normalization_file(use_controls, use_nofcd, mods):\n\n for j in range(1, mods + 1):\n fcd_paths = sorted(glob.glob(FCD_FOLDER + f'fcd_*.{j}.nii.gz'))\n nofcd_paths = sorted(glob.glob(FCD_FOLDER + f'nofcd_*.{j}.nii.gz'))\n control_paths = sorted(glob.glob(CONTROL_FOLDER + f'control_*.{j}.nii.gz'))\n\n mri_paths = fcd_paths\n if use_nofcd:\n mri_paths += nofcd_paths\n if use_controls:\n mri_paths += 
control_paths\n\n t1_landmarks_path = Path(f'./data/t1_landmarks_{j}.npy')\n\n if t1_landmarks_path.is_file():\n continue\n # os.remove(f'./data/t1_landmarks_{j}.npy')\n\n t1_landmarks = (\n t1_landmarks_path\n if t1_landmarks_path.is_file()\n else HistogramStandardization.train(mri_paths)\n )\n\n np.save(t1_landmarks_path, t1_landmarks, allow_pickle=True)", "def generate_mos(laygen, objectname_pfix, placement_grid, routing_grid_m1m2, devname_mos_boundary, devname_mos_body,\n devname_mos_dmy, m=1, m_dmy=0, origin=np.array([0,0])):\n pg = placement_grid\n rg12 = routing_grid_m1m2\n pfix = objectname_pfix\n\n # placement\n imbl0 = laygen.relplace(name=\"I\" + pfix + 'BL0', templatename=devname_mos_boundary, gridname=pg, xy=origin)\n refi=imbl0\n if not m_dmy==0:\n imdmyl0 = laygen.relplace(name=\"I\" + pfix + 'DMYL0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyl0\n else:\n imdmyl0 = None\n im0 = laygen.relplace(name=\"I\" + pfix + '0', templatename=devname_mos_body, gridname=pg, refobj=refi, shape=[m, 1])\n refi=im0\n if not m_dmy==0:\n imdmyr0 = laygen.relplace(name=\"I\" + pfix + 'DMYR0', templatename=devname_mos_dmy, gridname=pg, refobj=refi, shape=[m_dmy, 1])\n refi=imdmyr0\n else:\n imdmyr0 = None\n imbr0 = laygen.relplace(name=\"I\" + pfix + 'BR0', templatename=devname_mos_boundary, gridname=pg, refobj=imdmyr0)\n md=im0.elements[:, 0]\n #route\n #gate\n rg0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['G0'], refobj1=md[-1].pins['G0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['G0'], gridname=rg12)\n #drain\n rdl0=laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=md[0].pins['D0'], refobj1=md[-1].pins['D0'])\n for _md in md:\n laygen.via(name=None, xy=[0, 1], refobj=_md.pins['D0'], gridname=rg12)\n #source\n rs0=laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=md[0].pins['S0'], refobj1=md[-1].pins['S1'])\n for _md in md:\n laygen.via(name=None, xy=[0, 0], refobj=_md.pins['S0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=md[-1].pins['S1'], gridname=rg12)\n #dmy\n if m_dmy>=2:\n mdmyl=imdmyl0.elements[:, 0]\n mdmyr=imdmyr0.elements[:, 0]\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyl[0].pins['D0'], refobj1=mdmyl[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 1], xy1=[0, 1], gridname0=rg12, refobj0=mdmyr[0].pins['D0'], refobj1=mdmyr[-1].pins['D0'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyl[0].pins['S0'], refobj1=mdmyl[-1].pins['S1'])\n laygen.route(name=None, xy0=[0, 0], xy1=[0, 0], gridname0=rg12, refobj0=mdmyr[0].pins['S0'], refobj1=mdmyr[-1].pins['S1'])\n for _mdmyl in mdmyl:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyl.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyl.pins['S0'], gridname=rg12)\n for _mdmyr in mdmyr:\n laygen.via(name=None, xy=[0, 1], refobj=_mdmyr.pins['D0'], gridname=rg12)\n laygen.via(name=None, xy=[0, 0], refobj=_mdmyr.pins['S1'], gridname=rg12)\n return [imbl0, imdmyl0, im0, imdmyr0, imbr0]", "def write_mp_cards(bc_file, bc_class):\n mc = bc_class.model_constants\n bc_file.write('! 
Global Material Properties\\n')\n bc_file.write('MP MU {}\\n'.format(mc.kinematic_viscosity))\n bc_file.write('MP G {}\\n'.format(mc.gravity))\n bc_file.write('MP RHO {}\\n'.format(mc.density))\n if mc.enable_wetting_drying:\n bc_file.write('MP DTL {}\\n'.format(mc.wet_dry_limit))\n bc_file.write('MP MUC {}\\n'.format(mc.mannings_unit_constant))\n bc_file.write('\\n')\n\n bc_file.write('! Material Properties\\n')\n for id, mat_prop in bc_class.material_properties.items():\n if mat_prop.eddy_viscosity_method == 'Constant (EVS)':\n bc_file.write('MP EVS {} {} {} {}\\n'.format(id, mat_prop.vxx_eddy_viscosity, mat_prop.vyy_eddy_viscosity,\n mat_prop.vxy_eddy_viscosity))\n\n elif mat_prop.eddy_viscosity_method == 'Estimated (EEV)':\n objects = list(mat_prop.param.estimated_eddy_viscosity_method.get_range())\n bc_file.write('MP EEV {} {} {}\\n'.format(id, mat_prop.estimated_eddy_viscosity_weighting_factor,\n mat_prop.estimated_eddy_viscosity_method))\n if mat_prop.coriolis:\n bc_file.write('MP COR {} {}\\n'.format(id, mat_prop.coriolis_latitude))\n bc_file.write('MP SRT {} {}\\n'.format(id, mat_prop.refinement_tolerance))\n bc_file.write('MP ML {} {}\\n'.format(id, mat_prop.max_refinement_level))\n if bc_class.operation_parameters.transport != 0:\n for id1, tran_prop in mat_prop.transport_properties.items():\n bc_file.write('MP TRT {} {} {}\\n'.format(id, id1, tran_prop.refinement_tolerance))\n if tran_prop.refinement_tolerance > 0 and mat_prop.eddy_viscosity_method == 'Constant (EVS)':\n bc_file.write('MP DF {} {} {}\\n'.format(id, id1, tran_prop.turbulent_diffusion_rate))\n if bc_class.operation_parameters.wind:\n wnd = mat_prop.wind_properties\n bc_file.write('MP WND STR {} {}\\n'.format(id, wnd.stress_formulation))\n bc_file.write('MP WND ATT {} {}\\n'.format(id, wnd.attenuation))\n\n bc_file.write('\\n') # blank line at the end of the Material Properties", "def sc2depictions(chromosome, root_name=\"output\", lot=0):\n mol_structure = sc2mol_structure(chromosome, lot=lot)\n mol2pdb(mol_structure, \"{0}.pdb\".format(root_name))\n mol2xyz(mol_structure, \"{0}.xyz\".format(root_name))\n Draw.MolToFile(mol_structure, \"{0}.png\".format(root_name))\n logger.info(\"Generated depictions with root name {0}\".format(root_name))" ]
[ "0.637841", "0.6184567", "0.6101035", "0.6063938", "0.59966964", "0.5898186", "0.5831032", "0.5792886", "0.56806254", "0.5673659", "0.5642974", "0.56186104", "0.5612226", "0.5589771", "0.55712795", "0.55688566", "0.55426204", "0.5509481", "0.5476108", "0.5453811", "0.5441293", "0.54330695", "0.5431424", "0.5414131", "0.5385509", "0.53829294", "0.5375447", "0.5328336", "0.53228354", "0.5321947", "0.5318898", "0.53159505", "0.5315505", "0.5307181", "0.53003657", "0.5295449", "0.5292908", "0.52909136", "0.5288011", "0.5272096", "0.5265378", "0.52650017", "0.5255084", "0.52498764", "0.52448916", "0.52421767", "0.52316004", "0.5228", "0.52202857", "0.5217112", "0.52134454", "0.5209725", "0.52059", "0.519917", "0.51843125", "0.51818025", "0.51816076", "0.5180873", "0.5163552", "0.5162943", "0.51577324", "0.51572305", "0.5152732", "0.51491064", "0.5147658", "0.5146195", "0.51417404", "0.5140839", "0.5134263", "0.5131696", "0.5131328", "0.51250607", "0.5122811", "0.5118456", "0.5117493", "0.51082325", "0.510054", "0.5098085", "0.50858533", "0.50854087", "0.5084667", "0.5084667", "0.5081934", "0.5075706", "0.5075606", "0.5072391", "0.50701183", "0.50694525", "0.5066464", "0.5064185", "0.5063714", "0.5058079", "0.50527436", "0.5052075", "0.50463414", "0.50453717", "0.5038951", "0.50357836", "0.50346065", "0.50335747" ]
0.7661663
0
write compressed meds file
def write(self, filename): assert filename[-3:]=='.fz','name must end in .fz' files.makedir_fromfile(filename) ucfilename=filename[0:-3] bname = os.path.basename(ucfilename) tmp_path = os.path.join( files.get_temp_dir(), bname, ) files.makedir_fromfile(tmp_path) with TempFile(tmp_path) as tfile: super(CosmosMEDSMaker,self).write(tfile.path) self._compress_meds_file(tfile.path, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_compressed(self, filename):\n\n # Define which molecules to use \n # (counting indices of processed data set)\n indices = np.arange(len(self))\n # All charges and position arrays have the same size\n # (the one of the biggest molecule)\n size = np.max( self.num_atoms )\n # Initialize arrays\n num_atoms = np.zeros(len(indices))\n labels = np.zeros(len(indices))\n charges = np.zeros([len(indices),size])\n positions = np.zeros([len(indices),size,3])\n # For each molecule ...\n for j,idx in enumerate(indices):\n # load the data\n sample = self[idx]\n # assign per-molecule data\n labels[j] = sample['data']\n num_atoms[j] = sample['num_atoms']\n # ... and for each atom:\n for ia in range(sample['num_atoms']):\n charges[j,ia] = sample['charges'][ia]\n positions[j,ia,0] = sample['positions'][ia][0] \n positions[j,ia,1] = sample['positions'][ia][1] \n positions[j,ia,2] = sample['positions'][ia][2]\n\n # Merge pairs\n print(labels.shape,charges.shape,positions.shape)\n labels = labels[0::2]\n charges = np.array([np.concatenate((charges[i],charges[i+1])) for i in indices[0::2]])\n positions = np.array([np.concatenate((positions[i],positions[i+1])) for i in indices[0::2]])\n print(labels.shape,charges.shape,positions.shape)\n \n # Create a dictionary with all the values to save\n save_dict = {}\n save_dict['label'] = labels\n save_dict['charges'] = charges\n save_dict['positions'] = positions\n\n # Save as a compressed array \n np.savez_compressed(filename,**save_dict)\n \n return", "def _compress_meds_file(self, ucfilename, fzfilename):\n from os.path import basename\n\n tup=(basename(ucfilename),basename(fzfilename))\n print('compressing file: %s -> %s' % tup)\n tpath=files.expandpath(fzfilename)\n if os.path.exists(tpath):\n os.remove(tpath)\n\n tmpdir = os.path.dirname(ucfilename)\n with StagedOutFile(fzfilename,tmpdir=tmpdir) as sf:\n cmd = self['fpack_command']\n cmd = cmd.format(fname=ucfilename)\n ret=os.system(cmd)\n\n if ret != 0:\n raise RuntimeError(\"failed to compress file\")\n\n print('output is in:',fzfilename)", "def save_compressed(data, filename, compression_type='bz2', create_link=False):\n # write to compressed HDF5 file\n hdf5 = open_compressed(filename, 'w')\n save(data, hdf5)\n close_compressed(filename, hdf5, compression_type, create_link)", "def write_chunk(self, outfile, tag, data):\n outfile.write(struct.pack(\"!i\", len(data)))\n outfile.write(tag)\n outfile.write(data)\n checksum = zlib.crc32(tag)\n checksum = zlib.crc32(data, checksum)\n outfile.write(struct.pack(\"!i\", checksum))", "def save_to_gzip(data,fname):\n with gzip.open(fname + '.gz', 'wb',compresslevel = 9) as f:\n f.write(data.tobytes())", "def saveToFile(fileName):\n outfile = open (fileName, \"w\")\n chunkInfoKeys = gChunkMap.keys()\n chunkInfoKeys.sort()\n\n for chunkInfo in chunkInfoKeys:\n c = gChunkMap[chunkInfo]\n outfile.write(c.printChunkInfo())\n outfile.write(\"\\n\");", "def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)", "def write_metadata(zip, datapack):\n zip.writestr('pack.mcmeta', json.dumps({'pack':{'pack_format':1, 'description':datapack.desc}}, indent=4))\n zip.writestr('data/minecraft/tags/functions/load.json', json.dumps({'values':['{}:reset'.format(datapack.name)]}))\n zip.writestr('data/{}/functions/reset.mcfunction'.format(datapack.name), 'tellraw @a [\"\",{\"text\":\"Loot table randomizer by SethBling, modified by Joelius300\",\"color\":\"green\"}]')", "def _write_dx(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * 
data['counts'][2]\n if FN.endswith('.dx'):\n F = open(FN, 'w')\n else:\n import gzip\n F = gzip.open(FN, 'w')\n\n F.write(\"\"\"object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}\norigin {1[0]} {1[1]} {1[2]}\ndelta {2[0]} 0.0 0.0\ndelta 0.0 {2[1]} 0.0\ndelta 0.0 0.0 {2[2]}\nobject 2 class gridconnections counts {0[0]} {0[1]} {0[2]}\nobject 3 class array type double rank 0 items {3} data follows\n\"\"\".format(data['counts'], data['origin'], data['spacing'], n_points))\n\n for start_n in range(0, len(data['vals']), 3):\n F.write(' '.join(['%6e' % c\n for c in data['vals'][start_n:start_n + 3]]) + '\\n')\n\n F.write('object 4 class field\\n')\n F.write('component \"positions\" value 1\\n')\n F.write('component \"connections\" value 2\\n')\n F.write('component \"data\" value 3\\n')\n F.close()", "def depth_write(filename, depth):\n height,width = depth.shape[:2]\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n np.array(width).astype(np.int32).tofile(f)\n np.array(height).astype(np.int32).tofile(f)\n \n depth.astype(np.float32).tofile(f)\n f.close()", "def write(obj, fpath, flag=jpegio.DECOMPRESSED):\r\n if flag is jpegio.DECOMPRESSED:\r\n obj.write(fpath)\r\n elif flag == jpegio.ZIGZAG_DCT_1D:\r\n raise ValueError(\"ZIGZAG_DCT_1D: not supported yet\")\r\n\r\n return obj", "def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)", "def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()", "def saveenergyfile(path, meta, data):\n def serializemeta(meta):\n \"\"\"Convert metadata object to list of comment strings\"\"\"\n return [u\"#CTE_%s: %s\" % (key, meta[key]) for key in meta]\n\n with io.open(path, 'w+') as ff:\n ff.write(u\"\\n\".join(serializemeta(meta)))\n ff.write(u\"\\nvector,tipo,src_dst\\n\")\n for c in data:\n carrier = c['carrier']\n ctype = c['ctype']\n originoruse = c['originoruse']\n values = u\", \".join(u\"%.2f\" % v for v in c['values'])\n comment = u\" # %s\" % c['comment'] if c['comment'] else u\"\"\n ff.write(u\"%s, %s, %s, %s%s\\n\" % (carrier, ctype, originoruse, values, comment))", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def save_file(map_, args): \n if args.segments:\n p = os.path.join(args.res_dir, 'compression_'+args.db+\"_seg\")\n else:\n p = os.path.join(args.res_dir, 'compression_'+args.db)\n with open(p, 'w') as f:\n for file in map_:\n f.write(\"{} {}\\n\".format(file, map_[file]))", "def savemat(self, file_name):\n d = {'dG0_prime': self.dG0_prime,\n 'dG0': self.dG0,\n 'T': self.T,\n 'I': self.I,\n 'pH': self.pH,\n 'pMg': self.pMg,\n 'weight': self.weight,\n 'cids': self.cids,\n 'S': self.S.values}\n savemat(file_name, d, oned_as='row')", "def save(self, fname, compression='blosc'):\n\n bo = {\n 'data': self.data.values,\n 'locs': self.locs,\n 'sessions': self.sessions,\n 'sample_rate': 
self.sample_rate,\n 'kurtosis': self.kurtosis,\n 'kurtosis_threshold' : self.kurtosis_threshold,\n 'meta': self.meta,\n 'date_created': self.date_created,\n 'minimum_voxel_size': self.minimum_voxel_size,\n 'maximum_voxel_size': self.maximum_voxel_size,\n 'label' : self.label,\n 'filter' : self.filter,\n }\n\n if fname[-3:] != '.bo':\n fname += '.bo'\n\n dd.io.save(fname, bo, compression=compression)", "def write_uncompressed_skims(skims, directory, overwrite=False):\n os.makedirs(directory, exist_ok=True)\n for k in skims:\n filename = os.path.join(directory, f\"{k}.emx\")\n if not os.path.exists(filename) or overwrite:\n skims[k].values.tofile(filename)", "def writepdb(self,fname):\n pdbfile = open(fname + \".pdb\", \"w\")\n for a in self.atoms:\n pdbfile.write(str(a.type) + \"\\t\" + str(a.x) + \"\\t\" + str(a.y) + \"\\t\" + str(a.z) + \"\\n\")\n pdbfile.close()", "def dict2file(dict, filename, foldername):\n if foldername:\n if not os.path.exists(\"../Created_QD/\" + foldername):\n os.makedirs(\"../Created_QD/\" + foldername)\n file = open(\"../Created_QD/\" + foldername + \"/\" + filename + \".xyz\", \"w\")\n else:\n file = open(\"../Created_QD/\" + filename + \".xyz\", \"w\")\n file.write(\" \\n\\n\")\n for atom, values in dict.items():\n file.write(values['element'] + \"\\t\" + str(values['coor'][0]) + \"\\t\\t\" +\n str(values['coor'][1]) + \"\\t\\t\" + str(values['coor'][2]) + \"\\n\")\n file.seek(0)\n file.write(str(len(dict)))\n file.close()\n print(\"\\nQuantum Dot created :)\")", "def _saveBinaryData_compressed(self, file, with_axis=None):\n if with_axis is not None:\n data = self._data_with_axis(with_axis)\n numpy.save_compressed(file, data=data)\n else:\n numpy.savez_compressed(file, data=self.data)", "def write (self, file):\n\t\tfile.write (self.pack ())", "def save_compression():\n added,comp_seq, binary_seq, seq, file = binary_to_utf8()\n huffman_code, binary_seq = huffman_construction(seq)\n \n #save the number of zeroes added to the dictionnary \n huffman_code[\"add\"]= added\n created_file = os.path.splitext(file)[0]\n file_comp = open(created_file + \"_compressed.txt\", \"w\") \n \n #save the dictionnary in the file and the compressed sequence\n json.dump(huffman_code, file_comp)\n file_comp.write(\"\\n\"+comp_seq) \n file_comp.close()\n \n messagebox.showinfo(\"Information\", \"Your compression has been saved in \"+created_file +\"_compressed.txt file.\")\n return comp_seq, binary_seq, seq", "def write_multfile(image_coords, source_z, file_name = 'multfile.in'):\n print 'write_multfile'\n file_in = open(file_name, 'w')\n file_in.write('#REFERENCE 3 0.0 0.0\\n')\n\n for i in range(len(image_coords)):\n image_id = 'A' + str(i+1) + ' '\n data = str(image_coords[i][0]) + ' ' + str(image_coords[i][1]) \\\n + str(' 0.2 0.2 0 ') + str(source_z) + ' 0'\n final = image_id + data + '\\n'\n file_in.write(final)\n file_in.close()", "def compressed_pickle(title, data):\n with bz2.BZ2File(title, 'w') as f:\n cPickle.dump(data, f)", "def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('number of atoms do not match')\n out = openFile(addext(filename, '.nmd'), 'w')\n\n #out.write('#!{0} -e\\n'.format(VMDPATH))\n out.write('nmwiz_load {0}\\n'.format(abspath(filename)))\n name = modes.getTitle()\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', 
'').isalnum() or len(name) > 30:\n name = str(atoms)\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = splitext(split(filename)[1])[0]\n out.write('name {0}\\n'.format(name))\n try:\n coords = atoms.getCoords()\n except:\n raise ValueError('coordinates could not be retrieved from atoms')\n if coords is None:\n raise ValueError('atom coordinates are not set')\n\n try:\n data = atoms.getNames()\n if data is not None:\n out.write('atomnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnames()\n if data is not None:\n out.write('resnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnums()\n if data is not None:\n out.write('resids ')\n data.tofile(out, ' ')\n out.write('\\n')\n except:\n pass\n try:\n data = atoms.getChids()\n if data is not None:\n out.write('chainids {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getSegnames()\n if data is not None:\n out.write('segnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n\n try:\n data = atoms.getBetas()\n if data is not None:\n out.write('bfactors ')\n data.tofile(out, ' ', '%.2f')\n out.write('\\n')\n except:\n pass\n\n format = '{0:.3f}'.format\n out.write('coordinates ')\n coords.tofile(out, ' ', '%.3f')\n out.write('\\n')\n count = 0\n if isinstance(modes, Vector):\n out.write('mode 1 {0:.2f} '.format(abs(modes)))\n modes.getNormed()._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n else:\n if isinstance(modes, Mode):\n modes = [modes]\n for mode in modes:\n if (mode.getEigval() < ZERO) and not zeros:\n continue\n elif (mode.getEigval() < ZERO) and zeros:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))\n else:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, mode.getVariance()**0.5))\n arr = mode._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n if count == 0:\n LOGGER.warning('No normal mode data was written. 
'\n 'Given modes might have 0 eigenvalues.')\n out.close()\n return filename", "def writeChunk(chunk):", "def writeFFDFile(fileName, nBlocks, nx, ny, nz, points):\n\n f = open(fileName, \"w\")\n\n f.write(\"%d\\n\" % nBlocks)\n for i in range(nBlocks):\n f.write(\"%d %d %d \" % (nx[i], ny[i], nz[i]))\n # end\n f.write(\"\\n\")\n for block in range(nBlocks):\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 0])\n # end\n # end\n # end\n f.write(\"\\n\")\n\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 1])\n # end\n # end\n # end\n f.write(\"\\n\")\n\n for k in range(nz[block]):\n for j in range(ny[block]):\n for i in range(nx[block]):\n f.write(\"%f \" % points[block][i, j, k, 2])\n # end\n # end\n # end\n # end\n f.close()\n return", "def write_data():", "def Construct3DMolToFile(fileName,writeFile):\r\n # Writing sets of molecules\r\n \r\n\r\n w = Chem.SDWriter(writeFile)\r\n suppl = Chem.SDMolSupplier(fileName)\r\n mols = [x for x in suppl]\r\n for mol in mols:\r\n \t# print(mol.GetProp(\"Solvent\"))\r\n \t# print(mol.GetPropNames)\r\n \tsignal.signal(signal.SIGALRM, handler)\r\n \tsignal.alarm(100)\r\n \ttry:\r\n \t\tmol3d = GetMolFromMol(mol,dimension=3)\r\n \t\tw.write(mol3d)\r\n \texcept Exception:\r\n \t\tmol3d = mol\r\n \t\tw.write(mol3d)\r\n \t\t# print(mol.GetPropsAsDict())\r\n\r\n\r\n w.close()", "def write(self, filename):\n f = open(filename, 'w')\n f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed", "def dumpData(self,out):\n #--Header\n out.packSub('MAPH','ii',512,9)\n #--Data\n out.pack('4si','MAPD',512*512*3)\n out.write(''.join(self.mapd))", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def save_decompression():\n dna_seq, bin_seq, comp_seq, file_comp = binary_to_seq()\n \n #create a new file containing the original sequence\n file_path = os.path.splitext(file_comp)[0]\n file = open(file_path+ \"_decompressed.txt\", \"w\")\n file.write(dna_seq)\n file.close()\n \n #show a message for saving\n messagebox.showinfo(\"Information\", \"Your decompression has been saved in \"\n +file_path+\"_decompressed.txt.\")\n \n #print(comp_seq, dna_seq, bin_seq)\n return comp_seq, dna_seq, bin_seq", "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for node in self.nodes:\n if self.format < 2:\n out_file.write(' '.encode())\n out_file.write('-1'.encode())\n if self.format == 0:\n 
out_file.write('{:5d}'.format(node.number).encode())\n else:\n out_file.write('{:10d}'.format(node.number).encode())\n for i in range(3):\n out_file.write('{:12.5E}'.format(node.pos[i]).encode())\n out_file.write('\\n'.encode())\n else:\n out_file.write(struct.pack('i', node.number))\n if self.format == 2:\n out_file.write(struct.pack('fff', *node.pos))\n else:\n out_file.write(struct.pack('ddd', *node.pos))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def cam_write(filename, M, N):\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n M.astype('float64').tofile(f)\n N.astype('float64').tofile(f)\n f.close()", "def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))", "def _newfile(counter):\n name = '%s/sitemap-%s.xml.gz' % (settings.SITEMAPS_DIR,\n counter)\n fp = gzip.open(name, 'wb')\n fp.write(\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">\\n\"\"\")\n return fp", "def writeMCToGR3File(filename, mc):\n nodes = np.vstack((mc.x, mc.y)).T\n nodalValues = mc.data[:, 0, 0].squeeze()[:, None]\n connectivity = mc.connectivity\n openBndNodes = []\n landBndNodes = []\n writeGR3File(filename, '', nodes, nodalValues, connectivity, mc.boundaries)", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def writeOutFileUMIs(barcode_dict, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n \tfor barcode in barcode_dict:\n out_file.write(barcode)\n out_file.write(\"\\t\" + \"\\t\".join(barcode_dict[barcode]))\n out_file.write(\"\\n\")", "def to_file(self, file_path, smirnoff_data):\n pass", "def save_mb_obj(out_file, metaballs, vertices, vcolors=[], vnormals=[]):\n if out_file.endswith(\".gz\"):\n f_out = gzip.open(out_file, 'wt')\n else:\n f_out = open(out_file, 'w')\n\n f_out.write(\"####\\n\")\n f_out.write(\"#\\n\")\n f_out.write(\"# Metaballs: {}\\n\".format(len(metaballs.mbs)))\n f_out.write(\"#\\n\")\n f_out.write(\"# mth {}\\n\".format(str(metaballs.mth)))\n for mb in metaballs.mbs:\n mbstr = \"# \" + str(mb) + \"\\n\"\n f_out.write(mbstr)\n f_out.write(\"#\\n\")\n\n f_out.write(\"####\\n\")\n f_out.write(\"#\\n\")\n f_out.write(\"# Vertices: {}\\n\".format(len(vertices)))\n f_out.write(\"#\\n\")\n f_out.write(\"####\\n\")\n\n for vi, v in enumerate(vertices):\n vstr = \"v {} {} {}\".format(v[0], v[1], v[2])\n if len(vcolors) > 0:\n vc = vcolors[vi]\n vstr += \" {} {} {}\".format(vc[0], vc[1], vc[2])\n vstr += \"\\n\"\n f_out.write(vstr)\n\n f_out.write(\"# {} vertices\\n\\n\".format(len(vertices)))\n\n if len(vnormals) > 0:\n for vn in vnormals:\n vnstr = \"vn {} {} {}\\n\".format(vn[0], vn[1], vn[2])\n f_out.write(vnstr)\n\n f_out.write(\"# {} normals\\n\\n\".format(len(vnormals)))\n\n f_out.write(\"# End of File\")\n f_out.close()\n\n return True", "def save(self, filename, compression=True, transpose=False,\n sparse=False, support=False, compression_opts=1):\n 
write_ga_file(filename, self.value, self[0].layout.metric, self[0].layout.basis_names,\n compression=compression, transpose=transpose,\n sparse=sparse, support=support, compression_opts=compression_opts)", "def write_zfile(file_handle, data, compress=1):\r\n file_handle.write(_ZFILE_PREFIX)\r\n length = hex(len(data))\r\n if sys.version_info[0] < 3 and type(length) is long:\r\n # We need to remove the trailing 'L' in the hex representation\r\n length = length[:-1]\r\n # Store the length of the data\r\n file_handle.write(asbytes(length.ljust(_MAX_LEN)))\r\n file_handle.write(zlib.compress(asbytes(data), compress))", "def write_density(fname, density):\n K, M, N = density.shape\n output = open(fname, \"w\")\n output.write(\"ARMA_CUB_TXT_FN008\\n\")\n output.write(\"%d %d %d\\n\" % (K, M, N))\n for i in range(N):\n for k in range(K):\n for m in range(M):\n output.write(\" %+.6e\" % density[k, m, i])\n output.write(\"\\n\")\n\n output.close()", "def save_geneset_to_file(geneset, output_file):\n with gzip.open(output_file, \"w\") as handle:\n handle.write(\"\\n\".join(geneset).encode(\"utf-8\"))", "def write_compressed_skims(skims, output=\"emmemat.zarr\"):\n known_exts = (\".zarr\", \".zarr.zip\")\n if not any(output.endswith(k) for k in known_exts):\n raise NotImplementedError(output)\n if output.endswith(\".zarr\"):\n skims.to_zarr(output, mode='a')\n elif output.endswith(\".zarr.zip\"):\n if os.path.exists(output):\n raise FileExistsError(output)\n with zarr.ZipStore(output, mode='w') as store:\n skims.to_zarr(store)", "def write_mm(g, fn):\n f = open(fn, \"w\")\n f.write(\"%d %d %d\\n\" % (g.vcount(), g.vcount(), g.ecount()))\n\n if g.is_weighted():\n for e in g.es():\n f.write(\"%d %d %.4f\\n\" % (e.source, e.target, e[\"weight\"]))\n else:\n for e in g.es():\n f.write(\"%d %d 1\\n\" % (e.source, e.target))\n\n f.close()", "def compress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = build_frequency_dict(text)\n tree = build_huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (tree.num_nodes_to_bytes() + tree_to_bytes(tree) +\n int32_to_bytes(len(text)))\n result += compress_bytes(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def write2hdf5(filename, dict2store, compression=\"lzf\"):\n\twith h5py.File(filename,'w') as hf:\n\t\tfor key,value in dict2store.iteritems():\n\t\t\thf.create_dataset(key, data=value,compression=compression)", "def dumpData(self,out):\n #--Get sizes and dump into dataIO\n self.hedr.getSize()\n self.hedr.dump(out)\n for (name,size) in self.masters:\n out.packSub0('MAST',name)\n out.packSub('DATA','Q',size)\n if self.gmdt: \n self.gmdt.getSize()\n self.gmdt.dump(out)\n for other in self.others:\n other.getSize()\n other.dump(out)", "def write_minisat(self):\n num_variables = len(self.label_encodings)\n num_clauses = self.num_clauses\n clauses = self.clauses\n outfile = MinisatRunner.temp_in\n out = open(outfile,\"w\")\n try:\n out.write(\"p cnf %3d %3d\\n\" % (num_variables,num_clauses))\n for clause in clauses:\n for clause_variable in clause:\n out.write(\" %3d\" % self.minisat_encode_label(clause_variable));\n out.write(\" 0\\n\")\n finally:\n out.close()", "def dump_mat(filename, obj, **kwargs):\n return sio.savemat(filename, obj, **kwargs)", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with 
gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def writeOutFileBarcodeCounts(barcode_dict_summary, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n for barcode in barcode_dict_summary:\n out_file.write(barcode)\n out_file.write(\"\\t\" + \"\\t\".join(map(str,barcode_dict_summary[barcode])))\n out_file.write(\"\\n\")", "def write_embeddings_to_file(self):\n modes = [self.generator, self.discriminator]\n for i in range(2):\n embedding_matrix = modes[i].embedding_matrix\n embedding_matrix = embedding_matrix.detach().to('cpu').numpy()\n index = np.array(range(self.n_node)).reshape(-1, 1)\n embedding_matrix = np.hstack([index, embedding_matrix])\n embedding_list = embedding_matrix.tolist()\n embedding_str = [str(int(emb[0])) + \"\\t\" + \"\\t\".join([str(x) for x in emb[1:]]) + \"\\n\" \n for emb in embedding_list]\n with open(config.emb_filenames[i], \"w+\") as f:\n lines = [str(self.n_node) + \"\\t\" + str(config.n_emb) + \"\\n\"] + embedding_str\n f.writelines(lines)", "def save_nelder_mead_data(name, simplex, fvals, iters, evals):\n N = simplex.shape[0] # Number of points in simplex\n K = simplex.shape[1] # Total number of parameters\n\n with open(name + \".txt\", \"w\") as f:\n my_writer = csv.writer(f, delimiter=\",\")\n my_writer.writerow(simplex.shape)\n my_writer.writerow([iters, evals])\n for n in range(N):\n my_writer.writerow(simplex[n, :])\n my_writer.writerow(fvals)", "def save_to_xyz(self, filename): \n with open( filename, 'a' ) as F:\n F = open( filename, 'a' )\n F.write( '%d\\n'%self.num_atoms )\n F.write( \"XYZ\\n\" )\n for num,row in enumerate(self.atoms):\n try:\n F.write('%s '%self.species[num])\n except:\n F.write('X%d '%num)\n F.write( mat2str( row, \"%16.10f\" ) )\n F.write( \"\\n\" )", "def dump(self, file):\n if isinstance(file, basestring):\n file = open(file, 'w')\n metadb = self.metadb.dumps()\n file.write('v%s:%d\\n' % (self.VERSION, len(metadb)))\n file.write(metadb)\n file.write(self.datadb.dumps())", "def write_stewicombo_metadata(file_name, metadata_dict, category=''):\n meta = set_stewicombo_meta(file_name, category=category)\n meta.tool_meta = metadata_dict\n write_metadata_to_file(paths, meta)", "def compress_image(filename,k):", "def write_md_file(sip_uuid, filepath):\n sip = SIP.objects.get(pk=sip_uuid)\n md_object = load_file_data_from_db(sip, filepath)\n convert_md_object = json.dumps(\n md_object, sort_keys=True, ensure_ascii=False, indent=4\n )\n filename = os.path.join(filepath, \"metadata_output.json\")\n with open(filename, \"wb\") as f:\n f.write(six.ensure_binary(convert_md_object))", "def write_metadata(metadata_buffer, meta_dir, meta_file_name):\n \n with open(os.path.join(meta_dir, meta_file_name), 'w+') as meta_file:\n writer = csv.writer(meta_file, delimiter=',', quotechar='\"')\n writer.writerows(metadata_buffer)", "def compress(self, file):\n\t\t\n\t\ttext = file.read() \n\t\ttext = text.rstrip() #elimina los espacios en blanco del final\n\n\t\t\n\t\tfrequency = self.make_frequency_dict(text)#obtenemos la frencuencia de cada numero en el texto\n\t\tself.make_heap(frequency)\n\t\tself.merge_nodes()\n\t\tself.make_codes()\n\t\tencoded_text = self.get_encoded_text(text)\n\t\tpadded_encoded_text = self.pad_encoded_text(encoded_text)\n\n\t\tb = self.get_byte_array(padded_encoded_text)\n\n\t\treturn b", "def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, \"wb\") as file:\n file.write(content)", 
"def compress_file(netcdf_file_name):\n\n radar_io.compress_file(netcdf_file_name)", "def write_mir(self, filename):\n raise NotImplementedError", "def close(self):\n # write META-INF\n self.zip.writestr(\"META-INF/container.xml\", str(self.xml))\n # write content.opf\n self.zip.writestr(\"OEBPS/content.opf\", str(self.opf))\n # write toc.ncx\n self.zip.writestr(\"OEBPS/toc.ncx\", str(self.ncx))\n self.zip.close()", "def WriteDataPack(resources, output_file, encoding):\n content = WriteDataPackToString(resources, encoding)\n with open(output_file, 'wb') as file:\n file.write(content)", "def write(self, fname):\n pass", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def compression(s):", "def save_progress(filename, derm_counts):\n\n with open(filename, 'w') as f:\n f.write(\"zipcode,derms_within_%d_miles\\n\" % RADIUS)\n prefix = \"\"\n for key, val in derm_counts.iteritems():\n f.write(prefix)\n f.write(key)\n f.write(',')\n f.write(val)\n prefix= \"\\n\"", "def write_xdmf(self, filename: str):\n\n mesh = UnstructuredMesh.from_h5(filename)\n mesh.write_h5(filename)", "def save_as(self, filename: str) -> None:\n save_data = lzma.compress(pickle.dumps(self))\n with open(filename, \"wb\") as f:\n f.write(save_data)", "def write(self, data: bytes, name: str, compress: bool = False):\n filepath = self.join(name)\n if compress:\n self.driver.write(gzip(data), filepath)\n else:\n self.driver.write(data, filepath)", "def save_data(dfin, outfile=\"./FPeng_prepped\"):\n dfin.to_csv(outfile+'.csv', sep='\\t', index=False)\n # s3.meta.client.upload_file(outfile+\".csv\", 'p3-engine', 'ETL/FPeng_prepped.csv')\n print(\"csv...\", end=\" \")\n\n dfin.to_pickle(outfile+'.pkl' ,protocol=4)\n # s3.meta.client.upload_file(outfile+'.pkl', 'p3-engine', 'ETL/FPeng_prepped.pkl')\n print(\"pkl...\", end=\" \")\n #dfin.to_msgpack(outfile+'.msg')\n #print(\"msg...\", end=\" \")\n\n #s3.meta.client.upload_file(outfile+\".msg\", 'p3-engine', 'ETL/FPeng_prepped.msg')\n\n # print(\"to s3 complete\", end=\" \")", "def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)", "def tofile(self, filename:'Union[str, Path]',\n *args, **kwargs):\n\n self._data.tofile(filename, *args, **kwargs)\n dims_filename = filename + '.ini' # '.meta'\n self._dims.tofile(dims_filename)\n\n with open(dims_filename, mode='a') as file:\n file.write(f'dtype={self._data.dtype.name}\\n')", "def write_structure_file(self, file_name):\n self.dna_structure.write(file_name,write_json_format=True)", "def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)", "def write_data(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data = data, compression='gzip', compression_opts=9)\n f.close()", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def save(self, filename):\n with gzip.open(filename, \"w\") as f:\n f.write(pickle.dumps(self))", "def write_to_gzip(fname, 
html_body):\n dir_path = os.path.dirname(fname)\n ensure_dir_exists(dir_path)\n\n with gzip.open(fname, 'wb') as html_file:\n html_file.write(html_body)", "def exportMmf(self, filename):\n self.matrix.export_mtx(filename)", "def write(data):", "def write_sigmf(data_file, data, buffer=None, append=True):\n\n packed = pack_bin(data)\n\n write_bin(data_file, packed, buffer, append)", "def write_uem(uemf, uem, n_digits=3):\n with open(uemf, 'wb') as f:\n for file_id in sorted(iterkeys(uem)):\n for onset, offset in sorted(uem[file_id]):\n line = ' '.join([file_id,\n '1',\n format_float(onset, n_digits),\n format_float(offset, n_digits)\n ])\n f.write(line.encode('utf-8'))\n f.write(b'\\n')", "def to_dat(self, **kwargs):\n return self.write(file_format=\"dat\", **kwargs)", "def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))", "def save_compressed_image(self, filename):\n if filename[-5:] != '.pbz2':\n filename + '.pbz2'\n self.compressed_pickle(filename, self)", "def write_shortfile_table(self):\n\n # KMEL actually removes duplicate short filenames from this\n # table.\n\n start_of_shortfiles = self.db_file.tell()\n\n shortfiles = {}\n for miEntry in self.mainIndex:\n short_filename = miEntry.encodedShortfile\n if short_filename in shortfiles:\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n else:\n shortfiles[short_filename] = \\\n self.db_file.tell() - start_of_shortfiles\n\n miEntry.set_shortfile_offset(\n shortfiles[short_filename])\n self.db_file.write(short_filename)", "def write(self, data, filename):\n id_ = 1\n weightlist_el = Element('weight-list')\n for dataset in data:\n weight_el = SubElement(weightlist_el, 'weight')\n id_el = SubElement(weight_el, 'id')\n id_el.text = str(id_)\n date_el = SubElement(weight_el, 'date')\n date_el.text = str(dataset.date) + 'T12:00:00'\n value_el = SubElement(weight_el, 'value')\n value_el.text = str(dataset.weight)\n comment_el = SubElement(weight_el, 'comment')\n comment_el.text = dataset.note\n id_ += 1\n st_tree = ElementTree(weightlist_el)\n st_tree.write(filename, encoding='UTF-8')", "def write_out4fp(fname,specorder,nspcs,agr,nr,rmax,pairs,nperline=6):\n ndat = nr *len(pairs)\n data = np.zeros(ndat)\n n = 0\n for pair in pairs:\n isid,jsid = pair\n for i in range(nr):\n data[n] = agr[isid,jsid,i]\n n += 1\n\n with open(fname,'w') as f:\n f.write('# RDF for pairs: ')\n for pair in pairs:\n si = specorder[pair[0]-1]\n sj = specorder[pair[1]-1]\n f.write(' {0:s}-{1:s},'.format(si,sj))\n f.write('\\n')\n f.write('# rmax, nr = {0:.3f}, {1:d}\\n'.format(rmax,nr))\n f.write('#\\n')\n #...Num of data, weight for the 
data\n f.write(' {0:6d} {1:7.3f}\\n'.format(ndat, 1.0))\n j0 = 0\n while True:\n f.write(' '.join('{0:12.4e}'.format(data[j]) for j in range(j0,j0+nperline) if j < ndat))\n f.write('\\n')\n j0 += nperline\n if j0 >= ndat:\n break\n\n return None" ]
[ "0.66734606", "0.6449204", "0.61703485", "0.6010072", "0.5976385", "0.5969885", "0.59663653", "0.59331244", "0.5882864", "0.5877286", "0.5830967", "0.5827414", "0.5769662", "0.574796", "0.57416415", "0.5724244", "0.5712031", "0.56908506", "0.5682276", "0.5674419", "0.56734663", "0.56734544", "0.56269914", "0.5602295", "0.5590598", "0.5577713", "0.55673736", "0.5564312", "0.5549882", "0.5542561", "0.5529909", "0.5528757", "0.5525905", "0.5522067", "0.5517885", "0.55012745", "0.54895926", "0.5484564", "0.547589", "0.54729193", "0.5472285", "0.5460378", "0.5447356", "0.5447068", "0.54409903", "0.54367256", "0.5434682", "0.5422218", "0.54170007", "0.5415363", "0.54104394", "0.540258", "0.53848356", "0.53793126", "0.5366277", "0.5365634", "0.53614926", "0.53592855", "0.535028", "0.53487897", "0.53334194", "0.53329295", "0.5332172", "0.5328979", "0.53226006", "0.53221625", "0.5321325", "0.5316614", "0.53161055", "0.529638", "0.5293982", "0.5293666", "0.52780735", "0.5277023", "0.52758366", "0.52741796", "0.52708393", "0.5269947", "0.5263435", "0.5259244", "0.5257802", "0.5250943", "0.5250753", "0.5250086", "0.5246645", "0.52453583", "0.52408266", "0.5239187", "0.52379507", "0.5235555", "0.5231602", "0.52302814", "0.52266896", "0.5225999", "0.52204233", "0.52186865", "0.52144444", "0.52121115", "0.52114296", "0.52077013" ]
0.6046685
3
build the object data, filling in the stub we read; note position offsets appear nowhere in this function
def _build_meds_layout(self): nim = self.image_info.size nobj = self.obj_data.size trim_to_coadd = self.get('trim_to_coadd',False) if trim_to_coadd: print(' trimming to coadd') coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \ self._get_pos_and_bounds(self.obj_data, 0) in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol']) w_in_bnds, = np.where(in_bnds == True) assert w_in_bnds.size > 0,"none found in coadd" w_in_bnds = coadd_q[w_in_bnds] self.obj_data = self.obj_data[w_in_bnds] self._do_psf_setup() # box sizes are even half_box_size = self.obj_data['box_size']//2 for file_id in range(nim): wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id) # do the test in_bnds = bnds.contains_points(pos['zrow'], pos['zcol']) q_rc, = np.where(in_bnds == True) print(' second cut: %6d of %6d objects' % (len(q_rc),len(q))) # now make sure everything is there if self['check_in_first_image']: if file_id == 0 and len(self.obj_data['ra']) != len(q_rc): raise MEDSCreationError('Not all objects were found in first image for ' 'MEDS making (which is the coadd/detection ' 'image by convention).') # compose them q = q[q_rc] # fill in the object_data structure # note q_rc since pos was created using obj_data[q] qrow = pos['zrow'][q_rc] qcol = pos['zcol'][q_rc] icut = self.obj_data['ncutout'][q] self.obj_data['file_id'][q,icut] = file_id self.obj_data['orig_row'][q,icut] = qrow self.obj_data['orig_col'][q,icut] = qcol # this results in the object center being close to # the natural center (dim-1.)/2. ostart_row = qrow.astype('i4') - half_box_size[q] + 1 ostart_col = qcol.astype('i4') - half_box_size[q] + 1 crow = qrow - ostart_row ccol = qcol - ostart_col self.obj_data['orig_start_row'][q,icut] = ostart_row self.obj_data['orig_start_col'][q,icut] = ostart_col self.obj_data['cutout_row'][q,icut] = crow self.obj_data['cutout_col'][q,icut] = ccol # do jacobian, in original, not-offset coords # note q_rc since pos was created using self.obj_data[q] jacob = wcs.get_jacobian( x=pos['wcs_col'][q_rc], y=pos['wcs_row'][q_rc]) # jacob is a tuple of arrays self.obj_data['dudcol'][q,icut] = jacob[0] self.obj_data['dudrow'][q,icut] = jacob[1] self.obj_data['dvdcol'][q,icut] = jacob[2] self.obj_data['dvdrow'][q,icut] = jacob[3] # increment self.obj_data['ncutout'][q] += 1 w,=np.where(self.obj_data['ncutout'] > 0) print('%d/%d had ncut > 0' % (w.size, self.obj_data.size)) #self.obj_data = self.obj_data[w] self.obj_data = self._make_resized_data(self.obj_data) print('setting number field as sequential') self.obj_data['number'] = 1+np.arange(self.obj_data.size) self._set_start_rows_and_pixel_count() if self['survey']=='cosmos': self._set_psf_layout_hst() else: self._set_psf_layout_psfex()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_raw_data(self, idx: int):\n info = super().prepare_raw_data(idx)\n if self.cache_reader is not None:\n self.human_data = self.cache_reader.get_item(idx)\n idx = idx % self.cache_reader.slice_size\n\n if 'smplx' in self.human_data:\n smplx_dict = self.human_data['smplx']\n info['has_smplx'] = 1\n else:\n smplx_dict = {}\n info['has_smplx'] = 0\n if 'global_orient' in smplx_dict:\n info['smplx_global_orient'] = smplx_dict['global_orient'][idx]\n info['has_smplx_global_orient'] = 1\n else:\n info['smplx_global_orient'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_global_orient'] = 0\n\n if 'body_pose' in smplx_dict:\n info['smplx_body_pose'] = smplx_dict['body_pose'][idx]\n info['has_smplx_body_pose'] = 1\n else:\n info['smplx_body_pose'] = np.zeros((21, 3), dtype=np.float32)\n info['has_smplx_body_pose'] = 0\n\n if 'right_hand_pose' in smplx_dict:\n info['smplx_right_hand_pose'] = smplx_dict['right_hand_pose'][idx]\n info['has_smplx_right_hand_pose'] = 1\n else:\n info['smplx_right_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_right_hand_pose'] = 0\n\n if 'left_hand_pose' in smplx_dict:\n info['smplx_left_hand_pose'] = smplx_dict['left_hand_pose'][idx]\n info['has_smplx_left_hand_pose'] = 1\n else:\n info['smplx_left_hand_pose'] = np.zeros((15, 3), dtype=np.float32)\n info['has_smplx_left_hand_pose'] = 0\n\n if 'jaw_pose' in smplx_dict:\n info['smplx_jaw_pose'] = smplx_dict['jaw_pose'][idx]\n info['has_smplx_jaw_pose'] = 1\n else:\n info['smplx_jaw_pose'] = np.zeros((3), dtype=np.float32)\n info['has_smplx_jaw_pose'] = 0\n\n if 'betas' in smplx_dict:\n info['smplx_betas'] = smplx_dict['betas'][idx]\n info['has_smplx_betas'] = 1\n else:\n info['smplx_betas'] = np.zeros((self.num_betas), dtype=np.float32)\n info['has_smplx_betas'] = 0\n\n if 'expression' in smplx_dict:\n info['smplx_expression'] = smplx_dict['expression'][idx]\n info['has_smplx_expression'] = 1\n else:\n info['smplx_expression'] = np.zeros((self.num_expression),\n dtype=np.float32)\n info['has_smplx_expression'] = 0\n\n return info", "def __init__(self, starting_point=-1):\n self.i_read = starting_point\n self.data = [['fake_chip_id', 'fake_version'],\n [96, 110, 203, 104, 50, 0, 29, 145, 59, 215, 208, 11,\n 232, 38, 42, 255, 249, 255, 172, 38, 10, 216, 189, 16],\n [75],\n [129, 1, 0, 16, 44, 3, 30],\n [76, 60, 128, 129, 49, 128, 94, 120]]", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def _get_observation(self):\n di = super()._get_observation()\n\n # low-level object information\n if self.use_object_obs:\n # Get robot prefix\n if self.env_configuration == \"bimanual\":\n pr0 = self.robots[0].robot_model.naming_prefix + \"left_\"\n pr1 = self.robots[0].robot_model.naming_prefix + \"right_\"\n else:\n pr0 = self.robots[0].robot_model.naming_prefix\n pr1 = self.robots[1].robot_model.naming_prefix\n\n # position and rotation of object\n cube_pos = np.array(self.sim.data.body_xpos[self.cube_body_id])\n cube_quat = T.convert_quat(\n self.sim.data.body_xquat[self.cube_body_id], to=\"xyzw\"\n )\n di[\"cube_pos\"] = cube_pos\n di[\"cube_quat\"] = cube_quat\n\n di[pr0 + \"eef_xpos\"] = self._eef0_xpos\n di[pr1 + \"eef_xpos\"] = self._eef1_xpos\n di[\"handle_0_xpos\"] = np.array(self._handle_0_xpos)\n di[\"handle_1_xpos\"] = np.array(self._handle_1_xpos)\n di[pr0 + \"gripper_to_handle\"] = np.array(self._gripper_0_to_handle)\n di[pr1 + \"gripper_to_handle\"] = np.array(self._gripper_1_to_handle)\n\n di[\"object-state\"] = np.concatenate(\n [\n 
di[\"cube_pos\"],\n di[\"cube_quat\"],\n di[pr0 + \"eef_xpos\"],\n di[pr1 + \"eef_xpos\"],\n di[\"handle_0_xpos\"],\n di[\"handle_1_xpos\"],\n di[pr0 + \"gripper_to_handle\"],\n di[pr1 + \"gripper_to_handle\"],\n ]\n )\n\n return di", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n self.node = np.zeros(self.ntotal, dtype='int32')\n #oxx, oyy, ozz, txy, pressure\n self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')\n self.location = np.empty(self.ntotal, dtype='U8')\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def finish_constructing(self, more_data):\n self.draw_aspect = None\n self.obj_type = None\n self.ex_obj_id = None\n self.sub_type = None\n self.persist_id_ref = None\n if self.size != 0x18:\n raise ValueError('ExOleObjAtom has wrong size {0} != 0x18'\n .format(self.size))\n if self.data:\n self.draw_aspect, self.obj_type, self.ex_obj_id, self.sub_type, \\\n self.persist_id_ref, _ = unpack('<LLLLLL', self.data)\n if self.obj_type not in self.OBJ_TYPES:\n logging.warning('Unknown \"type\" value in ExOleObjAtom: {0}'\n .format(self.obj_type))\n if self.sub_type not in self.SUB_TYPES:\n logging.warning('Unknown sub type value in ExOleObjAtom: {0}'\n .format(self.sub_type))", "def getRigBuildData(self):\n\n # Values\n mouthPosition = self.jawCtrl.xfo.tr\n jawEndPosition = self.jawEndCtrl.xfo.tr\n mouthLen = mouthPosition.subtract(jawEndPosition).length()\n\n # Calculate Mouth Xfo\n\n # atVector\n # mouthUpV = Vec3(0.0, 1.0, 0.0)\n\n # rootToEnd = jawEndPosition.subtract(mouthPosition).unit()\n # rootToUpV = mouthUpV.subtract(mouthPosition).unit()\n # bone1ZAxis = rootToUpV.cross(rootToEnd).unit()\n # bone1Normal = bone1ZAxis.cross(rootToEnd).unit()\n\n jawXfo = self.jawEndCtrl.xfo\n # jawXfo.setFromVectors(rootToEnd, bone1Normal, bone1ZAxis, mouthPosition)\n\n\n\n data = super(OSSMouthGuide, self).getRigBuildData()\n\n # should include getCurveData\n data = self.saveAllObjectData(data, \"Control\")\n data = self.saveAllObjectData(data, \"Transform\")\n data['jawXfo'] = self.jawCtrl.xfo\n data['mouthLen'] = mouthLen\n return data", "def _build_base_structure(self):\n result = dict(self.contents)\n # clean out optional fields that were missing\n if not self.contents[DataParticleKey.PORT_TIMESTAMP]:\n del result[DataParticleKey.PORT_TIMESTAMP]\n if not 
self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:\n del result[DataParticleKey.INTERNAL_TIMESTAMP]\n return result", "def prepareData(self, *data):\n arguments = 8\n (self.X, self.X_name, self.Y, self.Y_name, self.alignment,\n self.model, self.annotations, self.args) = tuple(data[:arguments])\n \n self.width = self.args.beam_width\n self.mathType = self.args.mathType\n self.io_files = {\n 'input': self.args.intermediate_input_files,\n 'output': self.args.intermediate_output_files\n }\n self.repeat_width = self.args.repeat_width\n self.cons_count = self.args.cons_count\n self.posterior_processors = self.args.posterior_processors \n\n self.positionGenerator = \\\n list(AlignmentBeamGenerator(self.alignment, self.width))\n \n for i in range(len(self.model.states)):\n self.model.states[i].computeHints(self)\n\n return data[arguments:]", "def build_item(\n self,\n part,\n timestamp=1539023700,\n userdata=0,\n position=[0, 0, 0],\n up_vec=[0, 1, 0],\n at_vec=[0, 0, 1],\n skip_power_controls=False):\n # Get the obj path.\n item = self.retrieve_part(part)\n\n # Lock Everything if it's the BASE_FLAG or U_POWERLINE.\n # BASE_FLAG can break things if user moves it around.\n # As it acts as the \"origin\" of the base.\n locked_parts = [\"BASE_FLAG\", \"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n line_parts = [\"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n if part in locked_parts:\n item.lock_location[0] = True\n item.lock_location[1] = True\n item.lock_location[2] = True\n item.lock_rotation[0] = True\n item.lock_rotation[1] = True\n item.lock_rotation[2] = True\n item.lock_scale[0] = True\n item.lock_scale[1] = True\n item.lock_scale[2] = True\n \n # Add custom attributes.\n item[\"ObjectID\"] = part\n item[\"SnapID\"] = part\n item[\"Timestamp\"] = timestamp\n item[\"belongs_to_preset\"] = False\n # Add an order flag to retain order when generating data..\n item[\"Order\"] = self.part_order\n self.part_order += 1\n # Apply Colour\n is_powerline = part in line_parts\n is_pipeline = part in [\"U_PIPELINE\"]\n material.assign_material(\n item,\n userdata,\n powerline=is_powerline,\n pipeline=is_pipeline\n )\n\n # Move\n utils.move_to(item, position=position, up=up_vec, at=at_vec)\n\n # If the object is a powerline, we should create additional controls\n # for it.\n if is_powerline and not skip_power_controls:\n power.create_power_controls(item)\n # Select the new object.\n item.select_set(True)\n return item", "def __init__(self):\n self.dtrs = []\n self.chunkIndex = 0\n self.eventList = [] \n self.position = None\n self.positionCount = 0\n self.parent = None\n self.embeddedTags = []", "def fill_data(self, data):\n self._data = data\n\n self._data_length = data[1:3]\n self._frame_id = data[4]\n self._address = XbeeAddress(data[5:9], data[9:13], data[13:15])\n self._at_command = data[15:17]\n self._command_status = data[17]\n try:\n self._command_data = data[18:21]\n self._checksum = data[22]\n except IndexError:\n self._command_data = None\n self._checksum = data[18]", "def _read_data(self):", "def _dataslicing(self):\n if self._build_properties[\"dataslice0\"] == \"undefined\":\n warnings.warn(\"No _build_properties['dataslice0'] defined. 0 assumed\")\n offset0 = [0,-1]\n else:\n offset0=self._build_properties[\"dataslice0\"]\n if self._build_properties[\"dataslice1\"] == \"undefined\":\n warnings.warn(\"No _build_properties['dataslice1'] defined. 
0 assumed\")\n offset1 = [0,-1]\n else:\n offset1=self._build_properties[\"dataslice1\"]\n \n if offset0[1] is None:\n offset0[1] = self.header[\"NAXIS2\"]\n \n if offset0[1] <0:\n offset0[1] = self.height+offset0[1]+1\n \n if offset1[1] is None:\n offset1[1] = self.header[\"NAXIS1\"]\n \n if offset1[1] <0:\n offset1[1] = self.width +offset1[1]+1\n \n return offset0[0],offset1[0],offset0[1]-offset0[0],offset1[1]-offset1[0]", "def prepare_data(self):", "def build_base(self):\n\n # start- and endpoints of lines are nodes, but they do not need to have a point object associated to them\n # in this case, self.geo is None and the no, prop and name attributes stay as the default values set in the constructor\n if (self.geo):\n\n attr = ri.RhinoInput(rs.ObjectName(self.geo))\n\n self.no = attr.get_no()\n if (self.no != -1):\n self.strict_naming = True\n\n self.name = attr.get_name()\n self.prop = attr.get_prop()", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def __init__(self):\n self.notes = []", "def __init__(self, data=b''):\n self.data = data\n self.offset = 0", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def start_data(self, obj_name_parts, game, filename, pos_start, length):\n if len(obj_name_parts) == 0:\n self.filename = filename\n self.pos_start = pos_start\n self.length = length\n self.game = game\n self.has_data = True\n self.data = []\n return self.data\n lower = obj_name_parts[0].lower()\n if lower not in self.children:\n self.children[lower] = Node(obj_name_parts[0])\n return self.children[lower].start_data(\n obj_name_parts[1:],\n game,\n filename,\n pos_start,\n length)", "def __init__(self):\r\n self.data = PositionalList()", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 14), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def __init__(self):\n\n\n self.dtype = np.dtype([\n ('fault_flags', np.uint32),\n ('raw_x', np.int16),\n ('raw_y', np.int16),\n ('raw_z', np.int16),\n ('accel_x', np.float32),\n ('accel_y', np.float32),\n ('accel_z', np.float32), \n ('pitch', np.float32),\n ('roll', np.float32), \n ])\n \n self._accel_indices = [0, 4, 5, 6]\n \n self.data = np.array([(0, 0.1, 12, 1234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234)], dtype=self.dtype)\n self.data['fault_flags'] = 0\n self.data['raw_x'] = 0.1\n self.data['raw_y'] = 12\n self.data['raw_z'] = 31929\n self.data['accel_x'] = 0.12345678901234\n self.data['accel_y'] = 0.23456789012345\n self.data['accel_z'] = 0.34567890123456\n self.data['pitch'] = 0.1000\n self.data['roll'] = 0.2000\n\n\n #print len(self.data.tostring(order=\"C\"))", "def ani_init(self):\n self.line.set_data([], [])\n self.galactic_centre.set_data([], [])\n self.impactor.set_data([], [])\n self.time_text.set_text(\"\")\n self.KE_text.set_text(\"\")\n self.GPE_text.set_text(\"\")\n self.energy_text.set_text(\"\")\n return (\n self.line,\n self.galactic_centre,\n self.impactor,\n self.time_text,\n self.KE_text,\n self.GPE_text,\n self.energy_text,\n )\n # One might hope that there was a better way to return objects", "def __init__(self, data):\n # loop through data\n for x in data:\n # create pitches list if attribute name is pitches\n if x == 'pitches':\n self.pitches = []\n for y in data[x]:\n self.pitches.append(Pitch(y))\n else:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def 
prepare(self):\n if self.pin.lower() == \"homo\":\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.Lead_HOMOs_xval[i] for x in line]\n elif self.pin.lower() == \"lumo\":\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.Lead_LUMOs_xval[i] for x in line]\n elif \"vac\" in self.pin.lower():\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.vacuum[i] for x in line]\n elif \"ef\" in self.pin.lower():\n for i,line in enumerate(self.x):\n self.x[i] = [x - self.fermi_levels[i] for x in line]", "def __init__(self):\n self.__deviceselected__ = \"SR-DMS4AP{LOCALBUMP}DEV:Sel-SP\"\n self.__source__ = \"SR-DMS4AP{LOCALBUMP}S-SP\"\n self.__plane__ = \"SR-DMS4AP{LOCALBUMP}PLANE-SP\"\n #self.__xshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:X-SP\"\n #self.__yshift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT:Y-SP\"\n #self.__xangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:X-SP\"\n #self.__yangle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:Y-SP\"\n self.__shift__ = \"SR-DMS4AP{LOCALBUMP}SHIFT-SP\"\n self.__angle__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-SP\"\n # with all offsets\n self.__anglerb__ = \"SR-DMS4AP{LOCALBUMP}ANGLE-I\"\n self.__positionrb__ = \"SR-DMS4AP{LOCALBUMP}POS-I\"\n # with BBA offset only\n self.__anglerb0__ = \"SR-DMS4AP{LOCALBUMP}ANGLE:BBA-I\"\n self.__positionrb0__ = \"SR-DMS4AP{LOCALBUMP}POS:BBA-I\"\n\n self.__bpmposition__ = \"SR-DMS4AP{LOCALBUMP:BPM}Pos-I\"\n self.__bpmorbitx__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X-I\"\n self.__bpmorbity__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y-I\"\n self.__bpmorbitx0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:X0-I\"\n self.__bpmorbity0__ = \"SR-DMS4AP{LOCALBUMP:BPM}ORB:Y0-I\"\n\n self.__correctorposition__ = \"SR-DMS4AP{LOCALBUMP:COR}Pos-I\"\n self.__hcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS-SP\"\n self.__hcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:HCOR}PS:Delta-SP\"\n self.__vcorrectorcurrent__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS-SP\"\n self.__vcorrectordiff__ = \"SR-DMS4AP{LOCALBUMP:VCOR}PS:Delta-SP\"\n\n self.__undo__ = \"SR-DMS4AP{LOCALBUMP}Enbl:Undo-Cmd\"\n self.__apply__ = \"SR-DMS4AP{LOCALBUMP}Enbl-Cmd\"\n self.__status__ = \"SR-DMS4AP{LOCALBUMP}TS-I\"\n self.__idposinfo__ = \"SR-DMS4AP{LOCALBUMP}S-I\"\n self.__srcposition__ = \"SR-DMS4AP{LOCALBUMP}SRC-SP\"", "def _read(self):\n\t\tself._infoMuscles = []\n\t\tself._infoCommonCellsInMuscles = []\n\t\tself._infoSpecialCells = []\n\t\tself._infoCommonMuscleConnections = []\n\t\tself._infoInterMuscSensorimotorConnections = {}\n\t\tself._infoSpecialConnections = []\n\t\tif rank==0:\n\t\t\tsection = None\n\t\t\tsensorimotorConnections = None\n\t\t\tsensorimotorMatrix = None\n\t\t\tfor line in open(\"../nnStructures/\"+self._inputFile,\"r\"):\n\t\t\t\tif line[0] == \"#\" or line[0] == \"\\n\": continue\n\t\t\t\telif line[0] == \"@\": section = float(line[1])\n\t\t\t\telif section == 1: self._infoMuscles.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 2: self._infoCommonCellsInMuscles.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 3: self._infoSpecialCells.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 4: self._infoCommonMuscleConnections.append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 5:\n\t\t\t\t\tif line[0] == \"+\":\n\t\t\t\t\t\tdictName = line[1:].strip(\"\\n\")\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName] = {}\n\t\t\t\t\t\tsensorimotorConnections = False\n\t\t\t\t\t\tsensorimotorMatrix = False\n\t\t\t\t\telif \"Connections\" in line:\n\t\t\t\t\t\t sensorimotorConnections = True\n\t\t\t\t\t\t 
self._infoInterMuscSensorimotorConnections[dictName][\"connections\"]=[]\n\t\t\t\t\telif \"WeightsMatrix\" in line:\n\t\t\t\t\t\t sensorimotorConnections = False\n\t\t\t\t\t\t sensorimotorMatrix = True\n\t\t\t\t\t\t self._infoInterMuscSensorimotorConnections[dictName][\"matrix\"]=[]\n\t\t\t\t\telif sensorimotorConnections:\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName][\"connections\"].append(line.strip(\"\\n\").split())\n\t\t\t\t\telif sensorimotorMatrix:\n\t\t\t\t\t\tself._infoInterMuscSensorimotorConnections[dictName][\"matrix\"].append(line.strip(\"\\n\").split())\n\t\t\t\telif section == 6: self._infoSpecialConnections.append(line.strip(\"\\n\").split())\n\n\t\tself._infoMuscles = comm.bcast(self._infoMuscles,root=0)\n\t\tself._infoCommonCellsInMuscles = comm.bcast(self._infoCommonCellsInMuscles,root=0)\n\t\tself._infoSpecialCells = comm.bcast(self._infoSpecialCells,root=0)\n\t\tself._infoCommonMuscleConnections = comm.bcast(self._infoCommonMuscleConnections,root=0)\n\t\tself._infoInterMuscSensorimotorConnections = comm.bcast(self._infoInterMuscSensorimotorConnections,root=0)\n\t\tself._infoSpecialConnections = comm.bcast(self._infoSpecialConnections,root=0)", "def __init__(self):\n self.data = []\n self.idx = {}", "def build(self):\n if self.is_built:\n return\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node_element = np.zeros((self.ntotal, 2), dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.nelements, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def __init__(self, inFilename):\n\n self._prmtopVersion=None\n self._flags=[]\n self._raw_format={}\n self._raw_data={}\n self._has_nbfix_terms = False\n\n with open(inFilename, 'r') as fIn:\n for line in fIn:\n if line[0] == '%':\n if line.startswith('%VERSION'):\n tag, self._prmtopVersion = line.rstrip().split(None, 1)\n elif line.startswith('%FLAG'):\n tag, flag = line.rstrip().split(None, 1)\n self._flags.append(flag)\n self._raw_data[flag] = []\n elif line.startswith('%FORMAT'):\n format = line.rstrip()\n index0=format.index('(')\n index1=format.index(')')\n format = format[index0+1:index1]\n try:\n m = FORMAT_RE_PATTERN.search(format)\n self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), int(m.group(3)), m.group(4))\n except:\n # We couldn't parse the format, so just treat the whole line as a single string.\n self._raw_format[self._flags[-1]] = (format, 1, 'a', 80, '')\n elif line.startswith('%COMMENT'):\n continue\n elif self._flags \\\n and 'TITLE'==self._flags[-1] \\\n and not self._raw_data['TITLE']:\n self._raw_data['TITLE'] = line.rstrip()\n else:\n flag=self._flags[-1]\n (format, numItems, itemType,\n iLength, itemPrecision) = self._getFormat(flag)\n line = line.rstrip()\n for index in range(0, len(line), iLength):\n item = line[index:index+iLength]\n if item:\n self._raw_data[flag].append(item.strip())\n # See if this is a CHAMBER-style topology file, which is not supported\n # for creating Systems\n self.chamber = 'CTITLE' in self._flags", "def 
__init__(self, data):\n\t\tself.protocol_version, self.le_state, self.playback_state, \\\n\t\t self.source, self.le_flags, self.playback_flags, \\\n\t\t self.source_flags, self.fullness, self.point_rate, \\\n\t\t self.point_count = \\\n\t\t\tstruct.unpack(\"<BBBBHHHHII\", data)", "def __init__(self):\n ProcessingUnit.__init__(self)\n print(\" [ START ] init - Metodo Simulator Reader\")\n\n self.isConfig = False\n self.basicHeaderObj = BasicHeader(LOCALTIME)\n self.systemHeaderObj = SystemHeader()\n self.radarControllerHeaderObj = RadarControllerHeader()\n self.processingHeaderObj = ProcessingHeader()\n self.profileIndex = 2**32-1\n self.dataOut = Voltage()\n #code0 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,1,1,1,0,1,1,0,1,0,0,0,1,1,1,0,1])\n code0 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,1,1,1,-1,1,1,-1,1,-1,-1,-1,1,1,1,-1,1])\n #code1 = numpy.array([1,1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,1,0,1,1,1,0,0,0,1,0])\n code1 = numpy.array([1,1,1,-1,1,1,-1,1,1,1,1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,1,-1,1,1,1,-1,-1,-1,1,-1])\n #self.Dyn_snCode = numpy.array([code0,code1])\n self.Dyn_snCode = None", "def _build(self):\n xml = ET.parse(self.fn)\n root = xml.getroot()\n\n metadata = None\n trk = None\n self.prefix = root.tag[:-3]\n metadata = root.find(self._get_tag('metadata'))\n # print(metadata.find(self._get_tag('time')))\n trk = root.find(self._get_tag('trk'))\n\n trkseg = trk.find(self._get_tag('trkseg'))\n\n # I just wanted to flatten the track point and get the\n # fields that I am actually interested in.\n def walker(node):\n nonlocal data\n tags = {'lat': float,\n 'lon': float,\n 'ele': float,\n 'time': cvt_time,\n 'temp': float,\n 'hr': float}\n for tag in tags:\n if node.tag.find(tag) >= 0:\n data[tag] = tags[tag](node.text)\n for child in node:\n walker(child)\n\n for trkpt in trkseg.findall(self._get_tag('trkpt')):\n data = {}\n data['lat'] = trkpt.attrib['lat']\n data['lon'] = trkpt.attrib['lon']\n walker(trkpt)\n self.points.append(TrackPoint(**data))", "def initDataParms(self):\n self.xpos = self.pltw.curvelist[self.blkno].xvinfo.vidx\n self.data = self.pltw.blklst[self.blkno] # original data block\n self.idata = None # interpolated data\n (self.nvec, self.npt) = self.data.shape\n self.xmin = (self.data[self.xpos]).min()\n self.xmax = (self.data[self.xpos]).max()\n self.xspan = self.xmax - self.xmin\n if self.parent.test:\n self.dx = self.xspan / (self.npt * 5)", "def build(self, data: dict):", "def process(self):\n dataobj = Data()\n targetmap = {}\n sta_indices = {}\n hdulist = self.hdulist\n # First get all the OI_TARGET, OI_WAVELENGTH and OI_ARRAY tables\n for hdu in hdulist:\n header = hdu.header\n data = hdu.data\n if hdu.name == \"OI_WAVELENGTH\":\n if dataobj.wavelength == None:\n dataobj.wavelength = {}\n insname = header[\"INSNAME\"]\n dataobj.wavelength[insname] = OI_WAVELENGTH(\n data.field(\"EFF_WAVE\"), data.field(\"EFF_BAND\")\n )\n elif hdu.name == \"OI_TARGET\":\n for row in data:\n target_id = row[\"TARGET_ID\"]\n target = OI_TARGET(\n target=row[\"TARGET\"],\n raep0=row[\"RAEP0\"],\n decep0=row[\"DECEP0\"],\n equinox=row[\"EQUINOX\"],\n ra_err=row[\"RA_ERR\"],\n dec_err=row[\"DEC_ERR\"],\n sysvel=row[\"SYSVEL\"],\n veltyp=row[\"VELTYP\"],\n veldef=row[\"VELDEF\"],\n pmra=row[\"PMRA\"],\n pmdec=row[\"PMDEC\"],\n pmra_err=row[\"PMRA_ERR\"],\n pmdec_err=row[\"PMDEC_ERR\"],\n parallax=row[\"PARALLAX\"],\n para_err=row[\"PARA_ERR\"],\n spectyp=row[\"SPECTYP\"],\n )\n dataobj.target = np.append(dataobj.target, target)\n targetmap[target_id] = target\n 
elif hdu.name == \"OI_ARRAY\":\n if dataobj.array == None:\n dataobj.array = {}\n arrname = header[\"ARRNAME\"]\n frame = header[\"FRAME\"]\n arrxyz = np.array(\n [header[\"ARRAYX\"], header[\"ARRAYY\"], header[\"ARRAYZ\"]]\n )\n dataobj.array[arrname] = OI_ARRAY(frame, arrxyz, stations=data)\n # Save the sta_index for each array, as we will need it\n # later to match measurements to stations\n sta_indices[arrname] = data.field(\"sta_index\")\n\n # Then get any science measurements\n for hdu in hdulist:\n header = hdu.header\n data = hdu.data\n if hdu.name in (\"OI_VIS\", \"OI_VIS2\", \"OI_T3\"):\n if \"ARRNAME\" in header.keys():\n arrname = header[\"ARRNAME\"]\n else:\n arrname = None\n if arrname and dataobj.array:\n array = dataobj.array[arrname]\n else:\n array = None\n wavelength = dataobj.wavelength[header[\"INSNAME\"]]\n if hdu.name == \"OI_VIS\":\n for row in data:\n date = header[\"DATE-OBS\"].split(\"-\")\n timeobs = datetime.datetime(\n int(date[0]), int(date[1]), int(date[2])\n ) + datetime.timedelta(seconds=np.around(row.field(\"TIME\"), 2))\n int_time = row.field(\"INT_TIME\")\n visamp = np.reshape(row.field(\"VISAMP\"), -1)\n visamperr = np.reshape(row.field(\"VISAMPERR\"), -1)\n visphi = np.reshape(row.field(\"VISPHI\"), -1)\n visphierr = np.reshape(row.field(\"VISPHIERR\"), -1)\n if \"CFLUX\" in row.array.names:\n cflux = np.reshape(row.field(\"CFLUX\"), -1)\n else:\n cflux = None\n if \"CFLUXERR\" in row.array.names:\n cfluxerr = np.reshape(row.field(\"CFLUXERR\"), -1)\n else:\n cfluxerr = None\n flag = np.reshape(row.field(\"FLAG\"), -1)\n ucoord = row.field(\"UCOORD\")\n vcoord = row.field(\"VCOORD\")\n target = targetmap[row.field(\"TARGET_ID\")]\n if array:\n sta_index = row.field(\"STA_INDEX\")\n s1 = array.station[sta_indices[arrname] == sta_index[0]][0]\n s2 = array.station[sta_indices[arrname] == sta_index[1]][0]\n station = [s1, s2]\n else:\n station = [None, None]\n dataobj.vis = np.append(\n dataobj.vis,\n OI_VIS(\n timeobs=timeobs,\n int_time=int_time,\n visamp=visamp,\n visamperr=visamperr,\n visphi=visphi,\n visphierr=visphierr,\n flag=flag,\n ucoord=ucoord,\n vcoord=vcoord,\n wavelength=wavelength,\n target=target,\n array=array,\n station=station,\n cflux=cflux,\n cfluxerr=cfluxerr,\n ),\n )\n elif hdu.name == \"OI_VIS2\":\n for row in data:\n date = header[\"DATE-OBS\"].split(\"-\")\n timeobs = datetime.datetime(\n int(date[0]), int(date[1]), int(date[2])\n ) + datetime.timedelta(seconds=np.around(row.field(\"TIME\"), 2))\n int_time = row.field(\"INT_TIME\")\n vis2data = np.reshape(row.field(\"VIS2DATA\"), -1)\n vis2err = np.reshape(row.field(\"VIS2ERR\"), -1)\n flag = np.reshape(row.field(\"FLAG\"), -1)\n ucoord = row.field(\"UCOORD\")\n vcoord = row.field(\"VCOORD\")\n target = targetmap[row.field(\"TARGET_ID\")]\n if array:\n sta_index = row.field(\"STA_INDEX\")\n s1 = array.station[sta_indices[arrname] == sta_index[0]][0]\n s2 = array.station[sta_indices[arrname] == sta_index[1]][0]\n station = [s1, s2]\n else:\n station = [None, None]\n dataobj.vis2 = np.append(\n dataobj.vis2,\n OI_VIS2(\n timeobs=timeobs,\n int_time=int_time,\n vis2data=vis2data,\n vis2err=vis2err,\n flag=flag,\n ucoord=ucoord,\n vcoord=vcoord,\n wavelength=wavelength,\n target=target,\n array=array,\n station=station,\n ),\n )\n elif hdu.name == \"OI_T3\":\n for row in data:\n date = header[\"DATE-OBS\"].split(\"-\")\n timeobs = datetime.datetime(\n int(date[0]), int(date[1]), int(date[2])\n ) + datetime.timedelta(seconds=np.around(row.field(\"TIME\"), 2))\n int_time = 
row.field(\"INT_TIME\")\n t3amp = np.reshape(row.field(\"T3AMP\"), -1)\n t3amperr = np.reshape(row.field(\"T3AMPERR\"), -1)\n t3phi = np.reshape(row.field(\"T3PHI\"), -1)\n t3phierr = np.reshape(row.field(\"T3PHIERR\"), -1)\n flag = np.reshape(row.field(\"FLAG\"), -1)\n u1coord = row.field(\"U1COORD\")\n v1coord = row.field(\"V1COORD\")\n u2coord = row.field(\"U2COORD\")\n v2coord = row.field(\"V2COORD\")\n target = targetmap[row.field(\"TARGET_ID\")]\n if array:\n sta_index = row.field(\"STA_INDEX\")\n s1 = array.station[sta_indices[arrname] == sta_index[0]][0]\n s2 = array.station[sta_indices[arrname] == sta_index[1]][0]\n s3 = array.station[sta_indices[arrname] == sta_index[2]][0]\n station = [s1, s2, s3]\n else:\n station = [None, None, None]\n dataobj.t3 = np.append(\n dataobj.t3,\n OI_T3(\n timeobs=timeobs,\n int_time=int_time,\n t3amp=t3amp,\n t3amperr=t3amperr,\n t3phi=t3phi,\n t3phierr=t3phierr,\n flag=flag,\n u1coord=u1coord,\n v1coord=v1coord,\n u2coord=u2coord,\n v2coord=v2coord,\n wavelength=wavelength,\n target=target,\n array=array,\n station=station,\n ),\n )\n return dataobj", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n self.ntotal = self.nelements * nnodes * 2\n if self.is_sort1:\n ntimes = self.ntimes\n ntotal = self.ntotal\n else:\n #print(\"ntimes=%s nelements=%s ntotal=%s nnodes=%s\" % (self.ntimes, self.nelements, self.ntotal, nnodes))\n ntimes = self.ntotal\n ntotal = self.nelements // 2\n #self.ntotal = ntotal\n #print(\"**BEND: ntimes=%s ntotal=%s\" % (ntimes, ntotal))\n #self.ntotal = nelements * nnodes * 2\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self._times = np.zeros(ntimes, dtype=dtype)\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((ntotal, 2), dtype=idtype)\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.data = np.zeros((ntimes, ntotal, 9), dtype=fdtype)", "def __init__(self):\n self.data = []\n self.record = {}", "def set_content(self, offset: int, content_dict: dict):\n super().set_content(offset, content_dict)\n\n # Get the list of files from the properties and determine basenames from the path, which\n # will be used as name in the readfs\n for file in content_dict.get('properties').get('files'):\n self.file_paths.append([os.path.basename(file), file])\n\n # First declare the sub-sections so that the right offsets are computed\n\n # Top structure which will gather all sub-sections\n self.top_struct = CStructParent('readfs', parent=self)\n\n # Main header for readfs size and number of files\n self.header = ReadfsHeader('header', parent=self.top_struct)\n\n # One header per 
file containig file size, name and flash offset\n for i, path in enumerate(self.file_paths):\n filename, filepath = path\n self.file_headers.append(ReadfsFileHeader(f'file{i} header', len(filename)+1,\n parent=self.top_struct))\n\n # File contents\n for i, path in enumerate(self.file_paths):\n filename, filepath = path\n self.files.append(ReadfsFile(f'file{i}', os.path.getsize(filepath),\n parent=self.top_struct))\n\n\n\n # Now that the offsets have been computed, we can fill-in the various fields\n\n # Main header\n header_size = self.header.get_size()\n for file_header in self.file_headers:\n header_size += file_header.get_size()\n\n self.header.set_field('fs_size', header_size)\n self.header.set_field('nb_files', len(self.files))\n\n for i, path in enumerate(self.file_paths):\n filename, filepath = self.file_paths[i]\n file_header = self.file_headers[i]\n file = self.files[i]\n\n # Per-file header\n file_header.set_field('offset', file.get_offset() - self.get_offset())\n file_header.set_field('file_size', os.path.getsize(filepath))\n file_header.set_field('name_len', len(filename)+1)\n file_header.set_field('name', filename.encode('utf-8') + bytes([0]))\n\n # Per-file content\n with open(filepath, 'rb') as file_desc:\n file.set_field('data', file_desc.read())", "def prepare_sFlat_data(notes, track_range = None, enc_shape = (1,), ip_memory = 32, depth = 2, spread = 16):\n track_range = track_range if track_range else [0, 1]\n \n data_in, data_out = [], []\n \n for tr in range(track_range[1] - track_range[0]):\n # trk = tr - track_range[0]\n nt = notes[tr]\n data_in.append([])\n data_out.append([])\n lent = len(notes[tr])\n # for j in range(lent):\n le = len(nt)\n \n chunks_count = le // ip_memory + 1\n \n for i in range(le - ip_memory):\n start, end = i, i + ip_memory\n buf_size = ip_memory if end < le else le - start # only reason due to logic below else not needed\n buffer = numpy.zeros((ip_memory, depth,))\n # print(\"buff shape : \", buffer.shape)\n buffer[:buf_size, :] = nt[start : start + buf_size]\n\n data_in[tr].append(buffer)\n \n data_out[tr].append((nt[end] if end < le else notes[0][0]))\n \n # if track_range[1]- track_range[0] == 1: #is scalar, no track\n # data_in, data_out = data_in[0], data_out[0]\n \n\n return numpy.array(data_in), numpy.array(data_out)", "def __init__(self):\n\n\t\t# PDB fields\n\t\tself.s_name = \"\"\t\t\t\t# Name of the structure\n\t\tself.l_s_leading_data = []\t\t# PDB information written above the atom properties\n\t\tself.l_s_trailing_data = []\t\t# PDB information written under the atom properties\n\n\t\t# Structural fields\n\t\tself.i_atom_count = 0\t\t\t# Number of atoms in the structure\n\t\tself.a_atoms = None\t\t\t\t# Array of atoms properties\n\t\tself.a_max_coord = None\t\t\t# Maximal coordinates for each axis\n\t\tself.a_min_coord = None\t\t\t# Minimal coordinates for each axis\n\n\t\t# Grid fields\n\t\tself.a_grid = None\t\t\t\t# 3D grid containing the structure\n\t\tself.l_l_elements = None\t\t# Set of atoms contained in the structure\n\n\t\t# Solubilization fields\n\t\tself.o_tree = None\t\t\t\t\t\t# A KDTree object representing the exact placement of atoms, used for distance determination\n\n\t\t# Comparison fields\n\t\tself.b_loaded = False\t\t# Keeps tracks of the state of the structure\n\n\t\t# Ligands fields\n\t\tself.l_o_ligands = []\t\t# A list for the ligands\n\n\t\t# Pocket fields\n\t\tself.l_i_pocket_residues = []\t\t# List of the residues included in the pocket\n\t\tself.a_pocket_atoms = None\t\t\t# Array of the pocket atoms 
properties\n\t\tself.a_pocket_grid = None\t\t\t# 3D grid containing the pocket\n\n\t\t# Miscellaneous fields\n\t\tself.a_min_coord = None\t\t# Minimum coordinates of the structure\n\t\tself.a_max_coord = None\t\t# Maximum coordinates of the structure\n\t\tself.f_mass = 0.0\t\t\t# Mass of the structure", "def setUp(self):\n\n serial_times = {295: '1971-07-31T01:24:11.754',\n 296: '1971-07-31T01:24:36.970',\n 297: '1971-07-31T01:25:02.243',\n 298: '1971-07-31T01:25:27.457',\n 299: '1971-07-31T01:25:52.669',\n 300: '1971-07-31T01:26:17.923'}\n self.serials = ['APOLLO15/METRIC/{}'.format(i) for i in serial_times.values()]\n\n\n x = list(range(5))\n y = list(range(5))\n pid = [0,0,1,1,1]\n idx = pid\n serials = [self.serials[0], self.serials[1], self.serials[2],\n self.serials[2], self.serials[3]]\n\n\n columns = ['x', 'y', 'idx', 'pid', 'nid']\n self.data_length = 5\n\n data = [x,y, idx, pid, serials]\n\n self.creation_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n cnet = C(data, index=columns).T\n\n io_controlnetwork.to_isis('test.net', cnet, mode='wb', targetname='Moon')\n\n self.header_message_size = 85\n self.point_start_byte = 65621", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n if self.is_built:\n return\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), 'complex64')", "def _populate_shapes(self):\n point = Point(self.position.x, self.position.y)\n point_buffered = point.buffer(self.radius + self.buffer, 3)\n self._point_shape = point.buffer(self.radius, 3)\n \n scale = 10.0\n font = truetype(self.fontfile, int(self.fontsize * scale), encoding='unic')\n\n x, y = self.position.x, self.position.y\n w, h = font.getsize(self.name)\n w, h = w/scale, h/scale\n \n for placement in placements:\n label_shape = point_label_bounds(x, y, w, h, self.radius, placement)\n mask_shape = label_shape.buffer(self.buffer, 2).union(point_buffered)\n \n self._label_shapes[placement] = label_shape\n self._mask_shapes[placement] = mask_shape\n \n unionize = lambda a, b: a.union(b)\n self._label_footprint = reduce(unionize, self._label_shapes.values())\n self._mask_footprint = reduce(unionize, self._mask_shapes.values())\n \n # number of pixels from the top of the label based on the bottom of a \".\"\n self._baseline = font.getmask('.').getbbox()[3] / scale", "def read_makerNotes(makerNotes, values=VALUES, i=0, starting_header=val_list[0]):\n nextHeader = starting_header\n while True:\n if makerNotes[i] == nextHeader:\n pos = val_list.index(nextHeader)\n header = 
key_list[pos]\n print(f'found header {hex(int(makerNotes[i]))} ({header}) at position {i}')\n type = makerNotes[i+2] # read data type\n num = int(struct.unpack(b\"<L\", makerNotes[i+4:i+8])[0]) # read num of occurances\n dataType = b\"<\"\n if type == 0x01: # pad\n dataType = dataType + (b\"x\" * num)\n size = num * 1\n data = struct.unpack(dataType, makerNotes[i+8:i+8+size])\n elif type == 0x02: # char\n dataType = dataType + (b\"c\" * num)\n size = num * 1\n data = struct.unpack(dataType, makerNotes[i+8:i+8+size])\n data = ' '.join([d.decode() for d in data])[:-1]\n elif type == 0x0b: # float\n dataType = dataType + (b\"f\" * num)\n size = num * 4\n data = struct.unpack(dataType, makerNotes[i+8:i+8+size])[num-1]\n else:\n raise ValueError(f'cannot identify type with hexadecimal representation: {type}')\n values[header] = data\n i = i + 8 + size - 1\n nextHeader += 1\n sumNones = sum([1 for key, val in values.items() if val is None])\n if sumNones == 0:\n break\n i+=1\n return values", "def __init__(self, data, parent=None):\n self.parent = parent\n self.bootable_flag = struct.unpack(\"<B\", data[0])[0]\n self.start_chs_address = struct.unpack(\"<BH\", data[1:4])[0]\n self.partition_type = struct.unpack(\"<B\", data[4])[0]\n self.end_chs_address = struct.unpack(\"<BH\", data[5:8])[0]\n # FIXME Check to see how the lba address bytes are used\n if self.get_type() == 'Empty':\n self.lba = 0\n else:\n self.lba = struct.unpack(\"<L\", data[8:12])[0]\n\n self.size = struct.unpack(\"<L\", data[12:16])[0]", "def __initialize(self):\n\t\tself.matrix = [None] * self.size\n\t\tself.__get_log_values()\n\t\tfor row in range(self.size):\n\t\t\tself.matrix[row] = [None] * self.size\n\t\tmax_len = self.__get_max_length()\n\t\tdata = self.__get_data(self.text,max_len)\n\t\tmpoly = self.__get_mpoly(data)\n\t\tgpoly = self.__get_gploy()\n\t\tself.final_data = self.__get_final_data(mpoly,gpoly)\n\t\tself.__set_FIP(FP_num = 1)\n\t\tself.__set_FIP(FP_num = 2)\n\t\tself.__set_FIP(FP_num = 3)\n\t\tself.__set_AP()\n\t\tself.__fill_format_info_area()\n\t\tself.__set_TP()", "def build_data(self):\n\n _header_ = self._header_ + 'build_data(): '\n\n if self.verbose:\n print(_header_ + 'Building data for %s ...' 
% self.p_data)\n\n self.read_data()\n self.map_data()\n self.partition_data()\n self.composition()\n\n if self.verbose:\n print(_header_ + 'Build complete.')\n\n return self", "def processData(self,data):\n #print 'I GOT DATA',data,[0],data[1]\n # Check for valid data (not null or empty string)\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0])\n if data:\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0]),_RobotCommunicator.WALL_HEADER==data[0]\n\n # Check header and assign data appropriately\n # TODO: Check length of data for validity\n #print 'Header',data[0]\n if data[0] == _RobotCommunicator.POSE_HEADER:\n self.pose = unpack(_RobotCommunicator.POSE_FORMAT,data[1:])\n elif data[0] == _RobotCommunicator.SENSOR_HEADER:\n\n #for i in range(1, len(data)-1, 2):\n index= unpack('B',data[1])\n value = unpack('?',data[2])\n # Update old values or create new sensor-value pair\n self.sensors[index[0]] = value[0]\n #print 'in csharp: ',[index,value]\n\n elif data[0] == _RobotCommunicator.WAYPOINT_HEADER:\n self.waypoints = [] # Clear old waypoints\n for i in range(1, len(data)-16, 16):\n x,y = unpack(_RobotCommunicator.WAYPOINT_FORMAT,\n data[i:i+15])\n self.waypoints.append((x,y))\n elif data[0] == _RobotCommunicator.DIRECTION_HEADER:\n self.direction = unpack(_RobotCommunicator.DIRECTION_FORMAT,\n data[1:])\n elif data[0] == _RobotCommunicator.ACTUATOR_HEADER:\n self.actuators = [] # Clear old actuator commands for i in range(1, len(data)-1):\n self.actuators.append(unpack(\n _RobotCommunicator.ACTUATOR_FORMAT,data[i]))\n elif data[0] == _RobotCommunicator.WALL_HEADER:\n self.walls = {} # Clear old wall entries\n index = unpack('B', data[1])\n x1,y1,x2,y2 = unpack(_RobotCommunicator.WALL_FORMAT,data[2:34])\n self.walls = (x1,y1,x2,y2)\n #print '**************Coordinates***************',(x1,y1,x2,y2)\n print '****self.walls*********',self.walls\n elif data[0] == _RobotCommunicator.OBS_HEADER:\n index = unpack('B', data[1])\n add,x1,y1 = unpack(_RobotCommunicator.OBS_FORMAT,data[2:26])\n #print '***********self.obs*************'+','.join(map(str,[add,x1,y1]))\n self.obs = [add,x1,round(y1,2)]\n if add == 1:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly += a\n self.receiveObs = True\n #print \"add obstacle:\" + str(x1) + \",\"+ str(y1)\n elif add == 4:\n if x1 == 0:\n self.STOP = True\n else:\n self.STOP = False\n else:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly -= a\n self.receiveObs = True\n #print \"del obstacle:\"+ str(x1) + \",\"+ str(y1)\n\n\n else:\n print \"Unexpected or corrupted data packet received.\"", "def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel 
+ 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''", "def _get_obs(self):\n pos = []\n z = []\n for i in range(params['memory_size']):\n if self._step - i * params['memory_size'] > 1:\n pos.append(self._track_item['joint_pos'][self._step - i * params['memory_size'] - 1].copy())\n z.append(self._track_item['z'][self._step - i * params['memory_size'] - 1].copy())\n else:\n pos.append(self._track_item['joint_pos'][0].copy())\n if len(self._track_item['z']) < 1:\n z.append(self.z.copy())\n else:\n z.append(self._track_item['z'][0].copy())\n out = pos\n if params['observation_version'] == 1:\n out += z\n ob = {\n 'observation' : np.concatenate(out, -1),\n 'desired_goal' : self.desired_goal.copy(),\n 'achieved_goal' : self.achieved_goal.copy(),\n 'z' : self.z.copy()\n }\n return ob", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.sensor_pose_on_robot is None:\n self.sensor_pose_on_robot = geometry_msgs.msg.Pose()\n if self.sensed_data is None:\n self.sensed_data = None\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 96\n (_x.sensor_pose_on_robot.position.x, _x.sensor_pose_on_robot.position.y, _x.sensor_pose_on_robot.position.z, _x.sensor_pose_on_robot.orientation.x, _x.sensor_pose_on_robot.orientation.y, _x.sensor_pose_on_robot.orientation.z, _x.sensor_pose_on_robot.orientation.w, _x.min_sensor_distance, _x.max_sensor_distance, _x.sensor_std_range, _x.sensor_std_yaw, _x.sensor_std_pitch,) = _get_struct_12d().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.sensed_data = []\n for i in range(0, length):\n val1 = mrpt_msgs.msg.SingleRangeBearingObservation()\n _x = val1\n start = end\n end += 28\n (_x.range, _x.yaw, _x.pitch, _x.id,) = _get_struct_3di().unpack(str[start:end])\n self.sensed_data.append(val1)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def get_map_notes(map_json, **kwargs):\n length = kwargs.get(\"length\", -1)\n divisor = kwargs.get(\"divisor\", 4)\n tick_times = get_map_timing_array(map_json, length=length, divisor=divisor)\n\n objs = map_json[\"obj\"]\n obj_times = list(map(lambda obj: obj[\"time\"], objs))\n\n # 1 for circle, 2 for slider, 3 for spinner\n def get_note_type(obj):\n if not obj:\n return 0\n if obj[\"type\"] & 2:\n return 2\n elif obj[\"type\"] & 8:\n return 3\n return 1\n\n po = 0\n note_max_wait_time = kwargs.get(\"note_max_wait_time\", 1000)\n start_time = obj_times[0] - note_max_wait_time\n last_obj_time = start_time\n sliding = 0\n slider_end_time = 0\n spinning = 0\n spinner_end_time = 0\n data = []\n flow_data = []\n\n # constant multipliers and subtractions\n tlen_mp = 1/500\n tlen_s = 
1\n bpm_mp = 1/120\n bpm_s = 1\n slen_mp = 1/150\n slen_s = 1\n\n # tick count from start of uninherited timing section\n uts_i = 0\n\n # tick is timestamp here\n for i, tick in enumerate(tick_times):\n\n if is_uts_begin(map_json, tick):\n uts_i = 0\n else:\n uts_i += 1\n\n # Attach extra vars at the end of each note data row\n tlen = get_tick_len(map_json, tick)\n bpm = 60000 / tlen\n slen = get_slider_len(map_json, tick)\n ex1 = tlen * tlen_mp - tlen_s\n ex2 = bpm * bpm_mp - bpm_s\n ex3 = slen * slen_mp - slen_s\n\n while obj_times[po] < tick - 5 and po < len(obj_times) - 1:\n po += 1\n if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note\n last_obj_time = tick\n note_type = get_note_type(objs[po])\n\n # calculate momentum\n if po >= 1:\n momentum = get_momentum(objs[po], objs[po-1], slen/tlen)\n else:\n momentum = 0\n\n # flow data\n if po >= 1:\n input_vector = get_input_vector(objs[po], objs[po-1])\n output_vector = get_output_vector(objs[po], objs[po-1])\n else:\n input_vector = [0, 0]\n output_vector = [0, 0]\n if input_vector is None or input_vector[0] is None or input_vector[1] is None:\n input_vector = [0, 0]\n if output_vector is None or output_vector[0] is None or output_vector[1] is None:\n output_vector = [0, 0]\n\n # end point\n endpoint = get_end_point(objs[po])\n flow_data.append([uts_i, tick, note_type, objs[po][\"x\"], objs[po][\"y\"], input_vector[0],\n input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]])\n\n # put data\n if note_type == 1:\n spinning = 0\n sliding = 0\n elif note_type == 2:\n sliding = 1\n slider_end_time = objs[po][\"sliderData\"][\"endTime\"]\n elif note_type == 3:\n spinning = 1\n spinner_end_time = objs[po][\"spinnerEndTime\"]\n # because the spinner sometimes get over 3 secs\n last_obj_time = spinner_end_time\n\n # TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3\n data.append([uts_i, tick, 1, note_type, sliding,\n spinning, momentum, ex1, ex2, ex3])\n elif spinning == 1:\n if tick >= spinner_end_time - 5:\n spinning = 0\n data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3])\n else:\n data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3])\n elif sliding == 1:\n if tick >= slider_end_time - 5:\n sliding = 0\n data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3])\n else:\n data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3])\n else: # not found\n if tick - last_obj_time < note_max_wait_time and tick >= start_time:\n data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3])\n return data, flow_data", "def __init__(self, data):\n # add play_guid as it sometimes doesn't exist\n if 'play_guid' not in data:\n data['play_guid'] = ''\n # loop through data\n for x in data:\n # set information as correct data type\n mlbgame.object.setobjattr(self, x, data[x])", "def build(self, file_number, data):\n pass", "def deserialize(self, str):\n try:\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.obstacleinfo is None:\n self.obstacleinfo = nubot_common.msg.ObstaclesInfo()\n if self.oppinfo is None:\n self.oppinfo = nubot_common.msg.ObstaclesInfo()\n if self.robotinfo is None:\n self.robotinfo = None\n if self.ballinfo is None:\n self.ballinfo = None\n if self.coachinfo is None:\n self.coachinfo = nubot_common.msg.CoachInfo()\n if self.pass_cmd is None:\n self.pass_cmd = nubot_common.msg.PassCommands()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start 
= end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 12\n (_x.obstacleinfo.header.seq, _x.obstacleinfo.header.stamp.secs, _x.obstacleinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.obstacleinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.obstacleinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.obstacleinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.obstacleinfo.polar_pos.append(val1)\n _x = self\n start = end\n end += 12\n (_x.oppinfo.header.seq, _x.oppinfo.header.stamp.secs, _x.oppinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.oppinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.oppinfo.header.frame_id = str[start:end]\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.Point2d()\n _x = val1\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.oppinfo.polar_pos = []\n for i in range(0, length):\n val1 = nubot_common.msg.PPoint()\n _x = val1\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n self.oppinfo.polar_pos.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.robotinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.RobotInfo()\n _v12 = val1.header\n start = end\n end += 4\n (_v12.seq,) = _get_struct_I().unpack(str[start:end])\n _v13 = _v12.stamp\n _x = _v13\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v12.frame_id = str[start:end].decode('utf-8')\n else:\n _v12.frame_id = str[start:end]\n _x = val1\n start = end\n end += 28\n (_x.AgentID, _x.targetNum1, _x.targetNum2, _x.targetNum3, _x.targetNum4, _x.staticpassNum, _x.staticcatchNum,) = _get_struct_7i().unpack(str[start:end])\n _v14 = val1.pos\n _x = _v14\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v15 = val1.heading\n start = end\n end += 4\n (_v15.theta,) = _get_struct_f().unpack(str[start:end])\n start = end\n end += 4\n (val1.vrot,) = _get_struct_f().unpack(str[start:end])\n _v16 = val1.vtrans\n _x = _v16\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 9\n (_x.iskick, _x.isvalid, _x.isstuck, _x.isdribble, 
_x.current_role, _x.role_time,) = _get_struct_5Bf().unpack(str[start:end])\n val1.iskick = bool(val1.iskick)\n val1.isvalid = bool(val1.isvalid)\n val1.isstuck = bool(val1.isstuck)\n val1.isdribble = bool(val1.isdribble)\n _v17 = val1.target\n _x = _v17\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n self.robotinfo.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.ballinfo = []\n for i in range(0, length):\n val1 = nubot_common.msg.BallInfo()\n _v18 = val1.header\n start = end\n end += 4\n (_v18.seq,) = _get_struct_I().unpack(str[start:end])\n _v19 = _v18.stamp\n _x = _v19\n start = end\n end += 8\n (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n _v18.frame_id = str[start:end].decode('utf-8')\n else:\n _v18.frame_id = str[start:end]\n start = end\n end += 4\n (val1.ballinfostate,) = _get_struct_i().unpack(str[start:end])\n _v20 = val1.pos\n _x = _v20\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _v21 = val1.real_pos\n _x = _v21\n start = end\n end += 8\n (_x.angle, _x.radius,) = _get_struct_2f().unpack(str[start:end])\n _v22 = val1.velocity\n _x = _v22\n start = end\n end += 8\n (_x.x, _x.y,) = _get_struct_2f().unpack(str[start:end])\n _x = val1\n start = end\n end += 2\n (_x.pos_known, _x.velocity_known,) = _get_struct_2B().unpack(str[start:end])\n val1.pos_known = bool(val1.pos_known)\n val1.velocity_known = bool(val1.velocity_known)\n self.ballinfo.append(val1)\n _x = self\n start = end\n end += 12\n (_x.coachinfo.header.seq, _x.coachinfo.header.stamp.secs, _x.coachinfo.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.coachinfo.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.coachinfo.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 54\n (_x.coachinfo.MatchMode, _x.coachinfo.MatchType, _x.coachinfo.TestMode, _x.coachinfo.pointA.x, _x.coachinfo.pointA.y, _x.coachinfo.pointB.x, _x.coachinfo.pointB.y, _x.coachinfo.angleA, _x.coachinfo.angleB, _x.coachinfo.idA, _x.coachinfo.idB, _x.coachinfo.kickforce, _x.pass_cmd.pass_id, _x.pass_cmd.catch_id, _x.pass_cmd.pass_pt.x, _x.pass_cmd.pass_pt.y, _x.pass_cmd.catch_pt.x, _x.pass_cmd.catch_pt.y, _x.pass_cmd.is_passout, _x.pass_cmd.is_dynamic_pass, _x.pass_cmd.is_static_pass, _x.pass_cmd.is_valid,) = _get_struct_3B4f2h3B2I4f4B().unpack(str[start:end])\n self.pass_cmd.is_passout = bool(self.pass_cmd.is_passout)\n self.pass_cmd.is_dynamic_pass = bool(self.pass_cmd.is_dynamic_pass)\n self.pass_cmd.is_static_pass = bool(self.pass_cmd.is_static_pass)\n self.pass_cmd.is_valid = bool(self.pass_cmd.is_valid)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def refresh(self):\n self.info = str(self.ser.readline())[2:-5].split(',')\n self.x, self.y, self.z, self.xa, self.ya, self.za, self.o = self.info", "def _make_observation(self) -> Dict[str, np.ndarray]:\n return {\n \"cur_pos\": np.array([self.cur_pos], dtype=int),\n }", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def deserialize(self, str):\n try:\n if self.base is None:\n self.base = rwrc12_msgs.msg.CellBase()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.base.header.seq, 
_x.base.header.stamp.secs, _x.base.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.base.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z,) = _struct_5f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.base.points = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point32()\n _x = val1\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n self.base.points.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.base.intensity = struct.unpack(pattern, str[start:end])\n start = end\n end += 1\n (self.base.cost,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.label = str[start:end].decode('utf-8')\n else:\n self.base.label = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.mean_height, _x.mean_intensity,) = _struct_2f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, data, startLat, startLon, delta, numX, numY):\n self.data = data\n self.startLat = startLat\n self.startLon = startLon\n self.delta = delta\n self.xCells = numX\n self.yCells = numY", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def _get_data(\n self,\n vis_hdu,\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n lsts,\n lst_range,\n polarizations,\n blt_inds,\n phase_center_ids,\n catalog_names,\n keep_all_metadata,\n fix_old_proj,\n fix_use_ant_pos,\n ):\n # figure out what data to read in\n blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess(\n antenna_nums,\n antenna_names,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n time_range,\n lsts,\n lst_range,\n polarizations,\n blt_inds,\n phase_center_ids,\n catalog_names,\n )\n\n if blt_inds is not None:\n blt_frac = len(blt_inds) / float(self.Nblts)\n else:\n blt_frac = 1\n\n if freq_inds is not None:\n freq_frac = len(freq_inds) * float(self.Nspws) / float(self.Nfreqs)\n else:\n freq_frac = 1\n\n if pol_inds is not None:\n pol_frac = len(pol_inds) / float(self.Npols)\n else:\n pol_frac = 1\n\n min_frac = np.min([blt_frac, freq_frac, pol_frac])\n\n if min_frac == 1:\n # no select, read in all the data\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :]\n else:\n # do select operations on everything except data_array, flag_array\n # and nsample_array\n self._select_by_index(\n blt_inds, freq_inds, pol_inds, 
history_update_string, keep_all_metadata\n )\n\n # just read in the right portions of the data and flag arrays\n if blt_frac == min_frac:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n if freq_frac < 1:\n raw_data_array = raw_data_array[:, :, freq_inds, :, :]\n if pol_frac < 1:\n raw_data_array = raw_data_array[:, :, :, pol_inds, :]\n elif freq_frac == min_frac:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, :, :, :, freq_inds, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, :, :, freq_inds, :, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n\n if blt_frac < 1:\n raw_data_array = raw_data_array[blt_inds, :, :, :, :]\n if pol_frac < 1:\n raw_data_array = raw_data_array[:, :, :, pol_inds, :]\n else:\n if vis_hdu.header[\"NAXIS\"] == 7:\n raw_data_array = vis_hdu.data.data[:, :, :, :, :, pol_inds, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]\n assert self.Nspws == raw_data_array.shape[1]\n else:\n # in many uvfits files the spw axis is left out,\n # here we put it back in so the dimensionality stays the same\n raw_data_array = vis_hdu.data.data[:, :, :, :, pol_inds, :]\n raw_data_array = raw_data_array[:, 0, 0, :, :, :]\n raw_data_array = raw_data_array[:, np.newaxis, :, :, :]\n\n if blt_frac < 1:\n raw_data_array = raw_data_array[blt_inds, :, :, :, :]\n if freq_frac < 1:\n raw_data_array = raw_data_array[:, :, freq_inds, :, :]\n\n assert len(raw_data_array.shape) == 5\n\n # Reshape the data array to be the right size if we are working w/ multiple\n # spectral windows to be 'flex_spw' compliant\n if self.Nspws > 1:\n raw_data_array = np.reshape(\n raw_data_array,\n (self.Nblts, 1, self.Nfreqs, self.Npols, raw_data_array.shape[4]),\n )\n\n # FITS uvw direction convention is opposite ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n self.data_array = (\n raw_data_array[:, :, :, :, 0] - 1j * raw_data_array[:, :, :, :, 1]\n )\n self.flag_array = raw_data_array[:, :, :, :, 2] <= 0\n self.nsample_array = np.abs(raw_data_array[:, :, :, :, 2])\n\n if fix_old_proj:\n self.fix_phase(use_ant_pos=fix_use_ant_pos)", "def _prepare(self):\n # Time list\n self.time_list = []\n # Distance array\n if self._fxn[0] is True:\n self.res_dists, self.res_keys = build_reslist_dict(self._rpl)\n\n # Distance between alpha carbons\n if self._fxn[1] is True:\n self.ca_dists, self.ca_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of mass\n if self._fxn[2] is True:\n self.cm_dists, self.cm_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of geometry\n if self._fxn[3] is True:\n self.cg_dists, self.cg_keys = build_reslist_dict(self._rpl)", "def func_init(self):\n self.points.set_data([], [])\n for line in self.lines:\n line.set_data([],[])\n self.annotation.set_text('')\n\n return 
tuple(self.lines) + (self.points, self.annotation)", "def __init__(self, data_id, course_fields, speed_fields, heading_fields,\n wind_dir_fields, wind_speed_fields,\n update_on_fields=None,\n zero_line_reference=0,\n convert_wind_factor=1,\n convert_speed_factor=1,\n output_nmea=False):\n super().__init__(input_format=formats.Python_Record,\n output_format=formats.Text)\n self.data_id = data_id\n self.course_fields = course_fields.split(',')\n self.speed_fields = speed_fields.split(',')\n self.heading_fields = heading_fields.split(',')\n self.wind_dir_fields = wind_dir_fields.split(',')\n self.wind_speed_fields = wind_speed_fields.split(',')\n\n if update_on_fields:\n self.update_on_fields = update_on_fields.split(',')\n else:\n self.update_on_fields = self.wind_dir_fields\n self.zero_line_reference = zero_line_reference\n\n self.convert_wind_factor = convert_wind_factor\n self.convert_speed_factor = convert_speed_factor\n self.output_nmea = output_nmea\n \n self.course_val = None\n self.speed_val = None\n self.heading_val = None\n self.wind_dir_val = None\n self.wind_speed_val = None\n\n self.last_timestamp = 0", "def _build_parsed_values(self):\n\n # \n # Generate a velocity data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the velocity data record.\n #\n global flags\n particle = []\n field = 0\n for flag in range(0, FLAG_RECORD_SIZE):\n #\n # If the flags indicated that this field is to be expected,\n # store the next unpacked value into the data particle.\n #\n key = VEL3D_PARAMETERS[flag][INDEX_KEY]\n if flags[flag]:\n if flag == INDEX_FLAG_Time:\n #\n # This returns a tuple, but particle wants a list.\n #\n time_array = self.raw_data[field:field + OUTPUT_TIME_SIZE]\n\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: list(time_array)})\n field += OUTPUT_TIME_SIZE\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: self.raw_data[field]})\n field += 1\n\n #\n # If flags indicate that this field is not present,\n # output a value of None.\n #\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: None})\n\n return particle", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def _build_accessor(bufferview, ele_type, comptype_id, count, max_vals, min_vals, byte_offset, normalized):\n normalized = None if not normalized else normalized\n\n new_accessor = {\n \"bufferView\": bufferview,\n \"componentType\": comptype_id,\n \"type\": ele_type,\n \"count\": count,\n }\n\n properties_keys = [\"byteOffset\", \"normalized\", \"max\", \"min\"]\n properties_values = [byte_offset, normalized, max_vals, min_vals]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_accessor[key] = val\n\n return 
new_accessor", "def init(self):\n logger.info(mm_cnofs.ackn_str)\n self.acknowledgements = mm_cnofs.ackn_str\n self.references = '\\n'.join((mm_cnofs.refs['mission'],\n mm_cnofs.refs['vefi']))\n\n return", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def _init():\n line.set_data([], [])\n return line,", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def init_data_array(self, mess = None): \n if self.verbose > 1:\n print(\"MultiLinearSpectra.init_data_array()\") \n \n if mess is None:\n if self.mess is None:\n warnings.warn(\"MultiLinearSpectra.init_data_array(): no data to initialize\")\n return None\n else:\n self.mess = mess\n \n\n \n \n for m in range(len(self.mess)):\n \n self.mess[m][\"index\"] = m\n \n kwargs = {}\n for k, v in self.mess[m].items():\n kwargs[k] = v\n \n if self.mess[m][\"class\"] == \"PASGas\" and flag_ST:\n self.mess[m][\"object\"] = PASG.PASGas(verbose = self.verbose, **kwargs)\n\n elif self.mess[m][\"class\"] == \"PASLiquid\" and flag_ST:\n self.mess[m][\"object\"] = PASL.PASLiquid(verbose = self.verbose, **kwargs)\n\n\n # x_unit = self.mess[0].x_unit\n # y_unit = self.mess[0].y_unit\n\n # for m in range(1, len(self.mess)):\n # if x_unit != self.mess[m].x_unit:\n # self.mess.x_unit", "def _setup_metadata(self):\n # loom_metadata is what we use to pass all the information about\n # the loom (max_depth, which typeshapes are supported, and the signatures of\n # the LoomOps) to scheduler.cc\n loom_metadata = loom_pb2.LoomMetadata()\n loom_metadata.max_depth = self._max_depth\n for ts, tensor_names in zip(\n self._type_shapes, self._ts_idx_to_tensor_names):\n type_shape_metadata = loom_metadata.type_shape_metadata.add()\n type_shape_metadata.dtype = ts.dtype_enum\n type_shape_metadata.shape.extend(ts.shape)\n type_shape_metadata.tag = ts.tag\n type_shape_metadata.name = str(ts) # Debug string.\n type_shape_metadata.tensor_names.extend(tensor_names)\n type_shape_metadata.is_batch_input = (\n (ts in self._batch_inputs) or self._direct_feed_dict)\n\n for op_name, op in zip(self._loom_op_names, self._loom_ops):\n op_metadata = loom_metadata.op_metadata.add()\n op_metadata.name = op_name\n op_metadata.input_ts_idx.extend(\n self._type_shape_to_idx[ts] for ts in op.input_type_shapes)\n op_metadata.output_ts_idx.extend(\n self._type_shape_to_idx[ts] for ts in op.output_type_shapes)\n\n self._loom_metadata_str = (\n loom_metadata.SerializeToString())", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', 
'\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def __init__(self, keep_last_n_lines=5) :\r\n self.contextLines_ = keep_last_n_lines\r\n self.data_ = CircularBuffer(1024)\r\n self.lineNumber_ = 1\r\n self.charNumber_ = 0", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. 
We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def __init__(self, extra_fields=None):\n if extra_fields:\n self.fields.extend(extra_fields)\n self.data = {k: [] for k in self.fields}\n self.last_r = 0.0", "def _nanosims_header(self, hdr):\n # Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter\n d = {}\n d['PeakCenter'] = {}\n d['nanosimsheader version'], d['regulation mode'], d['mode'], \\\n d['grain mode'], d['semigraphic mode'], d['stage delta x'], \\\n d['stage delta y'], d['working frame width'], \\\n d['working frame height'], d['scanning frame x'], \\\n d['scanning frame width'], d['scanning frame y'], \\\n d['scanning frame height'], d['counting frame x start'], \\\n d['counting frame x end'], d['counting frame y start'], \\\n d['counting frame y end'], d['detector type'], d['electron scan'], \\\n d['scanning mode'], d['beam blanking'], \\\n d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \\\n d['PeakCenter']['frequency'], d['b fields'] = \\\n unpack(self._bo + '25i', hdr.read(100))\n\n d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])\n d['regulation mode'] = bool(d['regulation mode'])\n d['grain mode'] = bool(d['grain mode'])\n d['semigraphic mode'] = bool(d['semigraphic mode'])\n d['scanning mode'] = bool(d['scanning mode'])\n\n # Set a few extra variables.\n d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1\n d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1\n\n # Found in at least one version (file v11, nsHeader v8) a repeat of\n # Poly_list and this first part of nanoSIMSHeader. Total of repeat\n # adds up to 288. 
After last Poly_list, 288 byte padding zone, not all\n # null-bytes.\n hdr.seek(288, 1)\n\n # Is this the nPrintRed from OpenMIMS?\n d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])\n\n d['SibCenterHor'] = self._sib_center(hdr)\n d['SibCenterVert'] = self._sib_center(hdr)\n\n # Duplicate and store these two in sub dicts\n b_field_index, has_sib_center = \\\n unpack(self._bo + '2i', hdr.read(8))\n if b_field_index < 0:\n b_field_index = None\n has_sib_center = bool(has_sib_center)\n\n d['SibCenterHor']['b field index'] = b_field_index\n d['SibCenterVert']['b field index'] = b_field_index\n d['SibCenterHor']['sib center enabled'] = has_sib_center\n d['SibCenterVert']['sib center enabled'] = has_sib_center\n\n d['EnergyCenter'] = self._energy_center(hdr)\n d['E0SCenter'] = self._e0s_center(hdr)\n\n d['EnergyCenter']['wait time'], d['presputtering raster'], \\\n d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \\\n d['baseline measurement'], d['baseline offset'], \\\n d['baseline frequency'] = \\\n unpack(self._bo + '5i d i', hdr.read(32))\n return d", "def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n # Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each Phase DNP3 Points\n for m in measurements:\n attribute = 
attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + 
m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, 
measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + \",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json", "def _populate(self):\n if not hasattr(self, 'multiline'):\n start = self.start\n end = self.end\n txt = self.filetext\n self.start_line = txt.count('\\n', 0, start) + 1\n self.start_column = start - txt.rfind('\\n', 0, start) - 1\n self.end_line = txt.count('\\n', start, end) + self.start_line\n self.end_column = end - txt.rfind('\\n', 0, end) - 1\n self.multiline = self.start_line != self.end_line", "def build_parts_from_dict(self, data, skip_power_controls=False):\n \n # Validate Objects information.\n if \"Objects\" not in data:\n return\n\n # Start creating parts.\n parts = []\n for part_data in data[\"Objects\"]:\n part = part_data[\"ObjectID\"].replace(\"^\", \"\")\n timestamp = part_data[\"Timestamp\"]\n user_data = part_data[\"UserData\"]\n part_position = part_data[\"Position\"]\n up_vec = part_data[\"Up\"]\n at_vec = part_data[\"At\"]\n # Build the item.\n item = self.build_item(\n part,\n timestamp,\n user_data,\n part_position,\n up_vec,\n at_vec,\n skip_power_controls\n )\n parts.append(item)\n\n return parts", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def _build_parsed_values(self):\n\n SERIAL_NUMBER = \"SerialNumber\"\n CALIBRATION = \"Calibration\"\n ID = \"id\"\n TEMPERATURE_SENSOR_ID = \"Main Temperature\"\n CONDUCTIVITY_SENSOR_ID = \"Main Conductivity\"\n PRESSURE_SENSOR_ID = \"Main Pressure\"\n VOLT0 = \"Volt 0\"\n VOLT1 = \"Volt 1\"\n VOLT2 = \"Volt 2\"\n VOLT3 = \"Volt 3\"\n VOLT4 = \"Volt 4\"\n VOLT5 = \"Volt 5\"\n EXTERNAL_FREQUENCY_CHANNEL = \"external frequency channel\"\n\n # check to make sure there is a correct match before continuing\n match = 
SBE19CalibrationParticle.regex_compiled().match(self.raw_data)\n if not match:\n raise SampleException(\"No regex match of parsed calibration data: [%s]\" %\n self.raw_data)\n\n dom = parseString(self.raw_data)\n root = dom.documentElement\n log.debug(\"root.tagName = %s\", root.tagName)\n serial_number = root.getAttribute(SERIAL_NUMBER)\n result = [{DataParticleKey.VALUE_ID: SBE19CalibrationParticleKey.SERIAL_NUMBER,\n DataParticleKey.VALUE: serial_number}]\n\n calibration_elements = self._extract_xml_elements(root, CALIBRATION)\n for calibration in calibration_elements:\n id_attr = calibration.getAttribute(ID)\n if id_attr == TEMPERATURE_SENSOR_ID:\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TEMP_SENSOR_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TEMP_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TA3))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.TOFFSET))\n elif id_attr == CONDUCTIVITY_SENSOR_ID:\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.COND_SENSOR_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.COND_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDG))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDH))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDI))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CONDJ))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CPCOR))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CTCOR))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.CSLOPE))\n elif id_attr == PRESSURE_SENSOR_ID:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_SERIAL_NUMBER, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_CAL_DATE, str))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCA2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTCB2))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA0))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA1))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PTEMPA2))\n 
result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.POFFSET))\n result.append(\n self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.PRES_RANGE, self.float_to_int))\n elif id_attr == VOLT0:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT0_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT0_SLOPE))\n elif id_attr == VOLT1:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT1_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT1_SLOPE))\n elif id_attr == VOLT2:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT2_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT2_SLOPE))\n elif id_attr == VOLT3:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT3_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT3_SLOPE))\n elif id_attr == VOLT4:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT4_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT4_SLOPE))\n elif id_attr == VOLT5:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT5_OFFSET))\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_VOLT5_SLOPE))\n elif id_attr == EXTERNAL_FREQUENCY_CHANNEL:\n result.append(self._get_xml_parameter(calibration, SBE19CalibrationParticleKey.EXT_FREQ))\n\n return result", "def _build_parsed_values(self):\n\n # \n # Generate a time data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the time data record.\n #\n particle = [\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_ON, \n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_ON]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_OFF,\n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_OFF]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.NUMBER_OF_RECORDS, \n DataParticleKey.VALUE: self.raw_data[INDEX_RECORDS]\n }\n ]\n\n return particle", "def __init__(self):\n self._data = PositionalList()", "def __init__(self):\n self._data = PositionalList()", "def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=0, card='OBJRA')\n meta['dec'] = dict(ext=0, card='OBJDEC')\n meta['target'] = dict(ext=0, card='OBJECT')\n meta['decker'] = dict(ext=0, card='ALAPRTNM')\n meta['binning'] = dict(card=None, compound=True)\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=0, card='EXPTIME')\n meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=0, card='ALGRNM')\n meta['idname'] = dict(ext=0, card='IMAGETYP')\n # Lamps\n # Use Keck/LRIS approach\n\n # Ingest\n self.meta = meta", "def _build_parsed_values(self):\r\n # match the data inside the wrapper\r\n if len(self.raw_data) < ACCEL_BYTES or self.raw_data[0] != ACCEL_ID:\r\n raise SampleException(\"MopakODclAccelParserDataParticle: Not enough bytes provided in [%s]\",\r\n self.raw_data)\r\n fields = struct.unpack('>fffffffffI', self.raw_data[1:ACCEL_BYTES - 2])\r\n\r\n result = [self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELX, fields[0], 
float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELY, fields[1], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ACCELZ, fields[2], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEX, fields[3], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEY, fields[4], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_ANG_RATEZ, fields[5], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGX, fields[6], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGY, fields[7], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_MAGZ, fields[8], float),\r\n self._encode_value(MopakODclAccelParserDataParticleKey.MOPAK_TIMER, fields[9], int)]\r\n\r\n return result", "def create_observation(self):", "def create_observation(self):" ]
[ "0.6157046", "0.6072608", "0.58471173", "0.58050644", "0.57371134", "0.5713524", "0.5703575", "0.5679709", "0.56751573", "0.5645145", "0.5609161", "0.5597858", "0.5583448", "0.556738", "0.55536085", "0.5551123", "0.5549294", "0.5537869", "0.5534043", "0.55129117", "0.55097", "0.54995054", "0.54811347", "0.54799044", "0.54686415", "0.5433186", "0.5424546", "0.5421587", "0.54006356", "0.539491", "0.5390095", "0.5385168", "0.53821373", "0.5378212", "0.5368411", "0.53660816", "0.5365773", "0.5364958", "0.5364525", "0.53469825", "0.5344724", "0.5340566", "0.53395283", "0.533932", "0.53388256", "0.5336162", "0.53319293", "0.5330364", "0.5319766", "0.53186613", "0.53157556", "0.5311225", "0.5307367", "0.53037626", "0.5297951", "0.5290392", "0.5287088", "0.5282958", "0.52800566", "0.52773386", "0.5276146", "0.5270409", "0.5268794", "0.52658683", "0.5262592", "0.5259623", "0.52594256", "0.525852", "0.52580935", "0.5252423", "0.5251241", "0.52380085", "0.5229968", "0.52269745", "0.5217946", "0.5204737", "0.5204122", "0.5201803", "0.51947206", "0.5192479", "0.51877606", "0.51836234", "0.5183403", "0.51825905", "0.5179669", "0.5172083", "0.51702887", "0.516931", "0.5166824", "0.5165168", "0.51647574", "0.516351", "0.51628596", "0.51578915", "0.5155275", "0.51543987", "0.51543987", "0.5145753", "0.5145331", "0.51435316", "0.51435316" ]
0.0
-1
write the cutouts for the specified type
def _write_psf_cutouts_hst(self): print('writing psf cutouts') obj_data=self.obj_data psf_data=self.psf_data nfile=self.image_info.size nobj=obj_data.size cutout_hdu = self.fits['psf'] for iobj in range(nobj): if (iobj+1) % 100 == 0: print(' %d/%d' % (iobj+1,obj_data.size)) # HST psf is same for every cutout, in fact ncut should always # be 1 try: psf_im = self.psf_data.get_psf(iobj) except AttributeError: psf_im = None ncut=obj_data['ncutout'][iobj] for icut in range(ncut): if psf_im is None: row = obj_data['orig_row'][iobj, icut] col = obj_data['orig_col'][iobj, icut] file_id = obj_data['file_id'][iobj,icut] p = self.psf_data[file_id] psf_im = p.get_rec(row,col) expected_psf_shape = ( obj_data['psf_row_size'][iobj,icut], obj_data['psf_col_size'][iobj,icut], ) file_id = obj_data['file_id'][iobj, icut] row = obj_data['orig_row'][iobj, icut] col = obj_data['orig_col'][iobj, icut] start_row = obj_data['psf_start_row'][iobj, icut] if psf_im.shape != expected_psf_shape: raise ValueError("psf size mismatch, expected %s " "got %s" % (expected_psf_shape, psf_im.shape)) cutout_hdu.write(psf_im, start=start_row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write_moleculetype(top_file: IO, mol_name: str, nrexcl: int = 3):\n top_file.write(\"[ moleculetype ]\\n\")\n top_file.write(\"; Name\\tnrexcl\\n\")\n top_file.write(f\"{mol_name}\\t{nrexcl}\\n\\n\")", "def write(self, out):", "def write_output(self):", "def write(self):", "def write(self):", "def writeOutput(self, output):", "def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,\n do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):\n\n titles = []\n for i in range(len(image_list)):\n image_name = os.path.split(image_list[i])[1]\n titles.append(image_name[:-4])\n\n # Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)\n if len(mask_part) == 1:\n mask_el = mask_part.pop()\n\n if mask_el == \"glomerulus\":\n network_area = \"glomerulus_area\"\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),\n 'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),\n 'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),\n 'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),\n 'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),\n 'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),\n 'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n elif mask_el == \"podocytes\":\n network_count = \"podocyte_count\"\n network_area = \"podocyte_nuclear_area\"\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_DACH1_signal_in_podo': pd.Series(dataset_dict['mean_DACH1_podo_preds']),\n 'var_DACH1_signal_in_podo': pd.Series(dataset_dict['var_DACH1_podo_preds']),\n 'median_DACH1_signal_in_podo': pd.Series(dataset_dict['median_DACH1_podo_preds']),\n 'min_DACH1_signal_in_podo': pd.Series(dataset_dict['min_DACH1_podo_preds']),\n 'max_DACH1_signal_in_podo': pd.Series(dataset_dict['max_DACH1_podo_preds']),\n 'perc25_DACH1_signal_in_podo': pd.Series(dataset_dict['perc25_DACH1_podo_preds']),\n 'perc75_DACH1_signal_in_podo': pd.Series(dataset_dict['perc75_DACH1_podo_preds'])\n })\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n else:\n raise ValueError('The name of the segmentation is not known:', mask_el)\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_el))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n\n # Segmentation of 2 classes were applied (e.g. 
glomerulus and podocytes)\n elif len(mask_part) == 2:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n \"glomerulus_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[0]]),\n \"podocyte_count\": pd.Series(dataset_dict['count_preds_%s' % mask_part[1]]),\n \"podocyte_nuclear_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[1]])})\n\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df['mean_WT1_signal_in_glom'] = dataset_dict['mean_WT1_glom_preds']\n df['var_WT1_signal_in_glom'] = dataset_dict['var_WT1_glom_preds']\n df['median_WT1_signal_in_glom'] = dataset_dict['median_WT1_glom_preds']\n df['min_WT1_signal_in_glom'] = dataset_dict['min_WT1_glom_preds']\n df['max_WT1_signal_in_glom'] = dataset_dict['max_WT1_glom_preds']\n df['perc25_WT1_signal_in_glom'] = dataset_dict['perc25_WT1_glom_preds']\n df['perc75_WT1_signal_in_glom'] = dataset_dict['perc75_WT1_glom_preds']\n\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df['mean_DACH1_signal_in_podo'] = dataset_dict['mean_DACH1_podo_preds']\n df['var_DACH1_signal_in_podo'] = dataset_dict['var_DACH1_podo_preds']\n df['median_DACH1_signal_in_podo'] = dataset_dict['median_DACH1_podo_preds']\n df['min_DACH1_signal_in_podo'] = dataset_dict['min_DACH1_podo_preds']\n df['max_DACH1_signal_in_podo'] = dataset_dict['max_DACH1_podo_preds']\n df['perc25_DACH1_signal_in_podo'] = dataset_dict['perc25_DACH1_podo_preds']\n df['perc75_DACH1_signal_in_podo'] = dataset_dict['perc75_DACH1_podo_preds']\n\n if do_stereology_pred:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='pred')\n # Add it to df\n df['stereology_on_prediction-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_prediction-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_prediction-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n if do_stereology_gt:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='gt')\n # Add it to df\n df['stereology_on_groundtruth-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_groundtruth-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_groundtruth-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_part[0] + mask_part[1]))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n return", "def writetif(self,outputname,):\n pass", "def write(self, out):\r\n out.write('# {0:<11} {1:<6} {2:<6} {3:<6} {4}\\n'\r\n .format('Time(s)', 'X(mm)', 'Y(mm)', 'Z(um)', 'Tile'))\r\n for i in self: out.write(self.format_pt(i))", "def write_report(report, ftype):\n if ftype == 'text':\n msg = '{} disks have been removed\\n'.format(len(report))\n msg += 'To replace them, run:\\n'\n for device, action_args in report.items():\n args = json.dumps(action_args, separators=(' ', '='))\n args = args.replace('{', '').replace('}', '').replace('\"', '')\n msg += 'juju run-action {} add-disk {} {}'.format(\n hookenv.local_unit(), 'osd-devices=' + device, args)\n else:\n msg = json.dumps(report)\n\n hookenv.action_set({'message': msg})", "def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else 
DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" \",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads", "def to_logchunk(self):\n\t\tdemo_name = os.path.splitext(self.demo_name)[0]\n\t\tto_write = [(\"Killstreak\", value, tick, date) for value, tick, date in self.killstreaks]\n\t\tto_write.extend((\"Bookmark\", value, tick, date) for value, tick, date in self.bookmarks)\n\n\t\tto_write.sort(key = lambda t: t[2])\n\n\t\treturn \"\\n\".join(\n\t\t\tf'[{date}] {type_} {value} (\"{demo_name}\" at {tick})'\n\t\t\tfor type_, value, tick, date in to_write\n\t\t)", "def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in 
content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def dump_cuts_list(self, file_name):\n assert(file_name is not None)\n with open(file_name, 'w') as fd:\n uids = self._cuts.keys()\n uids.sort()\n for cut_uid in uids:\n cut = self._cuts[cut_uid]\n fd.write(cut.get_cost_var() + \" \" + str(cut.get_cost()) + \"\\n\")\n return", "def write_out(c2ptmk, ofn):\n print \"Writing out to [{}]\".format(ofn)\n with codecs.open(ofn, \"w\", \"utf8\") as ofd:\n for co, infos in sorted(c2ptmk.items()):\n ofd.write(u\"{}\\t{}\\t{}\\n\".format(\n co, infos[\"uri\"], \",\".join(\n [unicode(x) for x in infos[\"ptmks\"]])))", "def write_run(run):\n r=Run(run)\n r.write_all()", "def write_analysis(path, dataset_dict, datasettype, mask_part, start_time, supervised=True):\n for mask_el in mask_part:\n if mask_el == 'podocytes':\n filename = datasettype + '_podos.txt'\n filestr = 'podos images'\n elif mask_el == 'glomerulus':\n filename = datasettype + '_gloms.txt'\n filestr = 'gloms images'\n else:\n filename = datasettype + 'unknown.txt'\n filestr = 'unknown type'\n\n write_txt = open(str(os.path.join(path, filename)), \"w\")\n\n if supervised:\n dc_mean = np.sum(np.array(dataset_dict['dice_coeffs_%s' % mask_el])) / len(dataset_dict['dice_coeffs_%s'\n % mask_el])\n dc_min = np.min(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n dc_max = np.max(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n object_dc_mean = np.sum(np.array(dataset_dict['object_dc_%s' % mask_el])) / len(dataset_dict['object_dc_%s'\n % mask_el])\n object_dc_min = np.min(np.array(dataset_dict['object_dc_%s' % mask_el]))\n object_dc_max = np.max(np.array(dataset_dict['object_dc_%s' % mask_el]))\n pearson = calculate_pearson(dataset_dict['count_masks_%s' % mask_el], dataset_dict['count_preds_%s'\n % mask_el])\n\n write_txt.write(str(\"Mean dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_min) + '\\n')\n write_txt.write(str(\"Max dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_max) + '\\n')\n write_txt.write(str(\"Pearson correlation coefficient on objects of \" + filestr +\n \" compared to groundtruth: \") + str(pearson) + '\\n')\n write_txt.write(str(\"Mean dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_min) + '\\n')\n write_txt.write(str(\"Max dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_max) + '\\n')\n write_txt.write('\\n')\n\n duration = time.time() - start_time\n duration_std = int(duration / 3600)\n duration_min = int((duration % 3600) / 60)\n duration_sec = int(duration % 60)\n\n write_txt.write(str(\"Test time: \") + 
str(duration_std) + \"h \" + str(duration_min)\n + \"min \" + str(duration_sec) + 'sec \\n')\n write_txt.close()\n return", "def writeClumptoDump(self,ID):\n clumpxyz = self.clumpcat[ID][:3]\n r2 = (self.disc.xyzh[0]-clumpxyz[0])**2 + (self.disc.xyzh[1]-clumpxyz[1])**2\n members = np.sqrt(r2) < self.annulus #members are all particles within radial annulus\n\n gas = self.disc.itype == 1\n dust = self.disc.itype == 2\n\n dustfrac = self.disc.dustfrac*1e8 #as I originally set dust-to-gas=1e-10\n\n #Calculate temperatures from thermal energies\n k = 1.38064852e-16 #ergs\n mH = 1.6735575e-24 #grams\n gmw = 2.381 #mean mass taken from Phantom\n N = sum(gas*self.disc.massofgas)*self.umass/mH/gmw #number of atoms\n temp = 2.*self.disc.utherm*self.uenerg/3./k/N\n\n\t\tutime = self.utime/(60*60*24*365.25)\n\n\t\t#create arrays of particle masses\n\t\tmass = np.zeros(len(self.disc.xyzh[0,:]))\n\t\tmass[self.disc.itype == 1] = self.disc.massofgas\n\t\tmass[self.disc.itype == 2] = self.disc.massofdust\n\n\t\tclumpdata = zip(self.disc.xyzh[0,members], self.disc.xyzh[1,members], self.disc.xyzh[2,members], \n self.disc.xyzh[3,members], self.disc.density[members], mass[members], \n temp[members], dustfrac[members], self.disc.itype[members])\n\t\tclumpdata = np.asarray(clumpdata)\n\t\theader = (\"time: %s utime (yrs^-1): %s \\n x, y, z, h, density, mass, temp, \" %(str(self.disc.time), str(utime)) +\n\t\t\t \"dustfrac, itype \\n %s, %s, %s, %s, %s, %s, 0.0, 0.0, 0.0 \\n \\n\" %(str(self.udist), str(self.udist),\n\t\t\t \t\t\t\t\t\t\t\t str(self.udist), str(self.udist),\n\t\t\t\t\t\t\t\t\t\t\t str(self.udens), str(self.umass)))\n\t\tnp.savetxt('%s/clumpfiles/clumpdata_%.0f.txt' %(self.wd,self.disc.time), clumpdata, header=header)", "def write_pc_cards(bc_file, bc_class):\n bc_file.write('! 
Output Control\\n')\n oc = bc_class.output_control\n objects = list(oc.param.output_control_option.get_range())\n if oc.output_control_option == objects[0]:\n bc_file.write('OC {}\\n'.format(oc.oc_time_series_id))\n ofs = oc.output_flow_strings\n if not ofs.empty:\n bc_file.write(ofs.to_csv(sep=' ', na_rep='', index=False, header=False,).replace('\\r\\n', '\\n'))\n\n if oc.print_adaptive_mesh:\n bc_file.write('PC ADP\\n')\n if oc.print_numerical_fish_surrogate:\n bc_file.write('PC ELM\\n')\n if oc.screen_output_residual:\n bc_file.write('SOUT RESID\\n')\n if oc.screen_output_all:\n bc_file.write('SOUT ALL\\n')\n if oc.screen_output_mass_error:\n bc_file.write('SOUT MERROR\\n')\n if oc.screen_output_worst_nonlinear_node:\n bc_file.write('SOUT NLNODE\\n')\n if oc.screen_output_worst_linear_node:\n bc_file.write('SOUT LNODE\\n')\n if oc.file_output_wind:\n bc_file.write('FOUT WIND\\n')\n if oc.file_output_wave:\n bc_file.write('FOUT WAVE\\n')\n if oc.file_output_adapted_grid:\n bc_file.write('FOUT ADAPT GRID\\n')\n if oc.file_output_adapted_solution:\n bc_file.write('FOUT ADAPT SW\\n')\n if oc.file_output_adapted_transport:\n bc_file.write('FOUT ADAPT CON\\n')\n if oc.file_output_sediment:\n bc_file.write('FOUT SED\\n')\n\n bc_file.write('\\n') # blank line after Output Control", "def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)", "def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False):\n self.logger.info('Writing KG correlations to %s',file_name)\n precision = self.config.get('precision', 4) if precision is None else precision\n name = 'main' if write_patch_results else None\n with make_writer(file_name, precision, file_type, self.logger) as writer:\n self._write(writer, name, write_patch_results)", "def sitofp(self, typ):", "def write_fits(self, name=None, output_path=None):\n pass", "def write_to(self, stream: StreamWrapper):\n stream.write_int(len(self.moves))\n for element in self.moves:\n element.write_to(stream)\n stream.write_int(len(self.buildings))\n for element in self.buildings:\n element.write_to(stream)\n if self.choose_specialty is None:\n stream.write_bool(False)\n else:\n stream.write_bool(True)\n stream.write_int(self.choose_specialty)", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def write_output_files(self, file_type, output, expected):\n actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)\n expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)\n\n self._write_file(actual_filename, output)\n self._write_file(expected_filename, expected)", "def export_summary(\n self,\n output_dir=None,\n solution_name=None,\n type=\"Object\",\n geometryType=\"Volume\",\n quantity=\"Temperature\",\n variation=\"\",\n variationlist=[],\n ):\n all_objs = list(self.modeler.oeditor.GetObjectsInGroup(\"Solids\"))\n all_objs_NonModeled = list(self.modeler.oeditor.GetObjectsInGroup(\"Non Model\"))\n all_objs_model = [item for item in all_objs if item not in all_objs_NonModeled]\n arg = []\n self.logger.glb.info(\"Objects lists \" + str(all_objs_model))\n for el in all_objs_model:\n try:\n self.osolution.EditFieldsSummarySetting(\n [\"Calculation:=\", [type, geometryType, el, quantity, \"\", \"Default\"]]\n )\n arg.append(\"Calculation:=\")\n arg.append([type, geometryType, el, 
quantity, \"\", \"Default\"])\n except Exception as e:\n self.logger.glb.error(\"Object \" + el + \" not added.\")\n self.logger.glb.error(str(e))\n if not output_dir:\n output_dir = self.project_path\n self.osolution.EditFieldsSummarySetting(arg)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n if not solution_name:\n solution_name = self.nominal_sweep\n if variation:\n for l in variationlist:\n self.osolution.ExportFieldsSummary(\n [\n \"SolutionName:=\",\n solution_name,\n \"DesignVariationKey:=\",\n variation + \"='\" + str(l) + \"'\",\n \"ExportFileName:=\",\n os.path.join(output_dir, \"IPKsummaryReport\" + quantity + \"_\" + str(l) + \".csv\"),\n ]\n )\n else:\n self.osolution.ExportFieldsSummary(\n [\n \"SolutionName:=\",\n solution_name,\n \"DesignVariationKey:=\",\n \"\",\n \"ExportFileName:=\",\n os.path.join(output_dir, \"IPKsummaryReport\" + quantity + \".csv\"),\n ]\n )\n return True", "def write(self):\n pass", "def write(self):\n pass", "def cutPaper(self, cut='partial', feed=True):\n if cut not in ['partial', 'full']:\n raise ValueError('cut must be \\'partial\\' or \\'full\\'')\n elif type(feed) is not bool:\n raise ValueError('feed must be True or False')\n else:\n value = 0 if cut == 'full' else 1\n value += 65 if feed else 0\n self._write(self.__class__.__GS + 'V' + chr(value))", "def cut_to_summary(file_name, directory, leave_out=[]):\n i = 0\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(file_name) as to_cut:\n line = to_cut.readline()\n while line != \"\" and line is not None:\n if i in leave_out:\n i += 1\n print(line)\n f = open(\"{}/summary_{}.txt\".format(directory, i), \"w\")\n f.write(line)\n f.close()\n i += 1\n line = to_cut.readline()\n print(file_name, i)", "def export(self, file: TextIO) -> None:\n file.write(f'\"{self.name}\"\\n\\t{{\\n')\n file.write(f'\\tchannel {self.channel}\\n')\n file.write(f'\\tsoundlevel {join_float(self.level)}\\n')\n\n if self.volume != (1, 1):\n file.write(f'\\tvolume {join_float(self.volume)}\\n')\n if self.pitch != (100, 100):\n file.write(f'\\tpitch {join_float(self.pitch)}\\n')\n\n if len(self.sounds) != 1:\n file.write('\\trndwave\\n\\t\\t{\\n')\n for wav in self.sounds:\n file.write(f'\\t\\twave \"{wav}\"\\n')\n file.write('\\t\\t}\\n')\n else:\n file.write(f'\\twave \"{self.sounds[0]}\"\\n')\n\n if self.force_v2 or self.stack_start or self.stack_stop or self.stack_update:\n file.write(\n '\\t' 'soundentry_version 2\\n'\n '\\t' 'operator_stacks\\n'\n '\\t\\t' '{\\n'\n )\n if self.stack_start:\n file.write(\n '\\t\\t' 'start_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_start:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_update:\n file.write(\n '\\t\\t' 'update_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_update:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n if self.stack_stop:\n file.write(\n '\\t\\t' 'stop_stack\\n'\n '\\t\\t\\t' '{\\n'\n )\n for prop in self.stack_stop:\n for line in prop.export():\n file.write('\\t\\t\\t' + line)\n file.write('\\t\\t\\t}\\n')\n file.write('\\t\\t}\\n')\n file.write('\\t}\\n')", "def write(self):\n raise NotImplementedError", "def save(self):\n \n fileName=self.characterName+\"_\"+self.race+\"_\"+self.classType+\"_lvl_\"+str(self.level)\n new_file = open(str(fileName)+\".txt\",\"w\")\n new_file.write(\"~~~~~~~~~~~ \"+self.characterName+\" the \"+self.race+\" \"+self.classType+\" ~~~~~~~~~~~\\n\\n\")\n 
new_file.write(\"Level: \"+str(self.level)+\" HP: \"+str(self.hp)+\" XP: \"+str(self.xp)+\" Hit Dice: \"+str(self.level)+str(self.hit_dice[self.classType])+\"\\n\")\n new_file.write(str(self.abilityScores()))\n new_file.write(\"\\n\\n~~~~~~~~~ Skills ~~~~~~~~~\\n\")\n for i in self.skills:\n new_file.write(\"\\n\"+i+\" \"+\"(\"+skills[i.lower()].upper()+\")\")\n new_file.write(\"\\n\\n~~~~~~~~~ Traits ~~~~~~~~~\\n\")\n for i in self.traits:\n new_file.write(\"\\n ~~\"+i+\"~~\\n \"+str(self.traits[i])+\"\\n\")\n new_file.write(\"\\n\\n~~~~~~~~~ Specialty: \"+self.specialty+\" ~~~~~~~~\\n\")\n new_file.write(\"\\n \"+self.specialtyStory+\"\\n\")\n new_file.write(\"\\n ~~~~ Feats ~~~~\\n\")\n for i in range(1,self.level+1):\n if i == 1 or i%3 == 0:\n new_file.write(\"\\n Level \"+str(i)+\": \"+self.feats[i]['name']+' '\\\n \"(\"+self.feats[i]['type']+\")\\n\"\\\n ' \"'+self.feats[i]['description']+'\"\\n\\n')\n if 'prereq' in self.feats[i]:\n new_file.write(\" Prerequisite: \"+self.feats[i]['prereq']+\"\\n\")\n if 'benefit' in self.feats[i]:\n new_file.write(\" Benefit: \"+self.feats[i]['benefit']+\"\\n\")\n if 'effect' in self.feats[i]:\n new_file(\" Effect: \"+self.feats[i]['effect']+\"\\n\")\n \n new_file.write(\"\\n\\n~~~~~~~~~ Background: \"+self.background+\" ~~~~~~~~\\n\")\n if self.backgroundProfession == '':\n pass\n else:\n new_file.write(\"Profession: \"+self.backgroundProfession)\n new_file.write(\"\\n \"+self.backgroundStory)\n \n new_file.close()\n print \"File \"+str(fileName)+\".txt saved.\"", "def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")", "def writeDataCards(opt,sigExp,bkgExp,shapesURL):\n\n #create a card per category\n dcList=[]\n for icat in range(len(opt.categs)):\n cat='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n dcTxt='%s/shapes-parametric.datacard_%s.dat'%(opt.output,cat)\n dcList.append(dcTxt)\n with open(dcTxt,'w') as dc:\n dc.write('#\\n')\n dc.write('# datacard was automatically generated with generateWorkspace.py\\n')\n dc.write('# the options passed are printed below\\n')\n dc.write('# %s\\n'%opt)\n dc.write('#\\n')\n dc.write('imax *\\n')\n dc.write('jmax *\\n')\n dc.write('kmax *\\n')\n dc.write('-'*50+'\\n')\n dc.write('shapes * * {0} $PROCESS_{1} $PROCESS_$SYSTEMATIC\\n'.format(shapesURL,cat))\n dc.write('shapes data_obs * {0} $PROCESS_{1}\\n'.format(shapesURL,cat))\n dc.write('-'*50+'\\n')\n dc.write('bin %s\\n'%cat)\n dc.write('observation -1\\n')\n dc.write('-'*50+'\\n')\n dc.write('%15s %15s %15s\\n'%('bin',cat,cat))\n dc.write('%15s %15s %15s\\n'%('process','sig','bkg'))\n dc.write('%15s %15s %15s\\n'%('process','0', '1'))\n dc.write('%15s %15s %15s\\n'%('rate','%3.2f'%sigExp[icat], '%3.2f'%bkgExp[icat]))\n dc.write('-'*50+'\\n')\n \n #float the background normalization as well as the signal\n dc.write('mu_bkg{0} rateParam {0} bkg 1\\n'.format(cat))\n\n #uncertainties\n dc.write('lumi %8s %15s %15s\\n'%('lnN','1.027','-'))\n dc.write('%s_sigShape %8s %15s %15s\\n'%(cat,'shape','1','-'))\n dc.write('%s_bkgShape %8s %15s %15s\\n'%(cat,'shape','-','1'))\n dc.write('{0} autoMCStats 0.0 1\\n'.format(cat))\n \n print '\\tshapes available @',shapesURL\n print '\\tgenerated the following datacards',dcList", "def _write_sets(self, size, card_writer):\n msg = []\n if (self.sets or self.setsSuper or self.asets or self.bsets or\n self.csets or self.qsets):\n msg.append('$SETS\\n')\n for 
(unused_id, set_obj) in sorted(self.sets.iteritems()): # dict\n msg.append(set_obj.write_bdf(size, card_writer))\n for set_obj in self.asets: # list\n msg.append(set_obj.write_bdf(size, card_writer))\n for set_obj in self.bsets: # list\n msg.append(set_obj.write_bdf(size, card_writer))\n for set_obj in self.csets: # list\n msg.append(set_obj.write_bdf(size, card_writer))\n for set_obj in self.qsets: # list\n msg.append(set_obj.write_bdf(size, card_writer))\n for (set_id, set_obj) in sorted(self.setsSuper.iteritems()): # dict\n msg.append(set_obj.write_bdf(size, card_writer))\n return ''.join(msg)", "def write_collected(self, names_file, kb_file, cat_file):\n with open(names_file, 'w') as fp:\n for kb_id, name in self.collected_names.items():\n fp.write('\\t'.join(['name', kb_id, name]) + '\\n')\n with open(kb_file, 'w') as fp:\n for kb_id, tail_set in self.collected_edges.items():\n for (rel, tail_id) in tail_set:\n fp.write('\\t'.join([rel, kb_id, tail_id]) + '\\n')\n with open(cat_file, 'w') as fp:\n for c, ms in self.collected_cat_mems.items():\n fp.write(c + '\\t' + self.kb[c].name + '\\t')\n fp.write('|'.join(ms) + '\\n')", "def write():\n pass", "def writeKittiSubmission(data, prefix, index, type='stereo'):\r\n if type=='stereo':\r\n data = data*256.0\r\n data[data<1.0] = 1.0\r\n data = data.astype(np.uint16)\r\n cv2.imwrite(prefix+'/%06d_10.png' % index, data)\r\n elif type=='flow':\r\n # TODO: I didn't check this part\r\n # please refer to io_flow.h in KITTI 2015 development KIT, if you have any questions\r\n # in BGR order\r\n flow = np.zeros(data.shape[:2]+(3,))\r\n flow[:, :, 1] = data[:, :, 1]\r\n flow[:, :, 2] = data[:, :, 0]\r\n flow[:, :, 1:] = flow[:, :, 1:] * 64.0 + 32768.0\r\n flow[flow<0.0] = 0.0\r\n flow[flow>65535] = 65535\r\n flow[:, :, 0] = 1\r\n flow = flow.astype(np.uint16)\r\n cv2.imwrite(prefix + '/%06d_10.png' % index, flow)", "def write(self):\n self.output_directory.mkdir(parents=True, exist_ok=True)\n parameter_set_files = [pathlib.Path(set_name) for set_name in\n self.parameter_study.coords[_set_coordinate_key].values]\n if self.write_meta and self.provided_output_file_template:\n self._write_meta(parameter_set_files)\n if self.output_file_type == 'h5':\n self._write_dataset()\n elif self.output_file_type == 'yaml':\n self._write_yaml(parameter_set_files)\n else:\n raise ValueError(f\"Unsupported output file type '{self.output_file_type}'\")", "def write(self, unknown_category):\n\n self.unknown_category = unknown_category\n rules, lexicon = self.generate_rules_and_lexicon()\n self.rules_generated = u' '.join(map(u''.join, rules))\n cPickle.dump(lexicon, open(self.get_file_path('lexicon'), 'wb'))\n if not self.rich_upper:\n dictionary = self.generate_dictionary(lexicon)\n cPickle.dump(dictionary, open(self.get_file_path('dictionary'), 'wb'))\n script_path = self.get_file_path('script')\n binary_path = self.get_file_path('binary')\n compiler_path = self.get_file_path('compiler')\n with open(compiler_path, 'w') as f:\n if self.script_type == 'lexc':\n f.write('#!/bin/sh\\nfoma -e \"read lexc %s\" -e \"save stack %s\" -e \"quit\"' % (\n script_path, binary_path))\n else:\n f.write('#!/bin/sh\\nfoma -e \"source %s\" -e \"regex morphology;\" '\n '-e \"save stack %s\" -e \"quit\"' % (script_path, binary_path))\n os.chmod(compiler_path, 0744)\n morphology_generator = self.get_morphology_generator(rules, lexicon)\n with codecs.open(script_path, 'w', 'utf8') as f:\n for line in morphology_generator:\n f.write(line)", "def write_type_scores(path, lines):\n\n print 
\"Opening %s for score output\" % base_name(path)\n headers = [\"Type\", \"Total\", \"Detection\", \"Detection (%)\",\n \"Recognition\", \"Recognition (%)\",\n \"Correction\", \"Correction (%)\"]\n\n write_csv(path, lines, headers)", "def _write_executive_control_deck(self):\n msg = ''\n if self.executive_control_lines:\n msg = '$EXECUTIVE CONTROL DECK\\n'\n if self.sol == 600:\n newSol = 'SOL 600,%s' % self.solMethod\n else:\n newSol = 'SOL %s' % self.sol\n\n if self.iSolLine is not None:\n self.executive_control_lines[self.iSolLine] = newSol\n\n for line in self.executive_control_lines:\n msg += line + '\\n'\n return msg", "def write(self, mode):\n if mode == \"pretrain\":\n out_tmp = self.log_buffer.output\n log_string = (\"Pre-Tr ep [{}/{}] it [{}/{}] BT {:.3f} DT {:.3f} acc {:.3f}\\n\"\n \"loss_total {:.3f}\").format(\n self.epoch+1, self.max_epochs, self.iter, self.max_iters,\n out_tmp[\"batch_time\"], out_tmp[\"data_time\"],\n self.acc, out_tmp[\"train/loss/total\"])\n self.logger.info(log_string)\n\n elif mode == \"train\":\n out_tmp = self.log_buffer.output\n log_string = (\"Tr loop {} ep [{}/{}] it [{}/{}] BT {:.3f} DT {:.3f} acc {:.3f}\\n\"\n \"loss_total {:.3f} loss_C {:.3f} loss_GD {:.3f}\\n\"\n \"score: max {:.2f} min {:.2f} mean {:.2f} select {} samples in latest iteration\").format(\n self.loop+1, self.epoch+1, self.max_epochs+self.cfg.warmup_epochs, self.iter, self.actual_max_iters,\n out_tmp[\"batch_time\"], out_tmp[\"data_time\"], self.acc,\n out_tmp[\"train/loss/total\"], out_tmp[\"train/loss/loss_C\"],\n out_tmp[\"train/loss/loss_GD\"],\n *self.meta[\"score_statistic\"],\n self.meta[\"n_select\"])\n\n self.logger.info(log_string)\n type(self).__base__.__base__.__base__.write(self)\n\n elif mode == \"eval\":\n print_string = (\"Te it [{}/{}] Time {:.3f} \"\n \"Target acc {:.3f} Best acc so far {:.3f} in epoch {}\").format(\n self.iter, self.actual_max_iters,\n self.timer.since_last(),\n self.acc, self.best_acc, self.best_epoch)\n self.logger.info(print_string + \"\\n\")\n self.tb_writer.add_scalars(\n \"acc\", {\n \"test\": self.acc,\n \"best_acc\": self.best_acc\n }, self.iter)\n\n elif mode == \"loop\":\n print_string = \" Test acc after loop {}: {:.2f}, the best is {:.2f} in loop {}\".format(\n self.loop + 1, self.acc,\n self.best_acc_loop, self.best_loop + 1)\n self.logger.info(print_string + \"\\n\")\n else:\n raise NotImplementedError(\"mode: {} for Solver.write()\".format(mode))", "def write_data_card(spec, data_card, channels, path):\n with open(path, \"w\") as f:\n f.write(f\"imax {str(size(data_card.bins))}\" + \"\\n\")\n f.write(\n \"jmax \"\n + str(size(data_card.processes) - size(data_card.isSignal.keys()))\n + \"\\n\"\n )\n f.write(f\"kmax {str(size(data_card.systs, 0))}\" + \"\\n\")\n\n if data_card.hasShapes:\n for channel in data_card.shapeMap.keys():\n for sample in data_card.shapeMap[channel].keys():\n f.write(\n f\"shapes {sample} {channel} {data_card.shapeMap[channel][sample][0]} {data_card.shapeMap[channel][sample][1]}\"\n )\n if size(data_card.shapeMap[channel][sample]) > 2:\n f.write(f\" {data_card.shapeMap[channel][sample][2]}\" + \"\\n\")\n else:\n f.write(\"\\n\")\n\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for bin in data_card.obs.keys():\n f.write(f\"{bin} \")\n f.write(\"\\n\")\n f.write(\"observation \")\n for channel in data_card.obs.keys():\n f.write(f\"{str(data_card.obs[channel])} \")\n f.write(\"\\n---------------------------------\\n\")\n f.write(\"bin \")\n for channel in data_card.obs.keys():\n 
for sample in data_card.exp[channel].keys():\n f.write(f\"{channel} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n f.write(f\"{sample} \")\n f.write(\"\\n\")\n f.write(\"process \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n if sample in data_card.signals:\n f.write(f\"{str(-1 * data_card.processes.index(sample))} \")\n else:\n f.write(f\"{str(data_card.processes.index(sample) + 1)} \")\n f.write(\"\\n\")\n f.write(\"rate \")\n for channel in data_card.bins:\n for sample in data_card.exp[channel].keys():\n\n f.write(f\"{str(data_card.exp[channel][sample])} \")\n f.write(\"\\n---------------------------------\\n\")\n for syst in data_card.systs:\n f.write(f\"{syst[0]} {syst[2]} \")\n for bin in syst[4].keys():\n for sample in data_card.exp[bin].keys():\n if syst[4][bin][sample] != 0:\n f.write(f\"{str(syst[4][bin][sample])} \")\n else:\n f.write(\"- \")\n\n f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for cAp in data_card.rateParams.keys():\n _dir = cAp.split(\"AND\")\n for i in range(size(data_card.rateParams[cAp], 0)):\n if size(data_card.rateParams[cAp][i][0]) > 3:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])} {data_card.rateParams[cAp][i][0][3]}\"\n )\n else:\n f.write(\n f\"{str(data_card.rateParams[cAp][i][0][0])} rateParam {_dir[0]} {_dir[1]} {str(data_card.rateParams[cAp][i][0][1])}\"\n )\n f.write(\"\\n\")\n f.write(\"\\n---------------------------------\\n\")\n for idxc, channel in enumerate(channels):\n if (\n channel in data_card.binParFlags.keys()\n and data_card.binParFlags[channel] == True\n ):\n # double check to be safe\n shapesys = False\n staterror = False\n for sample in spec[\"channels\"][idxc][\"samples\"]:\n mod_types = [mod[\"type\"] for mod in sample[\"modifiers\"]]\n if \"shapesys\" in mod_types:\n shapesys = True\n elif \"staterror\" in mod_types:\n staterror = True\n\n if shapesys:\n f.write(f\"{channel} autoMCStats 100000 0 2\" + \"\\n\")\n if staterror:\n f.write(f\"{channel} autoMCStats 0 0 2\" + \"\\n\")", "def _write_nover():\n return []", "def writeHoc(self):\n print('Writing output file %s ...' % self.outFile)\n with open(self.outFile, 'w') as fOut:\n \n def createSection(secNum):\n fOut.write('create section_%i\\n' %secNum)\n fOut.write('section_%i {\\n' %secNum)\n fOut.write('pt3dclear()\\n')\n for node in xrange(len(self.sections[secNum])):\n fOut.write('pt3dadd(%.6f, %.6f, %.6f, %.6f)\\n' \\\n % (self.sections[secNum][node][0],\n self.sections[secNum][node][1],\n self.sections[secNum][node][2],\n self.secRads[secNum][node]))\n fOut.write('}\\n')\n \n def createConnection():\n for c in xrange(len(self.connections)):\n fOut.write('connect section_%i(1), section_%i(0)\\n' \\\n % (self.connections[c][0],self.connections[c][1]))\n \n \n for sec in self.sections.keys():\n createSection(sec)\n createConnection()\n \n \n return", "def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. 
Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "def writeOutput(self):\n\n self.collect.writeOutput()", "def uitofp(self, typ):", "def output_file(cls, pki_type_enum, pki_id, raw_data, output_path):\n pki_type = None\n\n for pki_type in cls.TYPE:\n if cls.TYPE[pki_type] == pki_type_enum:\n break\n else:\n raise StandardError(\"pki type not supported: 0x%02x\" % pki_type_enum)\n\n folder_path = os.path.join(output_path, pki_type)\n if os.path.exists(folder_path) is False:\n os.makedirs(folder_path)\n\n file_name = \"%s_%d.%s\" % (pki_type, pki_id, \"key\" if \"key\" in pki_type else \"cer\")\n\n with open(os.path.join(folder_path, file_name), \"wb+\") as _file:\n _file.write(raw_data)", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def _write_endcy():\n return []", "def Writedata(self, tstep):\n \n nc = Dataset(self.outfile, 'a')\n \n nc.variables['time'][tstep] = self.time\n nc.variables['salt'][tstep] = self.salt\n nc.variables['temp'][tstep] = self.temp\n nc.variables['uc'][tstep] = self.uc\n nc.variables['vc'][tstep] = self.vc\n nc.variables['nu_v'][tstep] = self.nu_v\n nc.variables['rho'][tstep] = self.rho\n nc.variables['tau_x'][tstep] = self.tau_x\n nc.variables['tau_y'][tstep] = self.tau_y\n nc.variables['eta'][tstep] = self.eta\n \n nc.close()", "def write_data():", "def write(self, entry):\n if entry is \"all\":\n for k, item in self.nodes.items():\n if type(item) is list:\n for node in item:\n node.write()\n else:\n item.write()\n else:\n # other option is 'rooms'\n for node in self.nodes[entry]:\n node.write()", "def write_output_shifts_to_file(self, shift_output):\n pass", "def save_to_poscar(self, filename,direct=False,species_line=False): \n with open( filename, 'w' ) as F:\n F.write( self.name )\n F.write( \" 1.0\\n\" )\n F.write( mat2str( self.unit_cell, \"%16.10f\" ) )\n if species_line:\n pos = 0\n for n in self.num_per_type: \n F.write('%s '%self.species[pos])\n pos += n\n F.write('\\n')\n F.write(' '.join([str(n) for n in self.num_per_type]) )\n F.write('\\n')\n if not direct:\n F.write(\"Cart\\n\")\n F.write( mat2str( self.atoms, \"%16.10f\" ) )\n else:\n F.write(\"Direct\\n\")\n F.write( mat2str( dot(self.atoms,self.recip_cell), \"%16.10f\" ) )", "def write_minisat(self):\n num_variables = len(self.label_encodings)\n num_clauses = self.num_clauses\n clauses = self.clauses\n outfile = MinisatRunner.temp_in\n out = open(outfile,\"w\")\n try:\n out.write(\"p cnf %3d %3d\\n\" % (num_variables,num_clauses))\n for clause in clauses:\n for clause_variable in clause:\n out.write(\" %3d\" % self.minisat_encode_label(clause_variable));\n out.write(\" 0\\n\")\n finally:\n out.close()", "def write(data):", "def write(self):\n #\n if self.what == 'ecutwfc':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.set_ecutwfc(self.values[i])\n self.pwinput.write()\n #\n elif self.what == 'ecutrho':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n 
self.pwinput.SYSTEM.ecutrho = self.values[i]\n self.pwinput.write()\n elif self.what == 'kpoints':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.Nk = self.values[i]\n self.pwinput.write()\n #\n else:\n raise RuntimeError('what = %s is not implemented yet' % (self.what))\n #\n self.inputs_have_been_written = True", "def _write(self, preset_type, data):\n logger.debug('write presets for %s', self._device.name)\n with self._file_open_rlock(preset_type) as f:\n f.seek(0)\n yaml.dump(data, f, default_flow_style=False)\n f.truncate()", "def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()", "def _prepare_for_write(self):\r\n self._writer = tf.summary.FileWriter(self._model.output_path,\r\n self._tensorflow_session.graph)\r\n for mode in ('train', 'test', 'full_test'):\r\n self._expensive_ops[mode].update(self._cheap_ops[mode])\r\n self._ready_to_write = True", "def _writeOutput(self):\n head = \"Station\\tX\\tY\\tZ\\tUEast\\tUNorth\\tUUp\\tSigEast\\tSigNorth\\tSigUp\\n\"\n outFmt = \"%s\" + 9 * \"\\t%g\" + \"\\n\"\n\n f = open(self.outputFile, 'w')\n f.write(head)\n\n for stationNum in range(self.numStations):\n outLine = outFmt % (self.stations[stationNum],\n self.coords[stationNum, 0], self.coords[stationNum, 1],\n self.coords[stationNum, 2],\n self.dispNoise[stationNum, 0],\n self.dispNoise[stationNum, 1],\n self.dispNoise[stationNum, 2],\n self.sigmaEast, self.sigmaNorth, self.sigmaUp)\n f.write(outLine)\n\n f.close()\n\n return", "def write_all(self):\r\n pass", "def _write_table(self, var_type, var_data, hierarchical, print_arrays, out_stream):\n if out_stream is None:\n return\n\n # Make a dict of variables. Makes it easier to work with in this method\n var_dict = OrderedDict()\n for name, vals in var_data:\n var_dict[name] = vals\n\n # determine pathname of the system\n if self.source in ('root', 'driver', 'problem', 'root.nonlinear_solver'):\n pathname = ''\n elif '|' in self.source:\n pathname = get_source_system(self.source)\n else:\n pathname = self.source.replace('root.', '')\n if pathname.endswith('.nonlinear_solver'):\n pathname = pathname[:-17] # len('.nonlinear_solver') == 17\n\n # vars should be in execution order\n if 'execution_order' in self._var_info:\n var_order = self._var_info['execution_order']\n var_list = [var_name for var_name in var_order if var_name in var_dict]\n else:\n # don't have execution order, just sort for determinism\n var_list = sorted(var_dict.keys())\n\n top_name = pathname if pathname else 'model'\n write_var_table(pathname, var_list, var_type, var_dict,\n hierarchical=hierarchical, top_name=top_name,\n print_arrays=print_arrays, out_stream=out_stream)", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. 
Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def write_location(self, param_type, path, pool=None, cont=None):\n self.run_ior_with_params(param_type, path, pool, cont,\n self.test_file, self.ior_flags[0])", "def _save_chromosome_at_index(self, index, file_name):\n how_to_open = 'w' if index == 0 else 'a'\n with open(file_name, how_to_open) as out_file:\n for category in self.population[index].get_genes():\n out_file.write(''.join(category) + '\\t')\n out_file.write(\n '\\n{}\\n'.format(self.population[index].get_fitness())\n )", "def _write_transact_types(self, file):\n for tp in self._transact_types:\n tp.write(file)\n file.write('\\n')", "def write_compact(self, fout):\n for ignored in self.ignored_rules:\n fout.write(ignored)\n self.sort_decls()\n for (pos, (ss, pp)) in enumerate(self.cliques):\n fout.write(','.join(sorted(ss)))\n fout.write('{')\n fout.write(';'.join(pp))\n fout.write('}')", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def write_pypeit(self, output_path=None, cfg_lines=None,\n write_bkg_pairs=False, write_manual=False,\n configs=None, config_subdir=True,\n version_override=None, date_override=None):\n # Set output path\n if output_path is None:\n output_path = os.getcwd()\n\n # Find unique configurations, always ignoring any 'None'\n # configurations...\n cfg = self.unique_configurations(copy=True, rm_none=True)\n\n # Get the setups to write\n if configs is None or configs == 'all' or configs == ['all']:\n cfg_keys = list(cfg.keys())\n else:\n _configs = configs if isinstance(configs, list) else [configs]\n cfg_keys = [key for key in cfg.keys() if key in _configs]\n\n if len(cfg_keys) == 0:\n msgs.error('No setups to write!')\n\n # Grab output columns\n output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,\n write_manual=write_manual)\n\n # Write the pypeit files\n ofiles = [None]*len(cfg_keys)\n for j,setup in enumerate(cfg_keys):\n # Create the output directory\n root = '{0}_{1}'.format(self.spectrograph.name, setup)\n if config_subdir:\n odir = os.path.join(output_path, root)\n if not os.path.isdir(odir):\n os.makedirs(odir)\n else:\n odir = output_path\n # Create the output file name\n ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))\n\n # Setup dict\n setup_dict = {}\n setup_dict[f'Setup {setup}'] = {}\n for key in cfg[setup]:\n setup_dict[f'Setup {setup}'][key] = cfg[setup][key]\n \n # Get the paths\n in_cfg = np.array([setup in _set for _set in self.table['setup']])\n if not np.any(in_cfg):\n continue\n paths = np.unique(self['directory'][in_cfg]).tolist()\n\n # Get the data lines\n subtbl = self.table[output_cols][in_cfg]\n if 'calib' in output_cols:\n # calib can be a str with a list of values because in some cases (e.g. MOSFIRE) the same\n # calibration files are used for different setups. 
Here we update calib to have only the\n # value relevant for this setup.\n # find the calib value in this setup that is not a list (which is probably a science/standard)\n no_list = np.array([',' not in str(cc) for cc in subtbl['calib']])\n if np.any(no_list):\n # assign the calib value in this setup that is not a list to frames that have calib as a list\n subtbl['calib'][np.logical_not(no_list)] = subtbl['calib'][no_list][0]\n subtbl.sort(['frametype','filename'])\n #with io.StringIO() as ff:\n # subtbl.write(ff, format='ascii.fixed_width')\n # data_lines = ff.getvalue().split('\\n')[:-1]\n\n # Config lines\n if cfg_lines is None:\n cfg_lines = ['[rdx]']\n cfg_lines += [' spectrograph = {0}'.format(self.spectrograph.name)]\n\n # Instantiate a PypeItFile\n pypeItFile = inputfiles.PypeItFile(cfg_lines, paths, subtbl, setup_dict)\n # Write\n pypeItFile.write(ofiles[j], version_override=version_override,\n date_override=date_override) \n\n # Return\n return ofiles", "def _write_dataset(self):\n if self.output_file:\n if self.dryrun:\n sys.stdout.write(f\"{self.output_file.resolve()}\\n{self.parameter_study}\\n\")\n else:\n self.output_file.parent.mkdir(parents=True, exist_ok=True)\n self._conditionally_write_dataset(self.output_file, self.parameter_study)\n else:\n for parameter_set_file, parameter_set in self.parameter_study.groupby(_set_coordinate_key):\n parameter_set_file = pathlib.Path(parameter_set_file)\n # If no output file template is provided, print to stdout\n if not self.provided_output_file_template:\n sys.stdout.write(f\"{parameter_set_file.name}\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n # If overwrite is specified or if file doesn't exist\n elif self.overwrite or not parameter_set_file.is_file():\n # If dry run is specified, print the files that would have been written to stdout\n if self.dryrun:\n sys.stdout.write(f\"{parameter_set_file.resolve()}:\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n else:\n self._conditionally_write_dataset(parameter_set_file, parameter_set)", "def export_manual_pipetting(args):\n if args.type == 'purify':\n clarity_epp.export.manual_pipetting.samplesheet_purify(lims, args.process_id, args.output_file)\n elif args.type == 'dilute_library_pool':\n clarity_epp.export.manual_pipetting.samplesheet_dilute_library_pool(lims, args.process_id, args.output_file)\n elif args.type == 'multiplex_library_pool':\n clarity_epp.export.manual_pipetting.samplesheet_multiplex_library_pool(lims, args.process_id, args.output_file)\n elif args.type == 'multiplex_sequence_pool':\n clarity_epp.export.manual_pipetting.samplesheet_multiplex_sequence_pool(lims, args.process_id, args.output_file)\n elif args.type == 'normalization':\n clarity_epp.export.manual_pipetting.samplesheet_normalization(lims, args.process_id, args.output_file)\n elif args.type == 'capture':\n clarity_epp.export.manual_pipetting.samplesheet_capture(lims, args.process_id, args.output_file)\n elif args.type == 'exonuclease':\n clarity_epp.export.manual_pipetting.sammplesheet_exonuclease(lims, args.process_id, args.output_file)\n elif args.type == 'pcr_exonuclease':\n clarity_epp.export.manual_pipetting.sammplesheet_pcr_exonuclease(lims, args.process_id, args.output_file)\n elif args.type == 'mip_multiplex_pool':\n clarity_epp.export.manual_pipetting.samplesheet_mip_multiplex_pool(lims, args.process_id, args.output_file)\n elif args.type == 'mip_dilute_pool':\n clarity_epp.export.manual_pipetting.samplesheet_mip_pool_dilution(lims, args.process_id, args.output_file)\n elif args.type == 
'pool_samples':\n clarity_epp.export.manual_pipetting.samplesheet_pool_samples(lims, args.process_id, args.output_file)\n elif args.type == 'pool_magnis_pools':\n clarity_epp.export.manual_pipetting.samplesheet_pool_magnis_pools(lims, args.process_id, args.output_file)", "def write_species(mcmc_set_name, model):\n species_filename = '%s_species.txt' % mcmc_set_name\n try:\n with open(species_filename, 'w') as f:\n #f.write('\\n'.join([str(s) for s in model.rules]))\n #f.write('\\n')\n f.write('\\n'.join([str(s) for s in model.species]))\n except IOError as e:\n pass\n return species_filename", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... 
wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()", "def save(self, file):\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()", "def write(self, out, ignored, displayOptions=None):\n # \"ignored\" argument for bw compat with rmake\n if displayOptions is None:\n displayOptions = {}\n\n if displayOptions.get('showLineOrigins', False):\n lineStrs = []\n curPath = None\n for path, lineNum in self.origins:\n if path == curPath:\n continue\n else:\n lineStrs.append('%s' % (path,))\n curPath = path\n if lineStrs:\n out.write('# %s: %s\\n' % (self.name, ' '.join(lineStrs)))\n for line in self.valueType.toStrings(self.value, displayOptions):\n out.write('%-25s %s\\n' % (self.name, line))", "def write_out(self, content_content: str):\n print(\"[write_out] Computation name: \", self.comp_name)\n # meta_title_content object creation to return as a first part\n if self.write_out_part_counter < 0:\n metatitle_content = Content(self.comp_name, \"sdo:\\n\" + str(self.comp_name) + \"/streaming/p*\")\n self.queue_to_lower.put((self.packetid, metatitle_content))\n # self.cs.add_content_object(metatitle_content) TODO not needed? 
\n\n # actual content_object for streaming\n self.write_out_part_counter += 1\n content_name = self.comp_name\n content_name += \"/streaming/p\" + str(self.write_out_part_counter)\n content_object = Content(content_name, content_content)\n self.cs.add_content_object(content_object)\n print(\"[write_out] Last entry in content store:\", self.cs.get_container()[-1].content.name,\n self.cs.get_container()[-1].content.content)", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def write_out_v(vc2ptmk, ofn):\n print \"Writing out verbose to [{}]\".format(ofn)\n with codecs.open(ofn, \"w\", \"utf8\") as ofd:\n for (co, uri), infos in sorted(vc2ptmk.items()):\n for di in infos:\n ol = u\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\n co, uri, di[\"ptmk\"], di[\"spk\"], di[\"sco\"])\n ofd.write(ol)", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')", "def write(self,vname,kmz='out.kmz'):\n\n imgs=[] # to store a list of all images created\n content=[] # the content of the main kml\n vstr='files/%s_%05i.png' # format specification for images (all stored in `files/' subdirectory)\n\n # create empty files subdirectory for output images\n try:\n shutil.rmtree('files')\n except:\n pass\n os.makedirs('files')\n\n # loop through all time slices and create the image data\n # appending to the kml content string for each image\n for i in xrange(0,self.nstep,1):\n kml=ncNWRC(self.filename,istep=i)\n img=vstr % (vname,i)\n imgs.append(img)\n content.append(kml.image2kml(vname,img))\n\n # create the 
main kml file\n kml=ncNWRC.kmlstr % \\\n {'content':'\\n'.join(content),\\\n 'prog':ncNWRC.progname}\n\n # create a zipfile to store all images + kml into a single compressed file\n z=zipfile.ZipFile(kmz,'w',compression=zipfile.ZIP_DEFLATED)\n z.writestr(kmz[:-3]+'kml',kml)\n for img in imgs:\n z.write(img)\n z.close()", "def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)", "def write(self, text):\n text = open(text, 'w')\n text.write('File type = \"ooTextFile\"\\n')\n text.write('Object class = \"TextGrid\"\\n\\n')\n text.write('xmin = %f\\n' % self.__xmin)\n text.write('xmax = %f\\n' % self.__xmax)\n text.write('tiers? <exists>\\n')\n text.write('size = %d\\n' % self.__n)\n text.write('item []:\\n')\n for (tier, n) in zip(self.__tiers, range(1, self.__n + 1)):\n text.write('\\titem [%d]:\\n' % n)\n if tier.__class__ == IntervalTier: \n text.write('\\t\\tclass = \"IntervalTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tintervals: size = %d\\n' % len(tier))\n for (interval, o) in zip(tier, range(1, len(tier) + 1)): \n text.write('\\t\\t\\tintervals [%d]:\\n' % o)\n text.write('\\t\\t\\t\\txmin = %f\\n' % interval.xmin())\n text.write('\\t\\t\\t\\txmax = %f\\n' % interval.xmax())\n text.write('\\t\\t\\t\\ttext = \"%s\"\\n' % interval.mark())\n else: # PointTier\n text.write('\\t\\tclass = \"TextTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tpoints: size = %d\\n' % len(tier))\n for (point, o) in zip(tier, range(1, len(tier) + 1)):\n text.write('\\t\\t\\tpoints [%d]:\\n' % o)\n text.write('\\t\\t\\t\\ttime = %f\\n' % point.time())\n text.write('\\t\\t\\t\\tmark = \"%s\"\\n' % point.mark())\n text.close()", "def _write_data_out(solutions, unable_to_resolve, unresolvables):\n print('')\n print('------------------------')\n print('--- Progress So Far: ---')\n print('Solved: ' + str(len(solutions)))\n print('Error while resolving: ' + str(len(unable_to_resolve)))\n print('Unresolvable conflicts: ' + str(len(unresolvables)))\n print('Saving progress to json.')\n print('------------------------')\n print('')\n json.dump(solutions, open(fname_solutions, 'w'))\n json.dump(unable_to_resolve, open(fname_errors, 'w'))\n json.dump(unresolvables, open(fname_unresolvables, 'w'))", "def ior_write_dataset(self):\n for oclass in self.obj_class:\n for sizes in self.ior_chu_trs_blk_size:\n # Skip the object type if server count does not meet the minimum\n # EC object server count\n if oclass[1] > self.server_count:\n continue\n self.ior_param_update(oclass, sizes)\n\n # Create the new container with correct redundancy factor\n # for EC object type\n self.ec_contaier_create(oclass[0])\n self.update_ior_cmd_with_pool(oclass=oclass[0],\n create_cont=False)\n # Start IOR Write\n self.container.uuid = self.ec_container.uuid\n self.start_ior_load(operation=\"WriteRead\", percent=1,\n create_cont=False)\n self.cont_uuid.append(self.ior_cmd.dfs_cont.value)", "def _write_rocks(parameters):\n # Reorder rocks\n if parameters[\"rocks_order\"] is not None:\n order = parameters[\"rocks_order\"]\n for rock in parameters[\"rocks\"].keys():\n if rock not in order:\n 
order.append(rock)\n else:\n order = parameters[\"rocks\"].keys()\n\n # Formats\n fmt = block_to_format[\"ROCKS\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n\n fmt = block_to_format[\"RPCAP\"]\n fmt3 = str2format(fmt)\n\n out = []\n for k in order:\n # Load data\n data = parameters[\"rocks\"][k]\n\n # Number of additional lines to write per rock\n cond = any(\n data[k] is not None\n for k in [\n \"compressibility\",\n \"expansivity\",\n \"conductivity_dry\",\n \"tortuosity\",\n \"klinkenberg_parameter\",\n \"distribution_coefficient_3\",\n \"distribution_coefficient_4\",\n ]\n )\n nad = (\n 2\n if \"relative_permeability\" in data.keys() or \"capillarity\" in data.keys()\n else int(cond)\n )\n\n # Permeability\n per = data[\"permeability\"]\n per = [per] * 3 if not numpy.ndim(per) else per\n if not (isinstance(per, (list, tuple, numpy.ndarray)) and len(per) == 3):\n raise TypeError()\n\n # Record 1\n values = [\n k,\n nad if nad else \"\",\n data[\"density\"],\n data[\"porosity\"],\n per[0],\n per[1],\n per[2],\n data[\"conductivity\"],\n data[\"specific_heat\"],\n ]\n out += write_record(values, fmt1)\n\n # Record 2\n if cond:\n values = [\n data[\"compressibility\"],\n data[\"expansivity\"],\n data[\"conductivity_dry\"],\n data[\"tortuosity\"],\n data[\"klinkenberg_parameter\"],\n data[\"distribution_coefficient_3\"],\n data[\"distribution_coefficient_4\"],\n ]\n out += write_record(values, fmt2)\n else:\n out += write_record([], []) if nad == 2 else []\n\n # Relative permeability / Capillary pressure\n if nad == 2:\n for key in [\"relative_permeability\", \"capillarity\"]:\n if key in data.keys():\n values = [data[key][\"id\"], None]\n values += list(data[key][\"parameters\"])\n out += write_record(values, fmt3)\n else:\n out += write_record([], [])\n\n return out", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def print_cat(self):\n for ion in self.cations:\n data_str = (\"HETATM\", int(self.nmb) + ion, self.cations[ion][0], \"\",\n self.cations[ion][0],\"\", ion, \"\",\n Vector.x(self.cations[ion][1]), Vector.y(self.cations[ion][1]),\n Vector.z(self.cations[ion][1]), 1.00, 0.00,\n self.cations[ion][0], \"\",\"\\n\")\n out_str = self.pdb_format % data_str\n self.output.write(out_str)\n self.output.close()\n return 1", "def write( data ):", "def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write(self):\n\t\traise NotImplementedError('%s: No write function implemented!' % self.name)" ]
[ "0.55175173", "0.5354752", "0.53262687", "0.52530247", "0.52530247", "0.5227527", "0.5203227", "0.5147186", "0.5113114", "0.5088805", "0.5083531", "0.5081513", "0.5079073", "0.5070704", "0.5031757", "0.50116146", "0.49944216", "0.4976218", "0.49679434", "0.4965163", "0.49638355", "0.49420896", "0.49406436", "0.49382824", "0.4930062", "0.49161944", "0.49101913", "0.49077344", "0.49075034", "0.49075034", "0.49051678", "0.4903888", "0.48873538", "0.4886703", "0.4883068", "0.4879201", "0.48682767", "0.48490152", "0.48451948", "0.48237723", "0.4822621", "0.48179036", "0.4813153", "0.48073673", "0.47990203", "0.47989547", "0.47989106", "0.4796384", "0.47905886", "0.47754845", "0.47751337", "0.4744116", "0.47399798", "0.47376123", "0.47285652", "0.47264627", "0.4726446", "0.47253665", "0.47207078", "0.4717973", "0.4717821", "0.47109413", "0.4698698", "0.46956265", "0.46919566", "0.46902812", "0.46794719", "0.46760723", "0.46722263", "0.46720192", "0.46705952", "0.46659726", "0.4664864", "0.4664065", "0.46570903", "0.46555915", "0.46502474", "0.46463642", "0.46412328", "0.464027", "0.4632712", "0.46308765", "0.4627238", "0.46264184", "0.4607432", "0.459859", "0.45977858", "0.45961845", "0.45955577", "0.45951152", "0.45910856", "0.45821458", "0.45814675", "0.45775452", "0.45751217", "0.45699745", "0.45699057", "0.45693618", "0.45689428", "0.45675093" ]
0.5732155
0
set the box sizes and start row for each psf image
def _set_psf_layout_hst(self):
    print('setting psf layout for HST')

    obj_data=self.obj_data

    total_psf_pixels = 0
    psf_start_row = 0
    for iobj in range(obj_data.size):
        if (iobj+1) % 100 == 0:
            print(' %d/%d' % (iobj+1,obj_data.size))

        # note assuming same psf for all "epochs"
        psf_im = self.psf_data.get_psf(iobj)

        psf_shape = psf_im.shape
        psf_npix = psf_im.size
        cen = (np.array(psf_shape)-1.0)/2.0

        # we will expand the psfs
        for icut in range(obj_data['ncutout'][iobj]):
            obj_data['psf_row_size'][iobj,icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj,icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj,icut] = cen[0]
            obj_data['psf_cutout_col'][iobj,icut] = cen[1]
            obj_data['psf_start_row'][iobj,icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut in range(obj_data['ncutout'][iobj]):\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = psf_data[file_id]\n\n pim = p.get_rec(row,col)\n cen = p.get_center(row,col)\n\n psf_shape = pim.shape\n psf_npix = pim.size\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def build_filler_images(self):", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def _get_boxes(self, idx):\n # Load Image\n path = self.df.hsi_path.iloc[idx]\n im = self._load_im(path)\n\n # Crop out box\n r_box_im = self.df.width.iloc[idx] / im.shape[-1] # Ratio of RGB im box coords to load im width (e.g., r=10 for hsi)\n box = np.array([self.df.ymin.iloc[idx], self.df.ymax.iloc[idx], self.df.xmin.iloc[idx], self.df.xmax.iloc[idx]])\n box = np.around(box / r_box_im).astype(int)\n\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n if np.any(np.array(crop_im.shape) == 0):\n print('[WARNING] Loaded box has zero shape and is sketchily inflated. 
TODO: skip this box with ID', idx)\n if box[0] == self.df.width.iloc[idx]/r_box_im: box[0] -= 1\n if box[2] == self.df.height.iloc[idx]/r_box_im: box[2] -= 1\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n\n target = {}\n target[\"labels\"] = self.df.class_id.iloc[idx]\n target[\"uid\"] = self.df.uid.iloc[idx]\n \n\n return crop_im, target", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def setBoxsize(length,width,height):\n return length,width,height", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n if len(results[key]) > 0:\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n 
cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def analyzeImages(path, name_type, box1_size, box2_size, box3_size, box4_size, box5_size):\n \n folders = [f for f in sorted(glob.glob(path + \"/**\"))]\n \n for folder in folders: \n \n # to save this data frame in a csv file\n \n files = [f for f in sorted(glob.glob(folder + \"/**\" + \".jpg\"))]\n \n centroidsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n headsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n \n img_array1 = []\n img_array2 = []\n img_array3 = []\n img_array4 = []\n img_array5 = []\n\n for file in files:\n \n print(file)\n \n centroidsDf[\"frame\"][files.index(file)] = files.index(file)+1\n headsDf[\"frame\"][files.index(file)] = files.index(file)+1\n \n img = cv2.imread(file)\n \n ## FLY 1 ##\n\n box1 = img[box1_size[0]:box1_size[1], box1_size[2]:box1_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box1, file) \n \n # add the centroid and head locations on the image \n box1 = cv2.circle(box1, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box1 = cv2.circle(box1, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array1.append(box1)\n \n # add the positions in the final data frame\n centroidsDf[\"fly1_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly1_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly1_x\"][files.index(file)] = x_head\n headsDf[\"fly1_y\"][files.index(file)] = y_head\n \n ## FLY 2 ##\n \n box2 = img[box2_size[0]:box2_size[1], box2_size[2]:box2_size[3]]\n \n # image processing to get the centroid 
and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box2, file)\n \n # add the centroid and head locations on the image \n box2 = cv2.circle(box2, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box2 = cv2.circle(box2, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array2.append(box2)\n \n # add the positions in the final data frame \n centroidsDf[\"fly2_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly2_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly2_x\"][files.index(file)] = x_head\n headsDf[\"fly2_y\"][files.index(file)] = y_head\n \n ## FLY 3 ##\n\n box3 = img[box3_size[0]:box3_size[1], box3_size[2]:box3_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box3, file)\n \n # add the centroid and head locations on the image \n box3 = cv2.circle(box3, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box3 = cv2.circle(box3, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array3.append(box3)\n\n # add the positions in the final data frame\n centroidsDf[\"fly3_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly3_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly3_x\"][files.index(file)] = x_head\n headsDf[\"fly3_y\"][files.index(file)] = y_head\n \n ## FLY 4 ##\n \n box4 = img[box4_size[0]:box4_size[1], box4_size[2]:box4_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box4, file)\n \n # add the centroid and head locations on the image \n box4 = cv2.circle(box4, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box4 = cv2.circle(box4, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array4.append(box4)\n \n # add the positions in the final data frame\n centroidsDf[\"fly4_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly4_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly4_x\"][files.index(file)] = x_head\n headsDf[\"fly4_y\"][files.index(file)] = y_head\n \n ## FLY 5 ##\n \n # the fifth fly is not present in all the genetic strains \n if (box5_size != []):\n box5 = img[box5_size[0]:box5_size[1], box5_size[2]:box5_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box5, file)\n \n # add the centroid and head locations on the image \n box5 = cv2.circle(box5, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box5 = cv2.circle(box5, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array5.append(box5)\n \n # add the positions in the final data frame\n centroidsDf[\"fly5_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly5_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly5_x\"][files.index(file)] = x_head\n headsDf[\"fly5_y\"][files.index(file)] = y_head\n \n # save the data frame in a .csv file, \n # one for the centroids and one for the heads\n #centroidsDf.to_csv(folder+\"/centroids.csv\", index = None, header=True)\n #headsDf.to_csv(folder+\"/heads.csv\", index = None, header=True)\n \n \n ## CREATE THE VIDEOS ##\n \n height, width, _ = box1.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_1_' + str(folders.index(folder)+1)+ '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array1)):\n out.write(img_array1[i])\n out.release()\n \n height, width, _ = box2.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_2_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array2)):\n 
out.write(img_array2[i])\n out.release()\n \n height, width, _ = box3.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_3_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array3)):\n out.write(img_array3[i])\n out.release()\n \n height, width, _ = box4.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_4_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array4)):\n out.write(img_array4[i])\n out.release()\n \n if (box5_size != []):\n height, width, _ = box5.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_5_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array5)):\n out.write(img_array5[i])\n out.release()", "def _iter_images_rects(self):\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - (len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height", "def _assign_sizes(self):", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = 
xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n 
widget.SetPosition((rect.x-1, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)", "def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # 
Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])", "def _images_and_boxes_preprocessing(self, imgs, boxes, gt_boxes=None):\n # Image [0, 255] -> [0, 1].\n imgs = imgs.float()\n imgs = imgs / 255.0\n\n height, width = imgs.shape[2], imgs.shape[3]\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\n # range of [0, 1].\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = transform.clip_boxes_to_image(boxes, height, width)\n\n if self._split == \"train\":\n # Train split\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._jitter_min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = transform.random_crop(\n imgs, self._crop_size, boxes=boxes\n )\n\n # Random flip.\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\n elif self._split == \"val\":\n # Val split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n # Apply center crop for val split\n imgs, boxes = transform.uniform_crop(\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n elif self._split == \"test\":\n # Test split\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n else:\n raise NotImplementedError(\n \"{} split not supported yet!\".format(self._split)\n )\n\n # Do color augmentation (after divided by 255.0).\n if self._split == \"train\" and self._use_color_augmentation:\n if not self._pca_jitter_only:\n imgs = transform.color_jitter(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = transform.lighting_jitter(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = transform.color_normalization(\n imgs,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n # Note that Kinetics pre-training uses RGB!\n imgs = imgs[:, [2, 1, 0], ...]\n\n boxes = transform.clip_boxes_to_image(\n boxes, self._crop_size, self._crop_size\n )\n\n return imgs, boxes", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def stage_one(self, imgs, threshold, factor, minsize, nms_threshold):\n\n width = imgs.shape[-2]\n height = imgs.shape[-1]\n num_img = imgs.shape[0]\n\n # Compute valid scales\n scales = []\n cur_width = width\n cur_height = height\n cur_factor = 1\n while cur_width >= 12 and cur_height >= 12:\n if 12 / cur_factor >= minsize: # Ignore boxes that smaller than minsize\n\n w = cur_width\n h = cur_height\n scales.append((w, h, cur_factor))\n\n cur_factor *= factor\n cur_width = math.ceil(cur_width * factor)\n cur_height = math.ceil(cur_height * factor)\n\n # Get candidate boxesi ph\n candidate_boxes = torch.empty(0, dtype=torch.int32, device=self.device)\n candidate_scores = torch.empty(0, device=self.device)\n candidate_offsets = torch.empty(\n 0, dtype=torch.float32, device=self.device)\n all_img_labels = torch.empty(0, dtype=torch.int32, device=self.device)\n for w, h, f in scales:\n resize_img = torch.nn.functional.interpolate(\n imgs, size=(w, h), mode='bilinear')\n p_distribution, box_regs, _ = self.pnet(resize_img)\n\n candidate, scores, offsets, img_labels = self._generate_bboxes(\n p_distribution, box_regs, f, threshold)\n\n candidate_boxes = torch.cat([candidate_boxes, candidate])\n candidate_scores = torch.cat([candidate_scores, scores])\n candidate_offsets = torch.cat([candidate_offsets, offsets])\n all_img_labels = torch.cat([all_img_labels, img_labels])\n\n \n if candidate_boxes.shape[0] != 0:\n candidate_boxes = self._calibrate_box(\n candidate_boxes, candidate_offsets)\n candidate_boxes = self._convert_to_square(candidate_boxes)\n candidate_boxes = self._refine_boxes(\n candidate_boxes, width, height)\n \n final_boxes = torch.empty(0, dtype=torch.int32, device=self.device)\n final_img_labels = torch.empty(0, dtype=torch.int32, device=self.device)\n for i in range(num_img):\n mask = all_img_labels == i\n keep = 
func.nms(candidate_boxes[mask].cpu().numpy(),\n candidate_scores[mask].cpu().numpy(), nms_threshold)\n final_boxes = torch.cat([final_boxes, candidate_boxes[mask][keep]])\n final_img_labels = torch.cat([final_img_labels, all_img_labels[mask][keep]])\n\n return torch.cat([final_boxes, final_img_labels.unsqueeze(1 )], -1)\n else:\n return candidate_boxes", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes):\r\n\r\n height, width, _ = imgs[0].shape\r\n\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n # `transform.py` is list of np.array. However, for AVA, we only have\r\n # one np.array.\r\n boxes = [boxes]\r\n # The image now is in HWC, BGR format.\r\n if self._split == \"train\": # \"train\"\r\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = cv2_transform.random_crop_list(\r\n imgs, self._crop_size, order=\"HWC\", boxes=boxes\r\n )\r\n if self.random_horizontal_flip:\r\n # random flip\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 0.5, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"val\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n imgs, boxes = cv2_transform.spatial_shift_crop_list(\r\n self._crop_size, imgs, 1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"test\":\r\n # Short side to test_scale. 
Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported split mode {}\".format(self._split))\r\n\r\n # Convert image to CHW keeping BGR order.\r\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\r\n\r\n # Image [0, 255] -> [0, 1].\r\n imgs = [img / 255.0 for img in imgs]\r\n\r\n imgs = [\r\n np.ascontiguousarray(\r\n # img.reshape((3, self._crop_size, self._crop_size))\r\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\r\n ).astype(np.float32)\r\n for img in imgs\r\n ]\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = cv2_transform.color_jitter_list(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = cv2_transform.lighting_list(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = [\r\n cv2_transform.color_normalization(\r\n img,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n for img in imgs\r\n ]\r\n\r\n # Concat list of images to single ndarray.\r\n imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)\r\n\r\n if not self._use_bgr:\r\n # Convert image format from BGR to RGB.\r\n imgs = imgs[::-1, ...]\r\n\r\n imgs = np.ascontiguousarray(imgs)\r\n imgs = torch.from_numpy(imgs)\r\n boxes = cv2_transform.clip_boxes_to_image(\r\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\r\n )\r\n return imgs, boxes", "def __init__(self,jx,jy,img,geo,mbox=10,hist=False,zoomc=False,pfile=False):\n \n self.mbox = mbox # Box half-size\n self.nbox = 2*mbox + 1 # Box size\n # Adjust central pixel location to ensure box fits within full image\n self.ix = int( np.median( [ mbox, jx, img.nx-mbox-1 ] ) )\n self.iy = int( np.median( [ mbox, jy, img.ny-mbox-1 ] ) )\n iymin = self.iy - mbox\n iymax = self.iy + mbox\n ixmin = self.ix - mbox\n ixmax = self.ix + mbox\n label = img.label # Copy information from full disk image\n self.label = label\n self.desc = img.desc[label]\n self.img = img.images[label][iymin:iymax+1,ixmin:ixmax+1]\n self.disp(zoomc)\n if hist: self.hist(geo,pfile)", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes, gt_boxes=None, min_scale=None, crop_size=None, n_imgs=0):\n\n height, width, _ = imgs[0].shape\n\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\n\n # `transform.py` is list of np.array. 
However, for AVA, we only have\n # one np.array.\n boxes = [boxes]\n\n crop_size = crop_size if self.multigrid_enabled and crop_size is not None else self._crop_size\n \n if self._split != 'train':\n assert gt_boxes is not None\n gt_boxes = cv2_transform.clip_boxes_to_image(gt_boxes, height, width)\n gt_boxes = [gt_boxes]\n\n # The image now is in HWC, BGR format.\n if self._split == \"train\": # \"train\"\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\n imgs,\n min_size=self._jitter_min_scale if not self.multigrid_enabled and min_scale is None else min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = cv2_transform.random_crop_list(\n imgs, crop_size, order=\"HWC\", boxes=boxes, n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n if self.random_horizontal_flip:\n # random flip\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 0.5, imgs, order=\"HWC\", boxes=boxes, \n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n # elif self._split == \"val\":\n # # Short side to test_scale. Non-local and STRG uses 256.\n # imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\n # boxes, gt_boxes = cv2_transform.scale_boxes(\n # self._crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n # )\n # boxes, gt_boxes = [boxes], [gt_boxes]\n # imgs, boxes, gt_boxes = cv2_transform.spatial_shift_crop_list(\n # self._crop_size, imgs, 1, boxes=boxes, gt_boxes=gt_boxes\n # )\n\n # if self._test_force_flip:\n # imgs, boxes = cv2_transform.horizontal_flip_list(\n # 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes\n # )\n elif self._split == \"val\" or self._split == \"test\":\n # Short side to test_scale. 
Non-local and STRG uses 256.\n imgs = [cv2_transform.scale(crop_size, img) for img in imgs]\n boxes, gt_boxes = cv2_transform.scale_boxes(\n crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n )\n boxes, gt_boxes = [boxes], [gt_boxes]\n\n if self._test_force_flip:\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes,\n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n else:\n raise NotImplementedError(\n \"Unsupported split mode {}\".format(self._split)\n )\n\n # Convert image to CHW keeping BGR order.\n if self.cfg.MODEL.USE_SPA_CONF:\n try:\n if len(imgs[n_imgs].shape) == 2:\n imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n elif len(imgs[n_imgs].shape) > 3:\n imgs[n_imgs:] = [np.expand_dims(img.squeeze(), axis=-1) for img in imgs[n_imgs:]]\n except:\n import pdb; pdb.set_trace()\n \n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # try:\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\n # except:\n # print('imgs[n_imgs].shape:', imgs[n_imgs].shape)\n # print('len(imgs):', len(imgs))\n # print('n_imgs:', n_imgs)\n # import pdb; pdb.set_trace()\n\n # Image [0, 255] -> [0, 1].\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [img / 255.0 for img in imgs[:n_imgs]]\n else: \n imgs = [img / 255.0 for img in imgs]\n\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [\n np.ascontiguousarray(\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[:n_imgs]\n ]\n imgs[n_imgs:] = [\n np.ascontiguousarray(\n img.reshape((1, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[n_imgs:]\n ]\n else:\n imgs = [\n np.ascontiguousarray(\n # img.reshape((3, self._crop_size, self._crop_size))\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs\n ]\n\n # Do color augmentation (after divided by 255.0).\n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = imgs[n_imgs:]\n imgs = imgs[:n_imgs]\n if self._split == \"train\" and self._use_color_augmentation: # False\n if not self._pca_jitter_only:\n imgs = cv2_transform.color_jitter_list(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = cv2_transform.lighting_list(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = [\n cv2_transform.color_normalization(\n img,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n for img in imgs\n ]\n\n # Concat list of images to single ndarray.\n imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in imgs], axis=1\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n imgs = imgs[::-1, ...]\n\n imgs = np.ascontiguousarray(imgs)\n imgs = torch.from_numpy(imgs)\n \n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in skeleton_imgs], axis=1\n )\n skeleton_imgs = np.ascontiguousarray(skeleton_imgs)\n skeleton_imgs = torch.from_numpy(skeleton_imgs)\n\n boxes = cv2_transform.clip_boxes_to_image(\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if gt_boxes is not None:\n gt_boxes = 
cv2_transform.clip_boxes_to_image(\n gt_boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if self.cfg.MODEL.USE_SPA_CONF:\n return (imgs, skeleton_imgs, boxes) if gt_boxes is None else (imgs, skeleton_imgs, boxes, gt_boxes)\n else:\n return (imgs, boxes) if gt_boxes is None else (imgs, boxes, gt_boxes)", "def set_figure_size(self):\n lims, _ = self.set_lims()\n size_fac = 50\n paperSizeFac = 0.65\n one_dec = 1.6\n xdecs = np.log10(lims(1)) - np.log10(lims(0))\n one_dec = one_dec * 4 / xdecs\n ydecs = np.log10(lims[3]) - np.log10(lims[2])\n paper_width = xdecs * one_dec\n paper_height = (ydecs + 3) * one_dec\n paper_height = min([paper_height, 9])\n rectScreen = [0.5, 0.5, paper_width, paper_height] * size_fac\n rectPaper = [1.0, 1.0, paper_width * paperSizeFac, paper_height * paperSizeFac]\n\n rectRho = [0.15, 0.15 + 2.3 / (ydecs + 3), 0.8, ydecs / (ydecs + 3) * 0.8]\n rectPhi = [0.15, 0.15, 0.8, 2 / (ydecs + 3) * 0.8]\n rects = {\n \"Screen\": rectScreen,\n \"Paper\": rectPaper,\n \"Rho\": rectRho,\n \"Phi\": rectPhi,\n }\n return rects", "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def configure_grid(self):\r\n\r\n for r in range(3):\r\n self.rowconfigure(r, weight=1)\r\n for c in range(3):\r\n self.columnconfigure(c, weight=1)", "def create_prior_boxes(self):\n # value of k for each feature map to create k^2 boxes for each feature map\n feature_map_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5}\n\n # scale for boxes across different feature maps. boxes for inner feature maps\n # are scaled much lower to detect small objects\n obj_scales = {'conv4_3': 0.1, 'conv7': 0.21, 'conv8_2': 0.255, 'conv9_2': 0.30}\n\n # Defined aspect ratio calculated from mean of (w/h) across all bounding boxes\n # from the dataset. The mean is 0.66 with deviation of 0.07. 
So aspect ratio is kept\n # at 0.66 for all feature maps\n aspect_ratios = {'conv4_3': [0.5], 'conv7': [0.55], 'conv8_2': [0.6], 'conv9_2': [.66]}\n\n fmaps = list(feature_map_dims.keys())\n prior_boxes = []\n for k, fmap in enumerate(fmaps):\n # for each feature map, create k*k boxes\n for i in range(feature_map_dims[fmap]):\n for j in range(feature_map_dims[fmap]):\n # calculate center coordinates of boxes\n cx = (j + 0.5) / feature_map_dims[fmap]\n cy = (i + 0.5) / feature_map_dims[fmap]\n\n # For each\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (1930, 4)\n prior_boxes.clamp_(0, 1) # (1930, 4)\n\n return prior_boxes", "def setup_image(self):\n # Create the correct size image for the table\n rows = self.table.count('\\n')\n columns = self.table.split('\\n')[0].count('-') + self.table.split('\\n')[0].count('+')\n self.img = Image.new('RGB', ((columns * 12) + 24, rows * 21 + 48), color=(54, 57, 63))\n\n # Initialize font and drawing object\n self.font = ImageFont.truetype('../extra_files/cour.ttf', 20)\n self.draw = ImageDraw.Draw(self.img)\n\n # Draw the table without markings\n for x in range(5):\n self.draw.text((12, 12), self.table, font=self.font, fill=(255, 255, 255))", "def boxes_postprocess(boxes, image_meta):\n if 'scales' in image_meta:\n boxes[:, [0, 2]] /= image_meta['scales'][1]\n boxes[:, [1, 3]] /= image_meta['scales'][0]\n\n if 'padding' in image_meta:\n boxes[:, [0, 2]] -= image_meta['padding'][2]\n boxes[:, [1, 3]] -= image_meta['padding'][0]\n\n if 'crops' in image_meta:\n boxes[:, [0, 2]] += image_meta['crops'][2]\n boxes[:, [1, 3]] += image_meta['crops'][0]\n\n if 'flipped' in image_meta and image_meta['flipped']:\n image_width = image_meta['drifted_size'][1] if 'drifted_size' in image_meta else \\\n image_meta['orig_size'][1]\n boxes_widths = boxes[:, 2] - boxes[:, 0] + 1.\n boxes[:, 0] = image_width - 1 - boxes[:, 2]\n boxes[:, 2] = boxes[:, 0] + boxes_widths - 1.\n\n if 'drifts' in image_meta:\n boxes[:, [0, 2]] += image_meta['drifts'][1]\n boxes[:, [1, 3]] += image_meta['drifts'][0]\n\n return boxes", "def draw_boxes(bboxes: [[int]], img: 'np.array', line_width: int=2) -> 'np.array':\n for x, y, w, h in bboxes:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), line_width)\n return img", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks 
when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def fillingrid(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.fillinpercent(n)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setValue(self.currentnsigs[n])\n else:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n self.nsspins[n].setValue(self.currentnsigs[n])\n self.fillinpercent(n)", "def create_flowbox(self, flowbox, frame_list):\n\n for num_frame in frame_list:\n grid = Gtk.Grid()\n btn = self.new_thumbnail_button(num_frame)\n\n widget_cls_label = Gtk.Label()\n widget_cls_label.set_text(\"?\")\n widget_cls_label.set_size_request(20, 20)\n widget_cls_label.connect(\"draw\", self.area_on_draw, {'frame': num_frame, 'widget_label': widget_cls_label})\n # Add drawing area\n grid.add(btn)\n grid.attach_next_to(widget_cls_label, btn, Gtk.PositionType.BOTTOM, 1, 2)\n\n flowbox.add(grid)\n self.flowbox_layout = flowbox", "def updateFootprintBbox(self):\n # Pull out the image bounds of the parent Footprint\n self.bb = self.fp.getBBox()\n if not self.imbb.contains(self.bb):\n raise ValueError(('Footprint bounding-box %s extends outside image bounding-box %s') %\n (str(self.bb), str(self.imbb)))\n self.W, self.H = self.bb.getWidth(), self.bb.getHeight()\n self.x0, self.y0 = self.bb.getMinX(), self.bb.getMinY()\n self.x1, self.y1 = self.bb.getMaxX(), self.bb.getMaxY()", "def boxer(imgfile, parttree, outstack, boxsize):\n imgarray = mrc.read(imgfile)\n boxedparticles = boxerMemory(imgarray, parttree, boxsize)\n apImagicFile.writeImagic(boxedparticles, outstack)\n return True", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n 
dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def setpopbox(ty,slist,scaledtime,rootpop,poptree):\r\n wadjust = \"\"\r\n for i in range(numpops-1):\r\n wadjust += \"00\"\r\n if(scaledtime != []):\r\n minx_popbox = textwide(wadjust+\"0.00 MYR\", tfactor)\r\n else:\r\n minx_popbox = textwide(wadjust+\"0.00 tu\", tfactor)\r\n minx_popbox /= gv[\"globalscale\"]\r\n if gv[\"localxscale\"] > 0:\r\n minx_popbox /= gv[\"localxscale\"]\r\n\r\n popxvals = []\r\n## if scaledpop == [] then no text is written on time split line and there is more width to work with\r\n for i in range(2*numpops - 1):\r\n## left side temporarily at zero, right side temporarily at upper confidence interval\r\n popxvals.append( [0,slist[4][4][i][1]])\r\n (width,c,popxvals, leftpoint,rightpoint) = centerbox(rootpop,0,popxvals[rootpop][1],poptree,popxvals)\r\n popxvals = popadjustx(popxvals,minx_popbox)\r\n popbox = []\r\n\r\n ## maxwide will be used to adjust the width as a scaler so the part furthest to the right is not too far out\r\n maxwide = 0\r\n for i in range(2*numpops-1):\r\n if maxwide < (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1])):\r\n maxwide = (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))\r\n maxwide = maxwide/(1.0-minx_popbox)\r\n\r\n if gv[\"localxscale\"] > 0:\r\n maxwide *= gv[\"localxscale\"]\r\n\r\n farright = 0\r\n confint = []\r\n for i in range(2*numpops-1):\r\n confint.append([])\r\n confint[i].append(minx_popbox + ((popxvals[i][1] - (slist[4][4][i][1]-slist[4][4][i][2]))/maxwide))\r\n confint[i].append(minx_popbox + ((popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))/maxwide))\r\n if confint[i][1] > farright:\r\n farright = confint[i][1]\r\n popbox.append([[],[]])\r\n popbox[i][0].append(minx_popbox + popxvals[i][0]/maxwide)\r\n popbox[i][1].append(minx_popbox + popxvals[i][1]/maxwide)\r\n if poptree[i][1] == -1:\r\n popbox[i][0].append(gv[\"lineINFy\"])\r\n else:\r\n popbox[i][0].append(ty[poptree[i][1]-1][0])\r\n if poptree[i][0] == 0:\r\n popbox[i][1].append(gv[\"line0y\"])\r\n else:\r\n popbox[i][1].append(ty[poptree[i][0]-1][0])\r\n return popbox,maxwide, confint, farright", "def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize)\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n colors = dict()\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n# crop_img = img[xmin:(xmax - xmin),xmax:(ymax - ymin)]\n# misc.imsave('1.jpg', crop_img)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n 
edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = CLASSES[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n plt.show()", "def __init__(self,phosphene_resolution=(50,50), size=(480,480), jitter=0.35, intensity_var=0.9, aperture=.66, sigma=0.8, custom_grid=None):\n if custom_grid is None:\n self.phosphene_resolution = phosphene_resolution\n self.size = size\n self.phosphene_spacing = np.divide(size,phosphene_resolution)\n self.jitter = jitter\n self.intensity_var = intensity_var\n self.grid = self.create_regular_grid(self.phosphene_resolution,self.size,self.jitter,self.intensity_var)\n self.aperture = np.round(aperture*self.phosphene_spacing[0]).astype(int) #relative aperture > dilation kernel size\n else:\n self.grid = custom_grid\n self.aperture = aperture\n self.sigma = sigma\n self.dilation_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.aperture,self.aperture))\n self.k_size = 11 #np.round(4*sigma+1).astype(int) # rule of thumb: choose k_size>3*sigma", "def sizes(options):\n # import data\n pixels = dict() # volumes are given in #pixels\n snap_mask = \"/net/astrogate/export/astrodata/jgacon/filex/processing/\" \\\n \"export/f8_h50_v100_objs_snap_%d.csv\"\n snap_ids = np.arange(2,28+1)\n z = snapid2z(snap_ids)\n print z\n\n for id in snap_ids:\n snap = snap_mask % (id - 1) # fix: snap number one too low in filename\n pixels[id] = np.genfromtxt(snap)[1:-1,1] # row 2 contains volumes\n # rm void & halo volumes\n\n # visualise\n if \"err\" in options.keys():\n nums = np.array([pixels[id].size for id in snap_ids])\n avgs = np.array([np.mean(pixels[id]) for id in snap_ids])\n mods = np.array([st.mode(pixels[id])[0][0] for id in snap_ids])\n meds = np.array([np.median(pixels[id]) for id in snap_ids])\n stds = np.array([np.std(pixels[id]) for id in snap_ids])\n\n print mods\n print mods.shape\n\n plt.figure()\n plt.title(\"Sizes of filaments as function of redshift\")\n plt.xlabel(\"Redshift $z$\")\n plt.xticks(snap_ids[::3], z[::3])\n\n plt.ylabel(\"Size in #pixels\")\n\n plt.errorbar(snap_ids, avgs, yerr=stds, label=\"Mean\")\n plt.plot(snap_ids, mods, \"g\", label=\"Mode\")\n plt.plot(snap_ids, meds, \"c\", label=\"Median\")\n plt.legend(loc=\"best\")\n\n plt.twinx()\n plt.ylabel(\"#Filaments\", color=\"r\")\n plt.tick_params(\"y\", colors=\"r\")\n\n plt.plot(snap_ids, nums, \"r--\")\n\n plt.savefig(options[\"err\"])\n\n if \"dist\" in options.keys():\n targets = np.array([5,10,15,20,25])\n plt.figure()\n plt.title(\"Volume distribution of filaments\")\n plt.xlabel(\"Volume $V$ in #pixels\")\n plt.ylabel(\"#Element with $V$ / Total #Elements\")\n plt.xlim([0,1000])\n for target in targets:\n sns.kdeplot(pixels[target], label=\"$z$ = %f\" % snapid2z(target))\n plt.legend(loc=\"best\")\n plt.savefig(options[\"dist\"])\n\n if \"dist_inter\" in options.keys():\n default = snap_ids[-1]\n fig, ax = plt.subplots()\n plt.subplots_adjust(bottom=0.25)\n sns.kdeplot(pixels[int(default - 2)], ax=ax)\n plt.xlim([0, 1000])\n plt.ylim([0, 0.01])\n plt.xlabel(\"Volume $V$ of filaments in #pixels\")\n plt.ylabel(\"#Filaments with volume $V$ / Total #Filaments\")\n\n nums = np.array([pixels[id].size for id in snap_ids])\n ax2 = ax.twinx()\n ax2.set_ylabel(\"#Filaments\", color=\"r\", alpha=0.5)\n ax2.tick_params(axis=\"y\", labelcolor=\"r\")\n ax2_x = np.linspace(0, 1000, nums.size)\n ax2.plot(ax2_x, nums, \"r--\", alpha=0.5)\n point, = 
ax2.plot(ax2_x[default - 2], nums[default - 2], \"ro\", alpha=0.5)\n\n axcolor = 'lightgoldenrodyellow'\n axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n sid = Slider(axfreq, \"ID\", 2, 28, valinit=default, valstep=1)\n ax.set_title(\"$z$ = %f\" % snapid2z(default))\n\n def update(val):\n id = sid.val\n\n print id\n #ax.clear()\n ax.set_ydata()\n ax.set_xdata()\n ax.set_title(\"$z$ = %f\" % snapid2z(int(id)))\n ax.set_xlim([0,1000])\n ax.set_ylim([0, 0.01])\n sns.kdeplot(pixels[int(id)], ax=ax)\n point.set_xdata(ax2_x[int(id) - 2])\n point.set_ydata(nums[int(id) - 2])\n fig.canvas.draw_idle()\n sid.on_changed(update)\n\n plt.show()\n\n\n if \"hist\" in options.keys():\n conc = None\n for id, vols in pixels.iteritems():\n data = np.empty((vols.size, 2))\n data[:,0] = id\n data[:,1] = vols\n\n if conc is None:\n conc = data\n else:\n conc = np.vstack((conc, data))\n\n plt.figure()\n plt.hist2d(conc[:,0], conc[:,1], bins=(snap_ids.size, 1000))\n plt.ylim([100,400])\n plt.savefig(options[\"hist\"])", "def create_image_caption_pairs(self):", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def plt_bboxes(img, classes, scores, bboxes, figsize=(17.78,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n print (\"original height width\", height, width)\n if (classes.shape[0] > 0):\n print (\"This frame has class\")\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = pascal_classes[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + 
(3,))\n plt.close()\n print(\"Processed data with shape, \", data.shape)\n return data", "def draw_boxes(indexes, frame, all_boxes):\n bbox = []\n mid_points = []\n\n for i in indexes:\n x = i[0]\n box = all_boxes[x]\n bbox.append(box)\n mid_points.append(mid_point(frame, box))\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n\n cv2.rectangle(frame, (x1,y1),(x2,y2),(255,0,0),2) \n\n return mid_points, bbox", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def setUp(self):\n img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')\n self.results = {\n 'img_path':\n img_path,\n 'img_shape': (300, 400),\n 'instances': [{\n 'bbox': [0, 0, 10, 20],\n 'bbox_label': 1,\n 'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n 'ignore_flag': 0\n }, {\n 'bbox': [10, 10, 110, 120],\n 'bbox_label': 2,\n 'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n 'ignore_flag': 0\n }, {\n 'bbox': [50, 50, 60, 80],\n 'bbox_label': 2,\n 'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n 'ignore_flag': 1\n }]\n }", "def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def _resize_bboxes(self, ori_bboxes, scale_factor):\n bboxes = ori_bboxes * scale_factor\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.img_shape[0])\n return bboxes", "def draw_boxes_info(image, current_data):\n\n font_position1 = (50, 600)\n font_position2 = (50, 650)\n font_scale = .4\n font_thickness = 1\n\n locations = current_data[\"locations\"] #returns x1, y1, x2, y2\n frame_num = \"Frame Number: \" + str(current_data[\"frame_num\"])\n\n for box in locations:\n box_text = (\"Box locations are x1: {0}, y1: {1}, x2: {2}, y2: {3}\").format(box[1],box[3],box[0],box[2])\n\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 3)\n cv2.putText(image, box_text, font_position1, 
cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n cv2.putText(image, frame_num, font_position2, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n return image", "def display_precomputed_boxes(self, sample_index, all_boxes):\n image_rois = [class_detections[sample_index]\n for class_detections in all_boxes]\n\n image_rois_list = []\n image_classes = []\n for class_index, class_rois in enumerate(image_rois):\n if len(class_rois) > 0:\n classes = np.ones((class_rois.shape[0])) * class_index\n image_rois_list.extend(class_rois)\n image_classes.extend(classes)\n image_rois_list = np.array(image_rois_list)\n image_classes = np.array(image_classes)\n\n show_gt_boxes = False\n self.display_detections(image_rois_list, image_classes, \n self.data_loader.dataset.samples[sample_index])", "def plot_n_box(img_prepro):\n print(img_prepro)\n h, w= img_prepro.shape\n boxes = pytesseract.image_to_boxes(img_prepro)\n for b in boxes.splitlines():\n b = b.split(' ')\n img_prepro = cv2.rectangle(img_prepro, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), (0, 255, 0), 2)\n cv2.imshow('img', img_prepro)\n cv2.waitKey(0)\n\n return", "def setwinsize(self, rows, cols):", "def onet_process(self, image, boxes, height, width):\n data = self.__padding(image, boxes, height, width)\n return data", "def forward(self):\n priors = []\n for k, f in enumerate(self.feature_maps):\n scale = self.image_size / self.strides[k]\n # for i, j in product(range(f), repeat=2):\n for i in range(f[0]):\n for j in range(f[1]):\n # print(i, j)\n # unit center x,y\n cx = (j + 0.5) / scale\n cy = (i + 0.5) / scale\n\n # small sized square box\n size = self.min_sizes[k]\n h = w = size / self.image_size\n priors.append([cx, cy, w, h])\n\n # big sized square box\n size = sqrt(self.min_sizes[k] * self.max_sizes[k])\n h = w = size / self.image_size\n priors.append([cx, cy, w, h])\n\n # change h/w ratio of the small sized box\n size = self.min_sizes[k]\n h = w = size / self.image_size\n for ratio in self.aspect_ratios[k]:\n ratio = sqrt(ratio)\n priors.append([cx, cy, w * ratio, h / ratio])\n priors.append([cx, cy, w / ratio, h * ratio])\n\n priors = torch.Tensor(priors)\n if self.clip:\n priors.clamp_(max=1, min=0)\n return priors", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def __create_blank_page__(self):\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"w\") as f:\n f.write(\"\")\n\n self.width = 2508\n # self.height = 200\n self.height = 4000\n self.training_page = np.zeros((self.height,self.width),dtype=np.uint8)\n self.training_page.fill(255)\n\n self.row_bitmaps = []\n self.row_characters = []\n\n self.row_pointer = spacing\n self.column_pointer = spacing\n\n\n # self.__box_file_flush__()\n self.box_file_entries = []\n self.used_height = spacing", "def plot_image(image, boxes, class_dic, frame_n):\n im = np.array(image)\n print(im.shape)\n height, width, _ = im.shape\n\n # Create figure and axes\n # fig, ax = plt.subplots(1)\n # # Display the image\n # ax.imshow(im)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='auto')\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(im)\n # box[0] is x 
midpoint, box[2] is width\n # box[1] is y midpoint, box[3] is height\n\n # Create a Rectangle potch\n for box in boxes:\n class_ = int(box[0])\n confidence_ = box[1]\n box = box[2:]\n assert len(box) == 4, \"Got more values than in x, y, w, h, in a box!\"\n upper_left_x = box[0] - box[2] / 2\n upper_left_y = box[1] - box[3] / 2\n rect = patches.Rectangle(\n (upper_left_x * width, upper_left_y * height),\n box[2] * width,\n box[3] * height,\n linewidth=1,\n edgecolor=\"r\",\n facecolor=\"none\",\n )\n\n\n label_bbox = class_dic[class_] + \":::\" + f\"{100 * confidence_:.2f}\" + \"%\"\n plt.text(upper_left_x * width, upper_left_y * height - 10, label_bbox, size=10, rotation=0,\n ha=\"left\", va=\"bottom\",\n bbox=dict(boxstyle=\"square\",\n ec=(1, 0, 0),\n fc=(1, 0, 0),\n )\n )\n \n \n # Add the patch to the Axes\n ax.add_patch(rect)\n if frame_n:\n plt.savefig(str(frame_n) + '.png', dpi=200, bbox_inches=\"tight\", transparent=True, pad_inches=0)\n else:\n plt.show()", "def setUp(self):\n self.single_box = [ScoredRect(Rect(0, 10, 0, 20), 0.0)]\n\n self.ground_truths = [ScoredRect(Rect(0, 10, 0, 10), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(-40, -30, -20, -10), 0.5),\n ]\n self.detections = [ScoredRect(Rect(0, 10, 0, 10), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(100, 110, 20, 30), 0.5),\n ]", "def normalize_boxes(all_boxes, image_width, image_height):\n new_boxes = []\n for boxes_per_frame in all_boxes:\n new_boxes_per_frame = []\n for i, box in enumerate(boxes_per_frame):\n left, top, right, bottom = box\n new_boxes_per_frame.append((left / image_width, top / image_height, right / image_width, bottom / image_height))\n new_boxes.append(new_boxes_per_frame)\n\n assert(len(new_boxes) == len(all_boxes))\n for i, boxes_per_frame in enumerate(all_boxes):\n assert(len(boxes_per_frame) == len(new_boxes[i]))\n\n\n\n return new_boxes", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def resize_spacing(img_sz,img_sp,factor):\n img_sz_np = np.array(list(img_sz))\n img_sp_np = np.array(list(img_sp))\n new_sz_np = img_sz_np*factor\n new_sp = img_sp_np*(img_sz_np-1)/(new_sz_np-1)\n return tuple(list(new_sp))", "def __init__(self):\n toplevel_create_image = mw.tkinter.Toplevel(mw.MainWindow, bg = \"#a1a1a1\")\n \"\"\" top level window attributes\"\"\"\n toplevel_create_image.title(\"create new image\")\n toplevel_create_image.geometry(\"800x600+600+200\")\n toplevel_create_image.resizable(width = False, height = False)\n self. 
top_down = 60\n\n # UI\n image_width_label = mw.tkinter.Label(toplevel_create_image, text = \"Width: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_width_label.grid(row = 0, column = 0, padx = 20, pady = 20)\n image_height_label = mw.tkinter.Label(toplevel_create_image, text = \"Height: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_height_label.grid(row = 1, column = 0)\n\n # text box image size \n image_width_text_box = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_width_text_box.grid(row = 0, column = 1, pady = 20)\n\n image_height_text_box = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_height_text_box.grid(row = 1, column = 1)\n\n # color mode\n color_mode_combo_box = mw.tkinter.Label(toplevel_create_image, text = \"color mode: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n color_mode_combo_box.grid(row = 2, column = 0, padx = 20, pady = 20)\n\n # image_color_mode_text = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n # bg = \"grey\", fg = \"white\")\n # image_color_mode_text.grid(row = 2, column = 1)\n\n color_mode_combo_box = ttk.Combobox(toplevel_create_image, values = [\n \"RGB\",\n \"RGBA\",\n \"CYMA\"\n ])\n color_mode_combo_box.current(0)\n color_mode_combo_box.grid(row = 2, column = 1)\n\n # background color\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"red: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 3, column = 0, padx = 30)\n\n image_background_color_text_red = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_red.grid(row = 3, column = 1)\n# -----------------------------------------------------------------------------------------------------------\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"green: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 4, column = 0, padx = 30)\n\n image_background_color_text_green = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_green.grid(row = 4, column = 1)\n\n# ----------------------------------------------------------------------------------------------------------\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"blue: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 5, column = 0, padx = 30)\n\n image_background_color_text_blue = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_blue.grid(row = 5, column = 1)\n # create_new_image\n def create_new_image():\n new_image = Image.new(mode = (color_mode_combo_box.get()),\n size = (int(image_width_text_box.get(\"1.0\", mw.tkinter.END)),\n int(image_height_text_box.get(\"1.0\", mw.tkinter.END)) ),\n color = ((int(image_background_color_text_red.get(\"1.0\", mw.tkinter.END))),\n (int(image_background_color_text_green.get(\"1.0\", mw.tkinter.END))),\n (int(image_background_color_text_blue.get(\"1.0\", mw.tkinter.END)))))\n # get new image properties\n the_width, the_height = new_image.size\n # to-do clear the image\n new_image_compatible = ImageTk.PhotoImage(new_image)\n # load the image on the canvas \n 
app.default_image_canvas.delete(\"all\")\n # app.default_image_canvas = mw.tkinter.Canvas(toplevel_create_image, height = the_height, width = the_width)\n app.default_image_canvas.create_image(0,0, image = new_image_compatible, anchor = mw.tkinter.NW)\n # app.load_image_canvas.grid(row = 0, column = 1)\n toplevel_create_image.destroy()\n # to-do - correct error\n app.default_image_canvas.configure(mw.MainWindow)\n \n\n\n # create_image_button\n create_image_button = mw.tkinter.Button(toplevel_create_image, text = \"create image\",\n bg = \"grey\", fg = \"white\", font = (\"times\", 13,\"bold\"),\n command = create_new_image)\n create_image_button.grid(row = 6, column = 0, padx = 40, pady = 20)", "def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)", "def initrows(self):\n #~ self.initrows2()\n self.rows=[]\n for yy in range(self.height):\n row=[]\n for xx in range(self.width):\n if (xx,yy) in self.allsqs:\n row.append(0)\n #~ elif p in self.gatesqs:\n #~ row.append(0)\n else:\n row.append(1)\n self.rows.append(row)", "def rescale_box(box, img_size_orig, img_size_new):\n orig_w, orig_h = img_size_orig\n new_w, new_h = img_size_new\n scale_x = new_w / orig_w\n scale_y = new_h / orig_h\n sx, sy, ex, ey = box\n return [sx * scale_x, sy * scale_y, ex * scale_x, ey * scale_y]", "def __getitem__(self, index):\n image_id = self.image_ids[index]\n\n filename = self.image_id_to_filename[image_id]\n image_path = os.path.join(self.image_dir, filename)\n\n with open(image_path, 'rb') as f:\n with PIL.Image.open(f) as image:\n WW, HH = image.size\n image = self.transform(image.convert('RGB'))\n\n H, W = self.image_size\n objs, boxes, masks = [], [], []\n\n for object_data in self.image_id_to_objects[image_id]:\n # objs.append(object_data['category_id'])\n objs.append(int(object_data.find('name').get(\"id\")))\n\n bndbox = object_data.findall('bndbox')[0]\n xmin = int(bndbox.find('xmin').text)\n ymin = int(bndbox.find('ymin').text)\n xmax = int(bndbox.find('xmax').text)\n ymax = int(bndbox.find('ymax').text)\n w = xmax - xmin\n h = ymax - ymin\n\n boxes.append(torch.FloatTensor([xmin, ymin, xmax, ymax]))\n\n # This will give a numpy array of shape (HH, WW)\n mask = torch.zeros(1, H, W)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n mask[:, round(ymin * H):max(round(ymin * H) + 1, round(ymax * H)),\n round(xmin * W):max(round(xmin * W) + 1, round(xmax * W))] = 1\n masks.append(mask)\n # shuffle objs\n O = len(objs)\n rand_idx = list(range(O))\n random.shuffle(rand_idx)\n\n objs = [objs[i] for i in rand_idx]\n boxes = [boxes[i] for i in rand_idx]\n masks = [masks[i] for i in rand_idx]\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n masks = torch.stack(masks, dim=0)\n\n # print(image_path)\n\n return image, objs, boxes, masks", "def show_field(self, vehicles, type):\n\n # starting pixels x = 0, y = 0 on field image\n start_x = 78\n start_y = 45\n\n # block pixel width is slightly different per field size\n if self.size == 6:\n block_width = 72\n elif self.size == 9:\n block_width = 69\n elif self.size == 12:\n block_width = 68.5\n\n field = plt.imread(f\"data/RushHourImages/RushHour{self.size}.jpg\")\n fig, ax = plt.subplots()\n plt.imshow(field)\n plt.axis('off')\n\n for vehicle in vehicles:\n if vehicle.orientation == 'H':\n x = start_x + (vehicle.x * block_width)\n y = start_y + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car{vehicle.id}.png\")\n else:\n car = 
plt.imread(f\"data/RushHourImages/Truck{vehicle.id}.png\")\n\n # truck: the image coordinate is his middle, which changes with the length of the car\n x += 40\n\n if vehicle.orientation == 'V':\n x = start_y + (vehicle.x * block_width)\n y = start_x + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car-rotated{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck-rotated{vehicle.id}.png\")\n y += 40\n\n if self.size == 6:\n imagebox = OffsetImage(car, zoom=0.6)\n elif self.size == 9:\n imagebox = OffsetImage(car, zoom=0.4)\n elif self.size == 12:\n imagebox = OffsetImage(car, zoom=0.3)\n\n imagebox.image.axes = ax\n xy = (x, y)\n ab = AnnotationBbox(imagebox, xy, frameon=False)\n ax.add_artist(ab)\n\n if type == True:\n plt.show(block=False)\n plt.pause(0.001)\n plt.close()\n else:\n plt.show()", "def proposal_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None, train_anchor_inds=None, proposals=None):\n assert proposals is not None\n rois = filter_roi_proposals(proposals[:, 2:].data.contiguous(), proposals[:, 1].data.contiguous(), np.array([2000] * len(im_sizes)), nms_thresh=0.7, pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000, post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000)\n if self.training:\n all_rois, labels, bbox_targets = proposal_assignments_det(rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)\n all_rois = torch.cat((all_rois, Variable(rois)), 0)\n else:\n all_rois = Variable(rois, volatile=True)\n labels = None\n bbox_targets = None\n rpn_scores = None\n rpn_box_deltas = None\n rel_labels = None\n return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels", "def _to_image_coords(self, boxes, height, width):\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords", "def boxplots(self, groups, nrows, ncols, type):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n for ax, (effs, dat) in zip(axs, groups):\n ax[0].boxplot(dat[\"perm\"])\n ax[1].boxplot(dat[\"t_test\"])\n ax[0].set_ylabel(\"Errors\")\n if type == \"es\":\n ax[0].set_title(f\"Effect size = {effs}, Test = Perm\")\n ax[1].set_title(f\"Effect size = {effs}, Test = t-test\")\n elif type == \"samp2\":\n ax[0].set_title(f\"Sample size = {effs}, Test = Perm\")\n ax[1].set_title(f\"Sample size = {effs}, Test = t-test\")\n\n\n plt.tight_layout()\n #plt.show()", "def draw_bboxes_withindex(img,boxes, uids):\n source = Image.fromarray(img)\n draw = ImageDraw.Draw(source)\n w2,h2 = (img.shape[0],img.shape[1])\n \n font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSerif.ttf', 40)\n #font = ImageFont.truetype('arial.ttf', 24)\n\n\n idx = 0\n\n for b in boxes:\n xmin,ymin,xmax,ymax = b\n \n for j in range(3):\n draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline=\"red\")\n draw.text((xmin+20, ymin+70), str(uids[idx]), font = font)\n idx +=1\n return source", "def load_boxes(self, data):\r\n\r\n # worldbox represents the total map area\r\n self.worldbox = self.Box((0, 0), (len(data[0]) * self.cellwidth, len(data) * self.cellwidth))\r\n\r\n # create a box corresponding to each character/cell in the map file\r\n tl_x = 0\r\n tl_y = 0\r\n for row in 
data:\r\n for cell in row:\r\n if cell == \".\":\r\n self.wallboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n elif cell == \"x\":\r\n self.targetboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n tl_x += self.cellwidth\r\n tl_x = 0\r\n tl_y += self.cellwidth", "def layout_graphics(self):\n # Graphics layout object to place viewboxes in\n self.g_layout = pg.GraphicsLayoutWidget(border=(80, 80, 80))\n self.g_layout.setCursor(QtCore.Qt.CrossCursor)\n\n # Viewboxes for images\n # aspect locked so that pixels are square\n # y inverted so that (0,0) is top left as in Thorlabs software\n options = {\"lockAspect\":True, \"invertY\":True}\n self.vb_image = self.g_layout.addViewBox(row=0, col=0, rowspan=2, **options)\n self.vb_zoom = self.g_layout.addViewBox(row=0, col=2, **options)\n self.vb_residuals = self.g_layout.addViewBox(row=1, col=2, **options)\n\n # Link zoom and residual views\n self.vb_zoom.setXLink(self.vb_residuals)\n self.vb_zoom.setYLink(self.vb_residuals)\n\n # Viewboxes for slice data\n # Both boxes have mouse disabled - range is fixed so we don't want to\n # scale them accidentally\n # Y box has y inverted to match the main image\n # Y box has x inverted so that zero pixel value is far from the image\n options = {\"enableMouse\":False, \"enableMenu\": False}\n self.vb_x = self.g_layout.addViewBox(row=2, col=0, **options)\n self.vb_y = self.g_layout.addViewBox(row=0, col=1, rowspan=2,\n invertX=True, invertY=True, **options)\n\n # Link the slice axes to the main image so that when we zoom/pan the\n # main image, our slices zoom/pan also\n self.vb_x.setXLink(self.vb_image)\n self.vb_y.setYLink(self.vb_image)\n\n # Disable autoscaling and fix range to maximum pixel intensity\n self.vb_x.setRange(yRange=(0,255))\n self.vb_y.setRange(xRange=(0,255))\n self.vb_x.disableAutoRange(axis=self.vb_x.YAxis)\n self.vb_y.disableAutoRange(axis=self.vb_y.XAxis)\n\n # Background color must not be black so that we can see where images\n # start/end\n color = pg.mkColor(40,40,40)\n self.vb_image.setBackgroundColor(color)\n self.vb_zoom.setBackgroundColor(color)\n self.vb_residuals.setBackgroundColor(color)\n self.vb_x.setBackgroundColor(color)\n self.vb_y.setBackgroundColor(color)\n self.g_layout.setBackground(color)\n\n self.vb_image.addItem(self.image)\n self.vb_image.addItem(self.fit_v_line)\n self.vb_image.addItem(self.fit_h_line)\n self.vb_image.addItem(self.mark_v_line)\n self.vb_image.addItem(self.mark_h_line)\n # self.vb_image.addItem(self.cursor_text)\n self.vb_image.addItem(self.cursor_delta)\n self.vb_image.addItem(self.beam_delta)\n self.vb_image.addItem(self.history_plot)\n # Figure out how to overlay properly?\n # self.vb_image.addItem(self.x_slice)\n # self.vb_image.addItem(self.x_fit)\n # self.vb_image.addItem(self.y_slice)\n # self.vb_image.addItem(self.y_fit)\n self.vb_zoom.addItem(self.zoom)\n self.vb_zoom.addItem(self.fit_maj_line)\n self.vb_zoom.addItem(self.fit_min_line)\n self.vb_zoom.addItem(self.zoom_text)\n self.vb_residuals.addItem(self.residuals)\n self.vb_residuals.addItem(self.residuals_text)\n self.vb_x.addItem(self.x_slice)\n self.vb_x.addItem(self.x_fit)\n self.vb_x.addItem(self.cursor_v)\n self.vb_y.addItem(self.y_slice)\n self.vb_y.addItem(self.y_fit)\n self.vb_y.addItem(self.cursor_h)\n\n self.res_legend.setParentItem(self.vb_residuals)\n self.cursor_text.setParentItem(self.vb_image)\n\n self.vb_image.setRange(QtCore.QRectF(0, 0, 1280, 1024))\n self.vb_zoom.setRange(QtCore.QRectF(0, 0, 50, 
50))\n self.vb_residuals.setRange(QtCore.QRectF(0, 0, 50, 50))\n\n #\n # Size hints below here\n #\n self.g_layout.ci.layout.setColumnStretchFactor(0, 4)\n self.g_layout.ci.layout.setColumnStretchFactor(1, 1)\n self.g_layout.ci.layout.setColumnStretchFactor(2, 2)\n self.g_layout.ci.layout.setRowStretchFactor(0, 2)\n self.g_layout.ci.layout.setRowStretchFactor(1, 2)\n self.g_layout.ci.layout.setRowStretchFactor(2, 1)\n\n self.vb_x.setMinimumHeight(50)\n self.vb_y.setMinimumWidth(50)\n self.vb_x.setMaximumHeight(100)\n self.vb_y.setMaximumWidth(100)\n self.vb_image.setMinimumSize(640, 512)\n self.vb_zoom.setMinimumSize(320, 320)\n self.vb_residuals.setMinimumSize(320, 320)\n\n self.g_layout.setMinimumSize(1100,562)", "def plantGrid(name, minimum, maximum, spacing):\n name = sys.argv[1]\n minimum = sys.argv[2]\n maximum = sys.argv[3]\n minimum = int(minimum)\n maximum = int(maximum)\n #convert to flux\n min1 = conversion(minimum)\n max1 = conversion(maximum)\n min1 = int(min1)\n max1 = int(max1)\n #brightness = [random.uniform(min1, max1) for _ in xrange(len(position))]\n #print brightness\n spacing = sys.argv[4]\n spacing = float(spacing)\n print len(data1)\n #create the position array for x values\n x = np.arange(6, len(data1), spacing)\n #create the position array for y values\n y = np.arange(6, len(data1), spacing)\n x2 = np.arange(6, len(data1), spacing)\n y2 = np.flipud(y)\n \n #combine both arrays to form a grid\n position = np.column_stack((x,y))\n position2 = np.column_stack((x2,y2))\n \n #combine both lines of grid to one array\n position = np.concatenate((position, position2), axis = 0)\n \n #create a random brightness array between the min and max values\n brightness = np.array([random.uniform(min1, max1) for _ in range(0,len(position))])\n \n #add to image file and subtract\n fakestars.addtofits(name, out_file, psf, position, brightness, coordsys, verbose)\n fakestars.addtofits(name, outfile2, psf, position, brightness, coordsys, verbose)\n imarith.imsubtract(out_file, outfile2, differenceFile, clobber=True)", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def initialize(self, frame):\n self.grid_size = 5\n\n Label(frame, text=\"Grid Size:\").grid(row=0)\n\n self.e1 = Scale(frame, from_=self.grid_size, to=25, orient=HORIZONTAL)\n self.e1.grid(row=0, column=1)\n\n return self.e1", "def make(self) -> None:\n\n # arbitrarily selecting the first image from the list, index 0\n with Image.open(self.image_list[0]) as first_frame_image_in_list:\n\n # Find the width and height of the first image of the list.\n # Assuming all the images have same size.\n frame_image_width, frame_image_height = first_frame_image_in_list.size\n\n # scale is the ratio of collage_image_width and product of\n # images_per_row_in_collage with frame_image_width.\n\n # The scale will always lie between 0 and 1, which implies that\n # the images are always going to get downsized.\n scale = (self.collage_image_width) / (\n self.images_per_row_in_collage * frame_image_width\n )\n\n # Calculating the scaled height and width for the frame image.\n scaled_frame_image_width = ceil(frame_image_width * scale)\n 
scaled_frame_image_height = ceil(frame_image_height * scale)\n\n # Divide the number of images by images_per_row_in_collage. The later\n # was calculated by taking the square root of total number of images.\n number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)\n\n # Multiplying the height of one downsized image with number of rows.\n # Height of 1 downsized image is product of scale and frame_image_height\n # Total height is number of rows times the height of one downsized image.\n self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)\n\n # Create an image of passed collage_image_width and calculated collage_image_height.\n # The downsized images will be pasted on this new base image.\n # The image is 0,0,0 RGB(black).\n collage_image = Image.new(\n \"RGB\", (self.collage_image_width, self.collage_image_height)\n )\n\n # keep track of the x and y coordinates of the resized frame images\n i, j = (0, 0)\n\n # iterate the frames and paste them on their position on the collage_image\n for count, frame_path in enumerate(self.image_list):\n\n # Set the x coordinate to zero if we are on the first column\n # If self.images_per_row_in_collage is 4\n # then 0,4,8 and so on should have their x coordinate as 0\n if (count % self.images_per_row_in_collage) == 0:\n i = 0\n\n # open the frame image, must open it to resize it using the thumbnail method\n frame = Image.open(frame_path)\n\n # scale the opened frame images\n frame.thumbnail(\n (scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS\n )\n\n # set the value of x to that of i's value.\n # i is set to 0 if we are on the first column.\n x = i\n\n # It ensures that y coordinate stays the same for any given row.\n # The floor of a real number is the largest integer that is less\n # than or equal to the number. floor division is used because of\n # the zero based indexing, the floor of the division stays same\n # for an entier row as the decimal values are negled by the floor.\n # for the first row the result of floor division is always zero and\n # the product of 0 with scaled_frame_image_height is also zero, they\n # y coordinate for the first row is 0.\n # For the second row the result of floor division is one and the prodcut\n # with scaled_frame_image_height ensures that the y coordinate is\n # scaled_frame_image_height below the first row.\n y = (j // self.images_per_row_in_collage) * scaled_frame_image_height\n\n # paste the frame image on the newly created base image(base image is black)\n collage_image.paste(frame, (x, y))\n frame.close()\n\n # increase the x coordinate by scaled_frame_image_width\n # to get the x coordinate of the next frame. unless the next image\n # will be on the very first column this will be the x coordinate.\n i = i + scaled_frame_image_width\n\n # increase the value of j by 1, this is to calculate the y coordinate of\n # next image. 
The increased number will be floor divided by images_per_row_in_collage\n # therefore the y coordinate stays the same for any given row.\n j += 1\n\n # save the base image with all the scaled frame images embeded on it.\n collage_image.save(self.output_path)\n collage_image.close()", "def _build_meds_layout(self):\n\n\n nim = self.image_info.size\n nobj = self.obj_data.size\n\n trim_to_coadd = self.get('trim_to_coadd',False)\n if trim_to_coadd:\n print(' trimming to coadd')\n coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \\\n self._get_pos_and_bounds(self.obj_data, 0)\n in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol'])\n w_in_bnds, = np.where(in_bnds == True)\n assert w_in_bnds.size > 0,\"none found in coadd\"\n\n w_in_bnds = coadd_q[w_in_bnds]\n self.obj_data = self.obj_data[w_in_bnds]\n\n self._do_psf_setup()\n\n # box sizes are even\n half_box_size = self.obj_data['box_size']//2\n\n for file_id in range(nim):\n\n wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id)\n\n # do the test\n in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])\n q_rc, = np.where(in_bnds == True)\n print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))\n\n # now make sure everything is there\n if self['check_in_first_image']:\n if file_id == 0 and len(self.obj_data['ra']) != len(q_rc):\n raise MEDSCreationError('Not all objects were found in first image for '\n 'MEDS making (which is the coadd/detection '\n 'image by convention).')\n # compose them\n q = q[q_rc]\n\n # fill in the object_data structure\n\n # note q_rc since pos was created using obj_data[q]\n qrow = pos['zrow'][q_rc]\n qcol = pos['zcol'][q_rc]\n\n icut = self.obj_data['ncutout'][q]\n self.obj_data['file_id'][q,icut] = file_id\n self.obj_data['orig_row'][q,icut] = qrow\n self.obj_data['orig_col'][q,icut] = qcol\n\n # this results in the object center being close to\n # the natural center (dim-1.)/2.\n ostart_row = qrow.astype('i4') - half_box_size[q] + 1\n ostart_col = qcol.astype('i4') - half_box_size[q] + 1\n crow = qrow - ostart_row\n ccol = qcol - ostart_col\n\n self.obj_data['orig_start_row'][q,icut] = ostart_row\n self.obj_data['orig_start_col'][q,icut] = ostart_col\n self.obj_data['cutout_row'][q,icut] = crow\n self.obj_data['cutout_col'][q,icut] = ccol\n\n # do jacobian, in original, not-offset coords\n # note q_rc since pos was created using self.obj_data[q]\n jacob = wcs.get_jacobian(\n x=pos['wcs_col'][q_rc],\n y=pos['wcs_row'][q_rc])\n\n # jacob is a tuple of arrays\n self.obj_data['dudcol'][q,icut] = jacob[0]\n self.obj_data['dudrow'][q,icut] = jacob[1]\n self.obj_data['dvdcol'][q,icut] = jacob[2]\n self.obj_data['dvdrow'][q,icut] = jacob[3]\n\n # increment\n self.obj_data['ncutout'][q] += 1\n\n w,=np.where(self.obj_data['ncutout'] > 0)\n print('%d/%d had ncut > 0' % (w.size, self.obj_data.size))\n #self.obj_data = self.obj_data[w]\n\n self.obj_data = self._make_resized_data(self.obj_data)\n print('setting number field as sequential')\n self.obj_data['number'] = 1+np.arange(self.obj_data.size)\n\n\n self._set_start_rows_and_pixel_count()\n\n if self['survey']=='cosmos':\n self._set_psf_layout_hst()\n else:\n self._set_psf_layout_psfex()", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. 
NDVI, BWNDVI\n self.num_files_per_point = 3" ]
[ "0.6761654", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177", "0.55709535", "0.5566756", "0.5561757", "0.55550766", "0.55439216", "0.55324256", "0.5499715", "0.5493037", "0.5491024", "0.5488773", "0.54860556", "0.5479351", "0.5477189", "0.54661876", "0.54654974", "0.5462513", "0.5436562", "0.54347384", "0.5429081", "0.54147685", "0.5409667", "0.54019666", "0.5395863", "0.5394675", "0.5392269", "0.5371903", "0.53668004", "0.53408337", "0.53349805", "0.533052", "0.53242874", "0.52984256", "0.529094", "0.5273975", "0.5269214", "0.5264517", "0.5261543", "0.5260264", "0.5256314", "0.52466404", "0.52401096", "0.52370954", "0.52364236", "0.52281755", "0.52137214", "0.52120256", "0.5209313", "0.52089477", "0.5208163", "0.5206278", "0.52018976", "0.51996654", "0.51974803", "0.5184914", "0.51843166", "0.51824045", "0.5175214", "0.517447", "0.51738864", "0.517314", "0.51640254", "0.5162107", "0.51584274", "0.5151213", "0.51493704", "0.5145289", "0.5140283", "0.51401466", "0.51368606", "0.5136106", "0.51344085", "0.51342916", "0.5132771", "0.5125228", "0.5124148", "0.512393", "0.512312", "0.5120537", "0.51197904" ]
0.62900573
1
set the box sizes and start row for each psf image
def _set_psf_layout_psfex(self):

    print('setting psf layout for PSFEx')

    obj_data = self.obj_data
    psf_data = self.psf_data

    total_psf_pixels = 0

    # psf_npix = psf_size*psf_size

    psf_start_row = 0
    for iobj in range(obj_data.size):
        for icut in range(obj_data['ncutout'][iobj]):

            row = obj_data['orig_row'][iobj, icut]
            col = obj_data['orig_col'][iobj, icut]
            file_id = obj_data['file_id'][iobj, icut]

            p = psf_data[file_id]

            pim = p.get_rec(row, col)
            cen = p.get_center(row, col)

            psf_shape = pim.shape
            psf_npix = pim.size

            obj_data['psf_row_size'][iobj, icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj, icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj, icut] = cen[0]
            obj_data['psf_cutout_col'][iobj, icut] = cen[1]
            obj_data['psf_start_row'][iobj, icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_hst(self):\n\n print('setting psf layout for HST')\n obj_data=self.obj_data\n\n total_psf_pixels = 0\n psf_start_row = 0\n\n for iobj in range(obj_data.size):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n # note assuming same psf for all \"epochs\"\n psf_im = self.psf_data.get_psf(iobj)\n\n psf_shape = psf_im.shape\n psf_npix = psf_im.size\n\n cen = (np.array(psf_shape)-1.0)/2.0\n\n # we will expand the psfs\n\n for icut in range(obj_data['ncutout'][iobj]):\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n self.total_psf_pixels = total_psf_pixels", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def build_filler_images(self):", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def _get_boxes(self, idx):\n # Load Image\n path = self.df.hsi_path.iloc[idx]\n im = self._load_im(path)\n\n # Crop out box\n r_box_im = self.df.width.iloc[idx] / im.shape[-1] # Ratio of RGB im box coords to load im width (e.g., r=10 for hsi)\n box = np.array([self.df.ymin.iloc[idx], self.df.ymax.iloc[idx], self.df.xmin.iloc[idx], self.df.xmax.iloc[idx]])\n box = np.around(box / r_box_im).astype(int)\n\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n if np.any(np.array(crop_im.shape) == 0):\n print('[WARNING] Loaded box has zero shape and is sketchily inflated. 
TODO: skip this box with ID', idx)\n if box[0] == self.df.width.iloc[idx]/r_box_im: box[0] -= 1\n if box[2] == self.df.height.iloc[idx]/r_box_im: box[2] -= 1\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n\n target = {}\n target[\"labels\"] = self.df.class_id.iloc[idx]\n target[\"uid\"] = self.df.uid.iloc[idx]\n \n\n return crop_im, target", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def setBoxsize(length,width,height):\n return length,width,height", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n if len(results[key]) > 0:\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n 
cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def analyzeImages(path, name_type, box1_size, box2_size, box3_size, box4_size, box5_size):\n \n folders = [f for f in sorted(glob.glob(path + \"/**\"))]\n \n for folder in folders: \n \n # to save this data frame in a csv file\n \n files = [f for f in sorted(glob.glob(folder + \"/**\" + \".jpg\"))]\n \n centroidsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n headsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n \n img_array1 = []\n img_array2 = []\n img_array3 = []\n img_array4 = []\n img_array5 = []\n\n for file in files:\n \n print(file)\n \n centroidsDf[\"frame\"][files.index(file)] = files.index(file)+1\n headsDf[\"frame\"][files.index(file)] = files.index(file)+1\n \n img = cv2.imread(file)\n \n ## FLY 1 ##\n\n box1 = img[box1_size[0]:box1_size[1], box1_size[2]:box1_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box1, file) \n \n # add the centroid and head locations on the image \n box1 = cv2.circle(box1, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box1 = cv2.circle(box1, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array1.append(box1)\n \n # add the positions in the final data frame\n centroidsDf[\"fly1_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly1_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly1_x\"][files.index(file)] = x_head\n headsDf[\"fly1_y\"][files.index(file)] = y_head\n \n ## FLY 2 ##\n \n box2 = img[box2_size[0]:box2_size[1], box2_size[2]:box2_size[3]]\n \n # image processing to get the centroid 
and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box2, file)\n \n # add the centroid and head locations on the image \n box2 = cv2.circle(box2, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box2 = cv2.circle(box2, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array2.append(box2)\n \n # add the positions in the final data frame \n centroidsDf[\"fly2_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly2_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly2_x\"][files.index(file)] = x_head\n headsDf[\"fly2_y\"][files.index(file)] = y_head\n \n ## FLY 3 ##\n\n box3 = img[box3_size[0]:box3_size[1], box3_size[2]:box3_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box3, file)\n \n # add the centroid and head locations on the image \n box3 = cv2.circle(box3, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box3 = cv2.circle(box3, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array3.append(box3)\n\n # add the positions in the final data frame\n centroidsDf[\"fly3_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly3_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly3_x\"][files.index(file)] = x_head\n headsDf[\"fly3_y\"][files.index(file)] = y_head\n \n ## FLY 4 ##\n \n box4 = img[box4_size[0]:box4_size[1], box4_size[2]:box4_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box4, file)\n \n # add the centroid and head locations on the image \n box4 = cv2.circle(box4, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box4 = cv2.circle(box4, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array4.append(box4)\n \n # add the positions in the final data frame\n centroidsDf[\"fly4_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly4_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly4_x\"][files.index(file)] = x_head\n headsDf[\"fly4_y\"][files.index(file)] = y_head\n \n ## FLY 5 ##\n \n # the fifth fly is not present in all the genetic strains \n if (box5_size != []):\n box5 = img[box5_size[0]:box5_size[1], box5_size[2]:box5_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box5, file)\n \n # add the centroid and head locations on the image \n box5 = cv2.circle(box5, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box5 = cv2.circle(box5, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array5.append(box5)\n \n # add the positions in the final data frame\n centroidsDf[\"fly5_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly5_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly5_x\"][files.index(file)] = x_head\n headsDf[\"fly5_y\"][files.index(file)] = y_head\n \n # save the data frame in a .csv file, \n # one for the centroids and one for the heads\n #centroidsDf.to_csv(folder+\"/centroids.csv\", index = None, header=True)\n #headsDf.to_csv(folder+\"/heads.csv\", index = None, header=True)\n \n \n ## CREATE THE VIDEOS ##\n \n height, width, _ = box1.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_1_' + str(folders.index(folder)+1)+ '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array1)):\n out.write(img_array1[i])\n out.release()\n \n height, width, _ = box2.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_2_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array2)):\n 
out.write(img_array2[i])\n out.release()\n \n height, width, _ = box3.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_3_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array3)):\n out.write(img_array3[i])\n out.release()\n \n height, width, _ = box4.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_4_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array4)):\n out.write(img_array4[i])\n out.release()\n \n if (box5_size != []):\n height, width, _ = box5.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_5_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array5)):\n out.write(img_array5[i])\n out.release()", "def _iter_images_rects(self):\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - (len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height", "def _assign_sizes(self):", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = 
xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n 
widget.SetPosition((rect.x-1, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)", "def scale_boxes(boxes, image_shape):\n height = image_shape[0]\n width = image_shape[1]\n image_dims = K.stack([height, width, height, width])\n image_dims = K.reshape(image_dims, [1, 4])\n boxes = boxes * image_dims\n return boxes", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # 
Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])", "def _images_and_boxes_preprocessing(self, imgs, boxes, gt_boxes=None):\n # Image [0, 255] -> [0, 1].\n imgs = imgs.float()\n imgs = imgs / 255.0\n\n height, width = imgs.shape[2], imgs.shape[3]\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\n # range of [0, 1].\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = transform.clip_boxes_to_image(boxes, height, width)\n\n if self._split == \"train\":\n # Train split\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._jitter_min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = transform.random_crop(\n imgs, self._crop_size, boxes=boxes\n )\n\n # Random flip.\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\n elif self._split == \"val\":\n # Val split\n # Resize short side to crop_size. Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n # Apply center crop for val split\n imgs, boxes = transform.uniform_crop(\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n elif self._split == \"test\":\n # Test split\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\n imgs, boxes = transform.random_short_side_scale_jitter(\n imgs,\n min_size=self._crop_size,\n max_size=self._crop_size,\n boxes=boxes,\n )\n\n if self._test_force_flip:\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\n else:\n raise NotImplementedError(\n \"{} split not supported yet!\".format(self._split)\n )\n\n # Do color augmentation (after divided by 255.0).\n if self._split == \"train\" and self._use_color_augmentation:\n if not self._pca_jitter_only:\n imgs = transform.color_jitter(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = transform.lighting_jitter(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = transform.color_normalization(\n imgs,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n # Note that Kinetics pre-training uses RGB!\n imgs = imgs[:, [2, 1, 0], ...]\n\n boxes = transform.clip_boxes_to_image(\n boxes, self._crop_size, self._crop_size\n )\n\n return imgs, boxes", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def stage_one(self, imgs, threshold, factor, minsize, nms_threshold):\n\n width = imgs.shape[-2]\n height = imgs.shape[-1]\n num_img = imgs.shape[0]\n\n # Compute valid scales\n scales = []\n cur_width = width\n cur_height = height\n cur_factor = 1\n while cur_width >= 12 and cur_height >= 12:\n if 12 / cur_factor >= minsize: # Ignore boxes that smaller than minsize\n\n w = cur_width\n h = cur_height\n scales.append((w, h, cur_factor))\n\n cur_factor *= factor\n cur_width = math.ceil(cur_width * factor)\n cur_height = math.ceil(cur_height * factor)\n\n # Get candidate boxesi ph\n candidate_boxes = torch.empty(0, dtype=torch.int32, device=self.device)\n candidate_scores = torch.empty(0, device=self.device)\n candidate_offsets = torch.empty(\n 0, dtype=torch.float32, device=self.device)\n all_img_labels = torch.empty(0, dtype=torch.int32, device=self.device)\n for w, h, f in scales:\n resize_img = torch.nn.functional.interpolate(\n imgs, size=(w, h), mode='bilinear')\n p_distribution, box_regs, _ = self.pnet(resize_img)\n\n candidate, scores, offsets, img_labels = self._generate_bboxes(\n p_distribution, box_regs, f, threshold)\n\n candidate_boxes = torch.cat([candidate_boxes, candidate])\n candidate_scores = torch.cat([candidate_scores, scores])\n candidate_offsets = torch.cat([candidate_offsets, offsets])\n all_img_labels = torch.cat([all_img_labels, img_labels])\n\n \n if candidate_boxes.shape[0] != 0:\n candidate_boxes = self._calibrate_box(\n candidate_boxes, candidate_offsets)\n candidate_boxes = self._convert_to_square(candidate_boxes)\n candidate_boxes = self._refine_boxes(\n candidate_boxes, width, height)\n \n final_boxes = torch.empty(0, dtype=torch.int32, device=self.device)\n final_img_labels = torch.empty(0, dtype=torch.int32, device=self.device)\n for i in range(num_img):\n mask = all_img_labels == i\n keep = 
func.nms(candidate_boxes[mask].cpu().numpy(),\n candidate_scores[mask].cpu().numpy(), nms_threshold)\n final_boxes = torch.cat([final_boxes, candidate_boxes[mask][keep]])\n final_img_labels = torch.cat([final_img_labels, all_img_labels[mask][keep]])\n\n return torch.cat([final_boxes, final_img_labels.unsqueeze(1 )], -1)\n else:\n return candidate_boxes", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes):\r\n\r\n height, width, _ = imgs[0].shape\r\n\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n # `transform.py` is list of np.array. However, for AVA, we only have\r\n # one np.array.\r\n boxes = [boxes]\r\n # The image now is in HWC, BGR format.\r\n if self._split == \"train\": # \"train\"\r\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = cv2_transform.random_crop_list(\r\n imgs, self._crop_size, order=\"HWC\", boxes=boxes\r\n )\r\n if self.random_horizontal_flip:\r\n # random flip\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 0.5, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"val\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n imgs, boxes = cv2_transform.spatial_shift_crop_list(\r\n self._crop_size, imgs, 1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"test\":\r\n # Short side to test_scale. 
Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported split mode {}\".format(self._split))\r\n\r\n # Convert image to CHW keeping BGR order.\r\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\r\n\r\n # Image [0, 255] -> [0, 1].\r\n imgs = [img / 255.0 for img in imgs]\r\n\r\n imgs = [\r\n np.ascontiguousarray(\r\n # img.reshape((3, self._crop_size, self._crop_size))\r\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\r\n ).astype(np.float32)\r\n for img in imgs\r\n ]\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = cv2_transform.color_jitter_list(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = cv2_transform.lighting_list(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = [\r\n cv2_transform.color_normalization(\r\n img,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n for img in imgs\r\n ]\r\n\r\n # Concat list of images to single ndarray.\r\n imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)\r\n\r\n if not self._use_bgr:\r\n # Convert image format from BGR to RGB.\r\n imgs = imgs[::-1, ...]\r\n\r\n imgs = np.ascontiguousarray(imgs)\r\n imgs = torch.from_numpy(imgs)\r\n boxes = cv2_transform.clip_boxes_to_image(\r\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\r\n )\r\n return imgs, boxes", "def __init__(self,jx,jy,img,geo,mbox=10,hist=False,zoomc=False,pfile=False):\n \n self.mbox = mbox # Box half-size\n self.nbox = 2*mbox + 1 # Box size\n # Adjust central pixel location to ensure box fits within full image\n self.ix = int( np.median( [ mbox, jx, img.nx-mbox-1 ] ) )\n self.iy = int( np.median( [ mbox, jy, img.ny-mbox-1 ] ) )\n iymin = self.iy - mbox\n iymax = self.iy + mbox\n ixmin = self.ix - mbox\n ixmax = self.ix + mbox\n label = img.label # Copy information from full disk image\n self.label = label\n self.desc = img.desc[label]\n self.img = img.images[label][iymin:iymax+1,ixmin:ixmax+1]\n self.disp(zoomc)\n if hist: self.hist(geo,pfile)", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes, gt_boxes=None, min_scale=None, crop_size=None, n_imgs=0):\n\n height, width, _ = imgs[0].shape\n\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\n\n # `transform.py` is list of np.array. 
However, for AVA, we only have\n # one np.array.\n boxes = [boxes]\n\n crop_size = crop_size if self.multigrid_enabled and crop_size is not None else self._crop_size\n \n if self._split != 'train':\n assert gt_boxes is not None\n gt_boxes = cv2_transform.clip_boxes_to_image(gt_boxes, height, width)\n gt_boxes = [gt_boxes]\n\n # The image now is in HWC, BGR format.\n if self._split == \"train\": # \"train\"\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\n imgs,\n min_size=self._jitter_min_scale if not self.multigrid_enabled and min_scale is None else min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = cv2_transform.random_crop_list(\n imgs, crop_size, order=\"HWC\", boxes=boxes, n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n if self.random_horizontal_flip:\n # random flip\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 0.5, imgs, order=\"HWC\", boxes=boxes, \n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n # elif self._split == \"val\":\n # # Short side to test_scale. Non-local and STRG uses 256.\n # imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\n # boxes, gt_boxes = cv2_transform.scale_boxes(\n # self._crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n # )\n # boxes, gt_boxes = [boxes], [gt_boxes]\n # imgs, boxes, gt_boxes = cv2_transform.spatial_shift_crop_list(\n # self._crop_size, imgs, 1, boxes=boxes, gt_boxes=gt_boxes\n # )\n\n # if self._test_force_flip:\n # imgs, boxes = cv2_transform.horizontal_flip_list(\n # 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes\n # )\n elif self._split == \"val\" or self._split == \"test\":\n # Short side to test_scale. 
Non-local and STRG uses 256.\n imgs = [cv2_transform.scale(crop_size, img) for img in imgs]\n boxes, gt_boxes = cv2_transform.scale_boxes(\n crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n )\n boxes, gt_boxes = [boxes], [gt_boxes]\n\n if self._test_force_flip:\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes,\n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n else:\n raise NotImplementedError(\n \"Unsupported split mode {}\".format(self._split)\n )\n\n # Convert image to CHW keeping BGR order.\n if self.cfg.MODEL.USE_SPA_CONF:\n try:\n if len(imgs[n_imgs].shape) == 2:\n imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n elif len(imgs[n_imgs].shape) > 3:\n imgs[n_imgs:] = [np.expand_dims(img.squeeze(), axis=-1) for img in imgs[n_imgs:]]\n except:\n import pdb; pdb.set_trace()\n \n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # try:\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\n # except:\n # print('imgs[n_imgs].shape:', imgs[n_imgs].shape)\n # print('len(imgs):', len(imgs))\n # print('n_imgs:', n_imgs)\n # import pdb; pdb.set_trace()\n\n # Image [0, 255] -> [0, 1].\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [img / 255.0 for img in imgs[:n_imgs]]\n else: \n imgs = [img / 255.0 for img in imgs]\n\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [\n np.ascontiguousarray(\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[:n_imgs]\n ]\n imgs[n_imgs:] = [\n np.ascontiguousarray(\n img.reshape((1, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[n_imgs:]\n ]\n else:\n imgs = [\n np.ascontiguousarray(\n # img.reshape((3, self._crop_size, self._crop_size))\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs\n ]\n\n # Do color augmentation (after divided by 255.0).\n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = imgs[n_imgs:]\n imgs = imgs[:n_imgs]\n if self._split == \"train\" and self._use_color_augmentation: # False\n if not self._pca_jitter_only:\n imgs = cv2_transform.color_jitter_list(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = cv2_transform.lighting_list(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = [\n cv2_transform.color_normalization(\n img,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n for img in imgs\n ]\n\n # Concat list of images to single ndarray.\n imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in imgs], axis=1\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n imgs = imgs[::-1, ...]\n\n imgs = np.ascontiguousarray(imgs)\n imgs = torch.from_numpy(imgs)\n \n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in skeleton_imgs], axis=1\n )\n skeleton_imgs = np.ascontiguousarray(skeleton_imgs)\n skeleton_imgs = torch.from_numpy(skeleton_imgs)\n\n boxes = cv2_transform.clip_boxes_to_image(\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if gt_boxes is not None:\n gt_boxes = 
cv2_transform.clip_boxes_to_image(\n gt_boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if self.cfg.MODEL.USE_SPA_CONF:\n return (imgs, skeleton_imgs, boxes) if gt_boxes is None else (imgs, skeleton_imgs, boxes, gt_boxes)\n else:\n return (imgs, boxes) if gt_boxes is None else (imgs, boxes, gt_boxes)", "def set_figure_size(self):\n lims, _ = self.set_lims()\n size_fac = 50\n paperSizeFac = 0.65\n one_dec = 1.6\n xdecs = np.log10(lims(1)) - np.log10(lims(0))\n one_dec = one_dec * 4 / xdecs\n ydecs = np.log10(lims[3]) - np.log10(lims[2])\n paper_width = xdecs * one_dec\n paper_height = (ydecs + 3) * one_dec\n paper_height = min([paper_height, 9])\n rectScreen = [0.5, 0.5, paper_width, paper_height] * size_fac\n rectPaper = [1.0, 1.0, paper_width * paperSizeFac, paper_height * paperSizeFac]\n\n rectRho = [0.15, 0.15 + 2.3 / (ydecs + 3), 0.8, ydecs / (ydecs + 3) * 0.8]\n rectPhi = [0.15, 0.15, 0.8, 2 / (ydecs + 3) * 0.8]\n rects = {\n \"Screen\": rectScreen,\n \"Paper\": rectPaper,\n \"Rho\": rectRho,\n \"Phi\": rectPhi,\n }\n return rects", "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def configure_grid(self):\r\n\r\n for r in range(3):\r\n self.rowconfigure(r, weight=1)\r\n for c in range(3):\r\n self.columnconfigure(c, weight=1)", "def create_prior_boxes(self):\n # value of k for each feature map to create k^2 boxes for each feature map\n feature_map_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5}\n\n # scale for boxes across different feature maps. boxes for inner feature maps\n # are scaled much lower to detect small objects\n obj_scales = {'conv4_3': 0.1, 'conv7': 0.21, 'conv8_2': 0.255, 'conv9_2': 0.30}\n\n # Defined aspect ratio calculated from mean of (w/h) across all bounding boxes\n # from the dataset. The mean is 0.66 with deviation of 0.07. 
So aspect ratio is kept\n # at 0.66 for all feature maps\n aspect_ratios = {'conv4_3': [0.5], 'conv7': [0.55], 'conv8_2': [0.6], 'conv9_2': [.66]}\n\n fmaps = list(feature_map_dims.keys())\n prior_boxes = []\n for k, fmap in enumerate(fmaps):\n # for each feature map, create k*k boxes\n for i in range(feature_map_dims[fmap]):\n for j in range(feature_map_dims[fmap]):\n # calculate center coordinates of boxes\n cx = (j + 0.5) / feature_map_dims[fmap]\n cy = (i + 0.5) / feature_map_dims[fmap]\n\n # For each\n for ratio in aspect_ratios[fmap]:\n prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])\n\n prior_boxes = torch.FloatTensor(prior_boxes).to(device) # (1930, 4)\n prior_boxes.clamp_(0, 1) # (1930, 4)\n\n return prior_boxes", "def setup_image(self):\n # Create the correct size image for the table\n rows = self.table.count('\\n')\n columns = self.table.split('\\n')[0].count('-') + self.table.split('\\n')[0].count('+')\n self.img = Image.new('RGB', ((columns * 12) + 24, rows * 21 + 48), color=(54, 57, 63))\n\n # Initialize font and drawing object\n self.font = ImageFont.truetype('../extra_files/cour.ttf', 20)\n self.draw = ImageDraw.Draw(self.img)\n\n # Draw the table without markings\n for x in range(5):\n self.draw.text((12, 12), self.table, font=self.font, fill=(255, 255, 255))", "def boxes_postprocess(boxes, image_meta):\n if 'scales' in image_meta:\n boxes[:, [0, 2]] /= image_meta['scales'][1]\n boxes[:, [1, 3]] /= image_meta['scales'][0]\n\n if 'padding' in image_meta:\n boxes[:, [0, 2]] -= image_meta['padding'][2]\n boxes[:, [1, 3]] -= image_meta['padding'][0]\n\n if 'crops' in image_meta:\n boxes[:, [0, 2]] += image_meta['crops'][2]\n boxes[:, [1, 3]] += image_meta['crops'][0]\n\n if 'flipped' in image_meta and image_meta['flipped']:\n image_width = image_meta['drifted_size'][1] if 'drifted_size' in image_meta else \\\n image_meta['orig_size'][1]\n boxes_widths = boxes[:, 2] - boxes[:, 0] + 1.\n boxes[:, 0] = image_width - 1 - boxes[:, 2]\n boxes[:, 2] = boxes[:, 0] + boxes_widths - 1.\n\n if 'drifts' in image_meta:\n boxes[:, [0, 2]] += image_meta['drifts'][1]\n boxes[:, [1, 3]] += image_meta['drifts'][0]\n\n return boxes", "def draw_boxes(bboxes: [[int]], img: 'np.array', line_width: int=2) -> 'np.array':\n for x, y, w, h in bboxes:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), line_width)\n return img", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks 
when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def fillingrid(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.fillinpercent(n)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setValue(self.currentnsigs[n])\n else:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n self.nsspins[n].setValue(self.currentnsigs[n])\n self.fillinpercent(n)", "def create_flowbox(self, flowbox, frame_list):\n\n for num_frame in frame_list:\n grid = Gtk.Grid()\n btn = self.new_thumbnail_button(num_frame)\n\n widget_cls_label = Gtk.Label()\n widget_cls_label.set_text(\"?\")\n widget_cls_label.set_size_request(20, 20)\n widget_cls_label.connect(\"draw\", self.area_on_draw, {'frame': num_frame, 'widget_label': widget_cls_label})\n # Add drawing area\n grid.add(btn)\n grid.attach_next_to(widget_cls_label, btn, Gtk.PositionType.BOTTOM, 1, 2)\n\n flowbox.add(grid)\n self.flowbox_layout = flowbox", "def updateFootprintBbox(self):\n # Pull out the image bounds of the parent Footprint\n self.bb = self.fp.getBBox()\n if not self.imbb.contains(self.bb):\n raise ValueError(('Footprint bounding-box %s extends outside image bounding-box %s') %\n (str(self.bb), str(self.imbb)))\n self.W, self.H = self.bb.getWidth(), self.bb.getHeight()\n self.x0, self.y0 = self.bb.getMinX(), self.bb.getMinY()\n self.x1, self.y1 = self.bb.getMaxX(), self.bb.getMaxY()", "def boxer(imgfile, parttree, outstack, boxsize):\n imgarray = mrc.read(imgfile)\n boxedparticles = boxerMemory(imgarray, parttree, boxsize)\n apImagicFile.writeImagic(boxedparticles, outstack)\n return True", "def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n 
dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def set_pic_size(self, im_name):\n im_vals = np.genfromtxt(im_name, delimiter=self.delim)\n self.pic_width = int(np.size(im_vals[0]) - 1) # the first column of ASCII image is row number\n try: self.pic_height = int(np.size(im_vals[:,0])) \n except IndexError: \n self.pic_width = int(np.size(im_vals) - 1)\n self.pic_height = 1\n self.create_rect_mask()\n return self.pic_width, self.pic_height", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def setpopbox(ty,slist,scaledtime,rootpop,poptree):\r\n wadjust = \"\"\r\n for i in range(numpops-1):\r\n wadjust += \"00\"\r\n if(scaledtime != []):\r\n minx_popbox = textwide(wadjust+\"0.00 MYR\", tfactor)\r\n else:\r\n minx_popbox = textwide(wadjust+\"0.00 tu\", tfactor)\r\n minx_popbox /= gv[\"globalscale\"]\r\n if gv[\"localxscale\"] > 0:\r\n minx_popbox /= gv[\"localxscale\"]\r\n\r\n popxvals = []\r\n## if scaledpop == [] then no text is written on time split line and there is more width to work with\r\n for i in range(2*numpops - 1):\r\n## left side temporarily at zero, right side temporarily at upper confidence interval\r\n popxvals.append( [0,slist[4][4][i][1]])\r\n (width,c,popxvals, leftpoint,rightpoint) = centerbox(rootpop,0,popxvals[rootpop][1],poptree,popxvals)\r\n popxvals = popadjustx(popxvals,minx_popbox)\r\n popbox = []\r\n\r\n ## maxwide will be used to adjust the width as a scaler so the part furthest to the right is not too far out\r\n maxwide = 0\r\n for i in range(2*numpops-1):\r\n if maxwide < (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1])):\r\n maxwide = (popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))\r\n maxwide = maxwide/(1.0-minx_popbox)\r\n\r\n if gv[\"localxscale\"] > 0:\r\n maxwide *= gv[\"localxscale\"]\r\n\r\n farright = 0\r\n confint = []\r\n for i in range(2*numpops-1):\r\n confint.append([])\r\n confint[i].append(minx_popbox + ((popxvals[i][1] - (slist[4][4][i][1]-slist[4][4][i][2]))/maxwide))\r\n confint[i].append(minx_popbox + ((popxvals[i][1] + (slist[4][4][i][3]-slist[4][4][i][1]))/maxwide))\r\n if confint[i][1] > farright:\r\n farright = confint[i][1]\r\n popbox.append([[],[]])\r\n popbox[i][0].append(minx_popbox + popxvals[i][0]/maxwide)\r\n popbox[i][1].append(minx_popbox + popxvals[i][1]/maxwide)\r\n if poptree[i][1] == -1:\r\n popbox[i][0].append(gv[\"lineINFy\"])\r\n else:\r\n popbox[i][0].append(ty[poptree[i][1]-1][0])\r\n if poptree[i][0] == 0:\r\n popbox[i][1].append(gv[\"line0y\"])\r\n else:\r\n popbox[i][1].append(ty[poptree[i][0]-1][0])\r\n return popbox,maxwide, confint, farright", "def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize)\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n colors = dict()\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n# crop_img = img[xmin:(xmax - xmin),xmax:(ymax - ymin)]\n# misc.imsave('1.jpg', crop_img)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n 
edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = CLASSES[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n plt.show()", "def __init__(self,phosphene_resolution=(50,50), size=(480,480), jitter=0.35, intensity_var=0.9, aperture=.66, sigma=0.8, custom_grid=None):\n if custom_grid is None:\n self.phosphene_resolution = phosphene_resolution\n self.size = size\n self.phosphene_spacing = np.divide(size,phosphene_resolution)\n self.jitter = jitter\n self.intensity_var = intensity_var\n self.grid = self.create_regular_grid(self.phosphene_resolution,self.size,self.jitter,self.intensity_var)\n self.aperture = np.round(aperture*self.phosphene_spacing[0]).astype(int) #relative aperture > dilation kernel size\n else:\n self.grid = custom_grid\n self.aperture = aperture\n self.sigma = sigma\n self.dilation_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.aperture,self.aperture))\n self.k_size = 11 #np.round(4*sigma+1).astype(int) # rule of thumb: choose k_size>3*sigma", "def sizes(options):\n # import data\n pixels = dict() # volumes are given in #pixels\n snap_mask = \"/net/astrogate/export/astrodata/jgacon/filex/processing/\" \\\n \"export/f8_h50_v100_objs_snap_%d.csv\"\n snap_ids = np.arange(2,28+1)\n z = snapid2z(snap_ids)\n print z\n\n for id in snap_ids:\n snap = snap_mask % (id - 1) # fix: snap number one too low in filename\n pixels[id] = np.genfromtxt(snap)[1:-1,1] # row 2 contains volumes\n # rm void & halo volumes\n\n # visualise\n if \"err\" in options.keys():\n nums = np.array([pixels[id].size for id in snap_ids])\n avgs = np.array([np.mean(pixels[id]) for id in snap_ids])\n mods = np.array([st.mode(pixels[id])[0][0] for id in snap_ids])\n meds = np.array([np.median(pixels[id]) for id in snap_ids])\n stds = np.array([np.std(pixels[id]) for id in snap_ids])\n\n print mods\n print mods.shape\n\n plt.figure()\n plt.title(\"Sizes of filaments as function of redshift\")\n plt.xlabel(\"Redshift $z$\")\n plt.xticks(snap_ids[::3], z[::3])\n\n plt.ylabel(\"Size in #pixels\")\n\n plt.errorbar(snap_ids, avgs, yerr=stds, label=\"Mean\")\n plt.plot(snap_ids, mods, \"g\", label=\"Mode\")\n plt.plot(snap_ids, meds, \"c\", label=\"Median\")\n plt.legend(loc=\"best\")\n\n plt.twinx()\n plt.ylabel(\"#Filaments\", color=\"r\")\n plt.tick_params(\"y\", colors=\"r\")\n\n plt.plot(snap_ids, nums, \"r--\")\n\n plt.savefig(options[\"err\"])\n\n if \"dist\" in options.keys():\n targets = np.array([5,10,15,20,25])\n plt.figure()\n plt.title(\"Volume distribution of filaments\")\n plt.xlabel(\"Volume $V$ in #pixels\")\n plt.ylabel(\"#Element with $V$ / Total #Elements\")\n plt.xlim([0,1000])\n for target in targets:\n sns.kdeplot(pixels[target], label=\"$z$ = %f\" % snapid2z(target))\n plt.legend(loc=\"best\")\n plt.savefig(options[\"dist\"])\n\n if \"dist_inter\" in options.keys():\n default = snap_ids[-1]\n fig, ax = plt.subplots()\n plt.subplots_adjust(bottom=0.25)\n sns.kdeplot(pixels[int(default - 2)], ax=ax)\n plt.xlim([0, 1000])\n plt.ylim([0, 0.01])\n plt.xlabel(\"Volume $V$ of filaments in #pixels\")\n plt.ylabel(\"#Filaments with volume $V$ / Total #Filaments\")\n\n nums = np.array([pixels[id].size for id in snap_ids])\n ax2 = ax.twinx()\n ax2.set_ylabel(\"#Filaments\", color=\"r\", alpha=0.5)\n ax2.tick_params(axis=\"y\", labelcolor=\"r\")\n ax2_x = np.linspace(0, 1000, nums.size)\n ax2.plot(ax2_x, nums, \"r--\", alpha=0.5)\n point, = 
ax2.plot(ax2_x[default - 2], nums[default - 2], \"ro\", alpha=0.5)\n\n axcolor = 'lightgoldenrodyellow'\n axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n sid = Slider(axfreq, \"ID\", 2, 28, valinit=default, valstep=1)\n ax.set_title(\"$z$ = %f\" % snapid2z(default))\n\n def update(val):\n id = sid.val\n\n print id\n #ax.clear()\n ax.set_ydata()\n ax.set_xdata()\n ax.set_title(\"$z$ = %f\" % snapid2z(int(id)))\n ax.set_xlim([0,1000])\n ax.set_ylim([0, 0.01])\n sns.kdeplot(pixels[int(id)], ax=ax)\n point.set_xdata(ax2_x[int(id) - 2])\n point.set_ydata(nums[int(id) - 2])\n fig.canvas.draw_idle()\n sid.on_changed(update)\n\n plt.show()\n\n\n if \"hist\" in options.keys():\n conc = None\n for id, vols in pixels.iteritems():\n data = np.empty((vols.size, 2))\n data[:,0] = id\n data[:,1] = vols\n\n if conc is None:\n conc = data\n else:\n conc = np.vstack((conc, data))\n\n plt.figure()\n plt.hist2d(conc[:,0], conc[:,1], bins=(snap_ids.size, 1000))\n plt.ylim([100,400])\n plt.savefig(options[\"hist\"])", "def create_image_caption_pairs(self):", "def _set_boxes(self, listOfBoxes):\n self._boxes = listOfBoxes", "def generate_image_grid(sess, df, filenames,op, op2):\n #x_points = np.arange(0, 1, 1.5).astype(np.float32)\n #y_points = np.arange(0, 1, 1.5).astype(np.float32)\n\n nx, ny = 12, 1\n #plt.subplot()\n gs = gridspec.GridSpec(nx, ny, hspace=1, wspace=0.05)\n # input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n #\n # plt.imshow(np.array(df[0].tolist()).reshape(28, 28), cmap='gray')\n # plt.show()\n # x = sess.run(op, feed_dict={decoder_input: input_x[0].reshape(1,2)})\n # img = np.array(x.tolist()).reshape(28, 28)\n #\n # plt.imshow(img, cmap='gray')\n # plt.show()\n\n \"\"\" grid \"\"\"\n input_x = sess.run(op2, feed_dict={x_input: df[0:24]})\n for i, g in enumerate(gs):\n\n x = sess.run(op, feed_dict={decoder_input: input_x[i].reshape(1,2)})\n ax = plt.subplot(g)\n img = np.array(x.tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()\n\n for i, g in enumerate(gs):\n\n ax = plt.subplot(g)\n img = np.array(df[i].tolist()).reshape(28, 28)\n ax.imshow(img, cmap='gray')\n ax.set_xticks([])\n ax.set_yticks([])\n #ax.set_aspect('auto')\n ax.set_title(filenames[i])\n plt.show()", "def plt_bboxes(img, classes, scores, bboxes, figsize=(17.78,10), linewidth=1.5):\n fig = plt.figure(figsize=figsize, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n plt.imshow(img)\n height = img.shape[0]\n width = img.shape[1]\n print (\"original height width\", height, width)\n if (classes.shape[0] > 0):\n print (\"This frame has class\")\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random.random(), random.random(), random.random())\n ymin = int(bboxes[i, 0] * height)\n xmin = int(bboxes[i, 1] * width)\n ymax = int(bboxes[i, 2] * height)\n xmax = int(bboxes[i, 3] * width)\n rect = plt.Rectangle((xmin, ymin), xmax - xmin,\n ymax - ymin, fill=False,\n edgecolor=colors[cls_id],\n linewidth=linewidth)\n plt.gca().add_patch(rect)\n class_name = pascal_classes[cls_id]\n plt.gca().text(xmin, ymin - 2,\n '{:s} | {:.3f}'.format(class_name, score),\n bbox=dict(facecolor=colors[cls_id], alpha=0.5),\n fontsize=12, color='white')\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + 
(3,))\n plt.close()\n print(\"Processed data with shape, \", data.shape)\n return data", "def draw_boxes(indexes, frame, all_boxes):\n bbox = []\n mid_points = []\n\n for i in indexes:\n x = i[0]\n box = all_boxes[x]\n bbox.append(box)\n mid_points.append(mid_point(frame, box))\n x1, y1, w, h = box[0], box[1], box[2], box[3]\n x2, y2 = x1+w, y1+h\n\n cv2.rectangle(frame, (x1,y1),(x2,y2),(255,0,0),2) \n\n return mid_points, bbox", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def setUp(self):\n img_path = osp.join(osp.dirname(__file__), '../../data/gray.jpg')\n self.results = {\n 'img_path':\n img_path,\n 'img_shape': (300, 400),\n 'instances': [{\n 'bbox': [0, 0, 10, 20],\n 'bbox_label': 1,\n 'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],\n 'ignore_flag': 0\n }, {\n 'bbox': [10, 10, 110, 120],\n 'bbox_label': 2,\n 'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],\n 'ignore_flag': 0\n }, {\n 'bbox': [50, 50, 60, 80],\n 'bbox_label': 2,\n 'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],\n 'ignore_flag': 1\n }]\n }", "def make_grid_bbox(tensor, box, nrow=8, padding=2,\n normalize=False, range=None, \n scale_each=False, pad_value=0, draw_line=False):\n\n # make the mini-batch of images into a grid\n # nmaps = tensor.size(0)\n nmaps = len(box)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n # height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n height, width = int(256 + padding), int(256 + padding)\n tensor = torch.ones(())\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n # # add the white image into the grid\n # block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n # add the white image into the grid\n block = tensor.new_full((3, height - padding, width - padding), 9.0/13)\n # print(box[0].size())\n # print(box[1].size())\n # assert False\n # num_curr_box = box[0][k].size(0)\n num_curr_box = box[k][0].size(0)\n for z in irange(num_curr_box):\n # label = box[1][k][z].item()\n try:\n label = box[k][1][z].item()\n except:\n print(box)\n print(k)\n assert False\n \n if label != -1:\n block = draw_box(block, box[k][0][z], label, draw_line)\n # print(k, z)\n else:\n break\n # copy to the grid\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(block)\n k = k + 1\n return grid", "def _resize_bboxes(self, ori_bboxes, scale_factor):\n bboxes = ori_bboxes * scale_factor\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.img_shape[0])\n return bboxes", "def draw_boxes_info(image, current_data):\n\n font_position1 = (50, 600)\n font_position2 = (50, 650)\n font_scale = .4\n font_thickness = 1\n\n locations = current_data[\"locations\"] #returns x1, y1, x2, y2\n frame_num = \"Frame Number: \" + str(current_data[\"frame_num\"])\n\n for box in locations:\n box_text = (\"Box locations are x1: {0}, y1: {1}, x2: {2}, y2: {3}\").format(box[1],box[3],box[0],box[2])\n\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 3)\n cv2.putText(image, box_text, font_position1, 
cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n cv2.putText(image, frame_num, font_position2, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n return image", "def display_precomputed_boxes(self, sample_index, all_boxes):\n image_rois = [class_detections[sample_index]\n for class_detections in all_boxes]\n\n image_rois_list = []\n image_classes = []\n for class_index, class_rois in enumerate(image_rois):\n if len(class_rois) > 0:\n classes = np.ones((class_rois.shape[0])) * class_index\n image_rois_list.extend(class_rois)\n image_classes.extend(classes)\n image_rois_list = np.array(image_rois_list)\n image_classes = np.array(image_classes)\n\n show_gt_boxes = False\n self.display_detections(image_rois_list, image_classes, \n self.data_loader.dataset.samples[sample_index])", "def plot_n_box(img_prepro):\n print(img_prepro)\n h, w= img_prepro.shape\n boxes = pytesseract.image_to_boxes(img_prepro)\n for b in boxes.splitlines():\n b = b.split(' ')\n img_prepro = cv2.rectangle(img_prepro, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), (0, 255, 0), 2)\n cv2.imshow('img', img_prepro)\n cv2.waitKey(0)\n\n return", "def setwinsize(self, rows, cols):", "def onet_process(self, image, boxes, height, width):\n data = self.__padding(image, boxes, height, width)\n return data", "def forward(self):\n priors = []\n for k, f in enumerate(self.feature_maps):\n scale = self.image_size / self.strides[k]\n # for i, j in product(range(f), repeat=2):\n for i in range(f[0]):\n for j in range(f[1]):\n # print(i, j)\n # unit center x,y\n cx = (j + 0.5) / scale\n cy = (i + 0.5) / scale\n\n # small sized square box\n size = self.min_sizes[k]\n h = w = size / self.image_size\n priors.append([cx, cy, w, h])\n\n # big sized square box\n size = sqrt(self.min_sizes[k] * self.max_sizes[k])\n h = w = size / self.image_size\n priors.append([cx, cy, w, h])\n\n # change h/w ratio of the small sized box\n size = self.min_sizes[k]\n h = w = size / self.image_size\n for ratio in self.aspect_ratios[k]:\n ratio = sqrt(ratio)\n priors.append([cx, cy, w * ratio, h / ratio])\n priors.append([cx, cy, w / ratio, h * ratio])\n\n priors = torch.Tensor(priors)\n if self.clip:\n priors.clamp_(max=1, min=0)\n return priors", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def __create_blank_page__(self):\n with open(\"active_weather.basic.exp\"+str(self.box_count)+\".box\",\"w\") as f:\n f.write(\"\")\n\n self.width = 2508\n # self.height = 200\n self.height = 4000\n self.training_page = np.zeros((self.height,self.width),dtype=np.uint8)\n self.training_page.fill(255)\n\n self.row_bitmaps = []\n self.row_characters = []\n\n self.row_pointer = spacing\n self.column_pointer = spacing\n\n\n # self.__box_file_flush__()\n self.box_file_entries = []\n self.used_height = spacing", "def plot_image(image, boxes, class_dic, frame_n):\n im = np.array(image)\n print(im.shape)\n height, width, _ = im.shape\n\n # Create figure and axes\n # fig, ax = plt.subplots(1)\n # # Display the image\n # ax.imshow(im)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='auto')\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(im)\n # box[0] is x 
midpoint, box[2] is width\n # box[1] is y midpoint, box[3] is height\n\n # Create a Rectangle potch\n for box in boxes:\n class_ = int(box[0])\n confidence_ = box[1]\n box = box[2:]\n assert len(box) == 4, \"Got more values than in x, y, w, h, in a box!\"\n upper_left_x = box[0] - box[2] / 2\n upper_left_y = box[1] - box[3] / 2\n rect = patches.Rectangle(\n (upper_left_x * width, upper_left_y * height),\n box[2] * width,\n box[3] * height,\n linewidth=1,\n edgecolor=\"r\",\n facecolor=\"none\",\n )\n\n\n label_bbox = class_dic[class_] + \":::\" + f\"{100 * confidence_:.2f}\" + \"%\"\n plt.text(upper_left_x * width, upper_left_y * height - 10, label_bbox, size=10, rotation=0,\n ha=\"left\", va=\"bottom\",\n bbox=dict(boxstyle=\"square\",\n ec=(1, 0, 0),\n fc=(1, 0, 0),\n )\n )\n \n \n # Add the patch to the Axes\n ax.add_patch(rect)\n if frame_n:\n plt.savefig(str(frame_n) + '.png', dpi=200, bbox_inches=\"tight\", transparent=True, pad_inches=0)\n else:\n plt.show()", "def setUp(self):\n self.single_box = [ScoredRect(Rect(0, 10, 0, 20), 0.0)]\n\n self.ground_truths = [ScoredRect(Rect(0, 10, 0, 10), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(-40, -30, -20, -10), 0.5),\n ]\n self.detections = [ScoredRect(Rect(0, 10, 0, 10), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(10, 20, 20, 30), 0.5),\n ScoredRect(Rect(100, 110, 20, 30), 0.5),\n ]", "def normalize_boxes(all_boxes, image_width, image_height):\n new_boxes = []\n for boxes_per_frame in all_boxes:\n new_boxes_per_frame = []\n for i, box in enumerate(boxes_per_frame):\n left, top, right, bottom = box\n new_boxes_per_frame.append((left / image_width, top / image_height, right / image_width, bottom / image_height))\n new_boxes.append(new_boxes_per_frame)\n\n assert(len(new_boxes) == len(all_boxes))\n for i, boxes_per_frame in enumerate(all_boxes):\n assert(len(boxes_per_frame) == len(new_boxes[i]))\n\n\n\n return new_boxes", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def resize_spacing(img_sz,img_sp,factor):\n img_sz_np = np.array(list(img_sz))\n img_sp_np = np.array(list(img_sp))\n new_sz_np = img_sz_np*factor\n new_sp = img_sp_np*(img_sz_np-1)/(new_sz_np-1)\n return tuple(list(new_sp))", "def __init__(self):\n toplevel_create_image = mw.tkinter.Toplevel(mw.MainWindow, bg = \"#a1a1a1\")\n \"\"\" top level window attributes\"\"\"\n toplevel_create_image.title(\"create new image\")\n toplevel_create_image.geometry(\"800x600+600+200\")\n toplevel_create_image.resizable(width = False, height = False)\n self. 
top_down = 60\n\n # UI\n image_width_label = mw.tkinter.Label(toplevel_create_image, text = \"Width: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_width_label.grid(row = 0, column = 0, padx = 20, pady = 20)\n image_height_label = mw.tkinter.Label(toplevel_create_image, text = \"Height: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_height_label.grid(row = 1, column = 0)\n\n # text box image size \n image_width_text_box = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_width_text_box.grid(row = 0, column = 1, pady = 20)\n\n image_height_text_box = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_height_text_box.grid(row = 1, column = 1)\n\n # color mode\n color_mode_combo_box = mw.tkinter.Label(toplevel_create_image, text = \"color mode: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n color_mode_combo_box.grid(row = 2, column = 0, padx = 20, pady = 20)\n\n # image_color_mode_text = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n # bg = \"grey\", fg = \"white\")\n # image_color_mode_text.grid(row = 2, column = 1)\n\n color_mode_combo_box = ttk.Combobox(toplevel_create_image, values = [\n \"RGB\",\n \"RGBA\",\n \"CYMA\"\n ])\n color_mode_combo_box.current(0)\n color_mode_combo_box.grid(row = 2, column = 1)\n\n # background color\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"red: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 3, column = 0, padx = 30)\n\n image_background_color_text_red = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_red.grid(row = 3, column = 1)\n# -----------------------------------------------------------------------------------------------------------\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"green: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 4, column = 0, padx = 30)\n\n image_background_color_text_green = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_green.grid(row = 4, column = 1)\n\n# ----------------------------------------------------------------------------------------------------------\n image_background_color_label = mw.tkinter.Label(toplevel_create_image, text = \"blue: \",\n font = (\"\", 15), bg = \"#a1a1a1\", fg = \"white\")\n image_background_color_label.grid(row = 5, column = 0, padx = 30)\n\n image_background_color_text_blue = mw.tkinter.Text(toplevel_create_image, width = 20, height = 2,\n bg = \"grey\", fg = \"white\")\n image_background_color_text_blue.grid(row = 5, column = 1)\n # create_new_image\n def create_new_image():\n new_image = Image.new(mode = (color_mode_combo_box.get()),\n size = (int(image_width_text_box.get(\"1.0\", mw.tkinter.END)),\n int(image_height_text_box.get(\"1.0\", mw.tkinter.END)) ),\n color = ((int(image_background_color_text_red.get(\"1.0\", mw.tkinter.END))),\n (int(image_background_color_text_green.get(\"1.0\", mw.tkinter.END))),\n (int(image_background_color_text_blue.get(\"1.0\", mw.tkinter.END)))))\n # get new image properties\n the_width, the_height = new_image.size\n # to-do clear the image\n new_image_compatible = ImageTk.PhotoImage(new_image)\n # load the image on the canvas \n 
app.default_image_canvas.delete(\"all\")\n # app.default_image_canvas = mw.tkinter.Canvas(toplevel_create_image, height = the_height, width = the_width)\n app.default_image_canvas.create_image(0,0, image = new_image_compatible, anchor = mw.tkinter.NW)\n # app.load_image_canvas.grid(row = 0, column = 1)\n toplevel_create_image.destroy()\n # to-do - correct error\n app.default_image_canvas.configure(mw.MainWindow)\n \n\n\n # create_image_button\n create_image_button = mw.tkinter.Button(toplevel_create_image, text = \"create image\",\n bg = \"grey\", fg = \"white\", font = (\"times\", 13,\"bold\"),\n command = create_new_image)\n create_image_button.grid(row = 6, column = 0, padx = 40, pady = 20)", "def draw_boxes(self, image, boxes):\n return draw_boxes(image, boxes, self.labels)", "def initrows(self):\n #~ self.initrows2()\n self.rows=[]\n for yy in range(self.height):\n row=[]\n for xx in range(self.width):\n if (xx,yy) in self.allsqs:\n row.append(0)\n #~ elif p in self.gatesqs:\n #~ row.append(0)\n else:\n row.append(1)\n self.rows.append(row)", "def rescale_box(box, img_size_orig, img_size_new):\n orig_w, orig_h = img_size_orig\n new_w, new_h = img_size_new\n scale_x = new_w / orig_w\n scale_y = new_h / orig_h\n sx, sy, ex, ey = box\n return [sx * scale_x, sy * scale_y, ex * scale_x, ey * scale_y]", "def __getitem__(self, index):\n image_id = self.image_ids[index]\n\n filename = self.image_id_to_filename[image_id]\n image_path = os.path.join(self.image_dir, filename)\n\n with open(image_path, 'rb') as f:\n with PIL.Image.open(f) as image:\n WW, HH = image.size\n image = self.transform(image.convert('RGB'))\n\n H, W = self.image_size\n objs, boxes, masks = [], [], []\n\n for object_data in self.image_id_to_objects[image_id]:\n # objs.append(object_data['category_id'])\n objs.append(int(object_data.find('name').get(\"id\")))\n\n bndbox = object_data.findall('bndbox')[0]\n xmin = int(bndbox.find('xmin').text)\n ymin = int(bndbox.find('ymin').text)\n xmax = int(bndbox.find('xmax').text)\n ymax = int(bndbox.find('ymax').text)\n w = xmax - xmin\n h = ymax - ymin\n\n boxes.append(torch.FloatTensor([xmin, ymin, xmax, ymax]))\n\n # This will give a numpy array of shape (HH, WW)\n mask = torch.zeros(1, H, W)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n mask[:, round(ymin * H):max(round(ymin * H) + 1, round(ymax * H)),\n round(xmin * W):max(round(xmin * W) + 1, round(xmax * W))] = 1\n masks.append(mask)\n # shuffle objs\n O = len(objs)\n rand_idx = list(range(O))\n random.shuffle(rand_idx)\n\n objs = [objs[i] for i in rand_idx]\n boxes = [boxes[i] for i in rand_idx]\n masks = [masks[i] for i in rand_idx]\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n masks = torch.stack(masks, dim=0)\n\n # print(image_path)\n\n return image, objs, boxes, masks", "def show_field(self, vehicles, type):\n\n # starting pixels x = 0, y = 0 on field image\n start_x = 78\n start_y = 45\n\n # block pixel width is slightly different per field size\n if self.size == 6:\n block_width = 72\n elif self.size == 9:\n block_width = 69\n elif self.size == 12:\n block_width = 68.5\n\n field = plt.imread(f\"data/RushHourImages/RushHour{self.size}.jpg\")\n fig, ax = plt.subplots()\n plt.imshow(field)\n plt.axis('off')\n\n for vehicle in vehicles:\n if vehicle.orientation == 'H':\n x = start_x + (vehicle.x * block_width)\n y = start_y + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car{vehicle.id}.png\")\n else:\n car = 
plt.imread(f\"data/RushHourImages/Truck{vehicle.id}.png\")\n\n # truck: the image coordinate is his middle, which changes with the length of the car\n x += 40\n\n if vehicle.orientation == 'V':\n x = start_y + (vehicle.x * block_width)\n y = start_x + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car-rotated{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck-rotated{vehicle.id}.png\")\n y += 40\n\n if self.size == 6:\n imagebox = OffsetImage(car, zoom=0.6)\n elif self.size == 9:\n imagebox = OffsetImage(car, zoom=0.4)\n elif self.size == 12:\n imagebox = OffsetImage(car, zoom=0.3)\n\n imagebox.image.axes = ax\n xy = (x, y)\n ab = AnnotationBbox(imagebox, xy, frameon=False)\n ax.add_artist(ab)\n\n if type == True:\n plt.show(block=False)\n plt.pause(0.001)\n plt.close()\n else:\n plt.show()", "def proposal_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None, train_anchor_inds=None, proposals=None):\n assert proposals is not None\n rois = filter_roi_proposals(proposals[:, 2:].data.contiguous(), proposals[:, 1].data.contiguous(), np.array([2000] * len(im_sizes)), nms_thresh=0.7, pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000, post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000)\n if self.training:\n all_rois, labels, bbox_targets = proposal_assignments_det(rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)\n all_rois = torch.cat((all_rois, Variable(rois)), 0)\n else:\n all_rois = Variable(rois, volatile=True)\n labels = None\n bbox_targets = None\n rpn_scores = None\n rpn_box_deltas = None\n rel_labels = None\n return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels", "def _to_image_coords(self, boxes, height, width):\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords", "def boxplots(self, groups, nrows, ncols, type):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n for ax, (effs, dat) in zip(axs, groups):\n ax[0].boxplot(dat[\"perm\"])\n ax[1].boxplot(dat[\"t_test\"])\n ax[0].set_ylabel(\"Errors\")\n if type == \"es\":\n ax[0].set_title(f\"Effect size = {effs}, Test = Perm\")\n ax[1].set_title(f\"Effect size = {effs}, Test = t-test\")\n elif type == \"samp2\":\n ax[0].set_title(f\"Sample size = {effs}, Test = Perm\")\n ax[1].set_title(f\"Sample size = {effs}, Test = t-test\")\n\n\n plt.tight_layout()\n #plt.show()", "def draw_bboxes_withindex(img,boxes, uids):\n source = Image.fromarray(img)\n draw = ImageDraw.Draw(source)\n w2,h2 = (img.shape[0],img.shape[1])\n \n font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSerif.ttf', 40)\n #font = ImageFont.truetype('arial.ttf', 24)\n\n\n idx = 0\n\n for b in boxes:\n xmin,ymin,xmax,ymax = b\n \n for j in range(3):\n draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline=\"red\")\n draw.text((xmin+20, ymin+70), str(uids[idx]), font = font)\n idx +=1\n return source", "def load_boxes(self, data):\r\n\r\n # worldbox represents the total map area\r\n self.worldbox = self.Box((0, 0), (len(data[0]) * self.cellwidth, len(data) * self.cellwidth))\r\n\r\n # create a box corresponding to each character/cell in the map file\r\n tl_x = 0\r\n tl_y = 0\r\n for row in 
data:\r\n for cell in row:\r\n if cell == \".\":\r\n self.wallboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n elif cell == \"x\":\r\n self.targetboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n tl_x += self.cellwidth\r\n tl_x = 0\r\n tl_y += self.cellwidth", "def layout_graphics(self):\n # Graphics layout object to place viewboxes in\n self.g_layout = pg.GraphicsLayoutWidget(border=(80, 80, 80))\n self.g_layout.setCursor(QtCore.Qt.CrossCursor)\n\n # Viewboxes for images\n # aspect locked so that pixels are square\n # y inverted so that (0,0) is top left as in Thorlabs software\n options = {\"lockAspect\":True, \"invertY\":True}\n self.vb_image = self.g_layout.addViewBox(row=0, col=0, rowspan=2, **options)\n self.vb_zoom = self.g_layout.addViewBox(row=0, col=2, **options)\n self.vb_residuals = self.g_layout.addViewBox(row=1, col=2, **options)\n\n # Link zoom and residual views\n self.vb_zoom.setXLink(self.vb_residuals)\n self.vb_zoom.setYLink(self.vb_residuals)\n\n # Viewboxes for slice data\n # Both boxes have mouse disabled - range is fixed so we don't want to\n # scale them accidentally\n # Y box has y inverted to match the main image\n # Y box has x inverted so that zero pixel value is far from the image\n options = {\"enableMouse\":False, \"enableMenu\": False}\n self.vb_x = self.g_layout.addViewBox(row=2, col=0, **options)\n self.vb_y = self.g_layout.addViewBox(row=0, col=1, rowspan=2,\n invertX=True, invertY=True, **options)\n\n # Link the slice axes to the main image so that when we zoom/pan the\n # main image, our slices zoom/pan also\n self.vb_x.setXLink(self.vb_image)\n self.vb_y.setYLink(self.vb_image)\n\n # Disable autoscaling and fix range to maximum pixel intensity\n self.vb_x.setRange(yRange=(0,255))\n self.vb_y.setRange(xRange=(0,255))\n self.vb_x.disableAutoRange(axis=self.vb_x.YAxis)\n self.vb_y.disableAutoRange(axis=self.vb_y.XAxis)\n\n # Background color must not be black so that we can see where images\n # start/end\n color = pg.mkColor(40,40,40)\n self.vb_image.setBackgroundColor(color)\n self.vb_zoom.setBackgroundColor(color)\n self.vb_residuals.setBackgroundColor(color)\n self.vb_x.setBackgroundColor(color)\n self.vb_y.setBackgroundColor(color)\n self.g_layout.setBackground(color)\n\n self.vb_image.addItem(self.image)\n self.vb_image.addItem(self.fit_v_line)\n self.vb_image.addItem(self.fit_h_line)\n self.vb_image.addItem(self.mark_v_line)\n self.vb_image.addItem(self.mark_h_line)\n # self.vb_image.addItem(self.cursor_text)\n self.vb_image.addItem(self.cursor_delta)\n self.vb_image.addItem(self.beam_delta)\n self.vb_image.addItem(self.history_plot)\n # Figure out how to overlay properly?\n # self.vb_image.addItem(self.x_slice)\n # self.vb_image.addItem(self.x_fit)\n # self.vb_image.addItem(self.y_slice)\n # self.vb_image.addItem(self.y_fit)\n self.vb_zoom.addItem(self.zoom)\n self.vb_zoom.addItem(self.fit_maj_line)\n self.vb_zoom.addItem(self.fit_min_line)\n self.vb_zoom.addItem(self.zoom_text)\n self.vb_residuals.addItem(self.residuals)\n self.vb_residuals.addItem(self.residuals_text)\n self.vb_x.addItem(self.x_slice)\n self.vb_x.addItem(self.x_fit)\n self.vb_x.addItem(self.cursor_v)\n self.vb_y.addItem(self.y_slice)\n self.vb_y.addItem(self.y_fit)\n self.vb_y.addItem(self.cursor_h)\n\n self.res_legend.setParentItem(self.vb_residuals)\n self.cursor_text.setParentItem(self.vb_image)\n\n self.vb_image.setRange(QtCore.QRectF(0, 0, 1280, 1024))\n self.vb_zoom.setRange(QtCore.QRectF(0, 0, 50, 
50))\n self.vb_residuals.setRange(QtCore.QRectF(0, 0, 50, 50))\n\n #\n # Size hints below here\n #\n self.g_layout.ci.layout.setColumnStretchFactor(0, 4)\n self.g_layout.ci.layout.setColumnStretchFactor(1, 1)\n self.g_layout.ci.layout.setColumnStretchFactor(2, 2)\n self.g_layout.ci.layout.setRowStretchFactor(0, 2)\n self.g_layout.ci.layout.setRowStretchFactor(1, 2)\n self.g_layout.ci.layout.setRowStretchFactor(2, 1)\n\n self.vb_x.setMinimumHeight(50)\n self.vb_y.setMinimumWidth(50)\n self.vb_x.setMaximumHeight(100)\n self.vb_y.setMaximumWidth(100)\n self.vb_image.setMinimumSize(640, 512)\n self.vb_zoom.setMinimumSize(320, 320)\n self.vb_residuals.setMinimumSize(320, 320)\n\n self.g_layout.setMinimumSize(1100,562)", "def plantGrid(name, minimum, maximum, spacing):\n name = sys.argv[1]\n minimum = sys.argv[2]\n maximum = sys.argv[3]\n minimum = int(minimum)\n maximum = int(maximum)\n #convert to flux\n min1 = conversion(minimum)\n max1 = conversion(maximum)\n min1 = int(min1)\n max1 = int(max1)\n #brightness = [random.uniform(min1, max1) for _ in xrange(len(position))]\n #print brightness\n spacing = sys.argv[4]\n spacing = float(spacing)\n print len(data1)\n #create the position array for x values\n x = np.arange(6, len(data1), spacing)\n #create the position array for y values\n y = np.arange(6, len(data1), spacing)\n x2 = np.arange(6, len(data1), spacing)\n y2 = np.flipud(y)\n \n #combine both arrays to form a grid\n position = np.column_stack((x,y))\n position2 = np.column_stack((x2,y2))\n \n #combine both lines of grid to one array\n position = np.concatenate((position, position2), axis = 0)\n \n #create a random brightness array between the min and max values\n brightness = np.array([random.uniform(min1, max1) for _ in range(0,len(position))])\n \n #add to image file and subtract\n fakestars.addtofits(name, out_file, psf, position, brightness, coordsys, verbose)\n fakestars.addtofits(name, outfile2, psf, position, brightness, coordsys, verbose)\n imarith.imsubtract(out_file, outfile2, differenceFile, clobber=True)", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def initialize(self, frame):\n self.grid_size = 5\n\n Label(frame, text=\"Grid Size:\").grid(row=0)\n\n self.e1 = Scale(frame, from_=self.grid_size, to=25, orient=HORIZONTAL)\n self.e1.grid(row=0, column=1)\n\n return self.e1", "def make(self) -> None:\n\n # arbitrarily selecting the first image from the list, index 0\n with Image.open(self.image_list[0]) as first_frame_image_in_list:\n\n # Find the width and height of the first image of the list.\n # Assuming all the images have same size.\n frame_image_width, frame_image_height = first_frame_image_in_list.size\n\n # scale is the ratio of collage_image_width and product of\n # images_per_row_in_collage with frame_image_width.\n\n # The scale will always lie between 0 and 1, which implies that\n # the images are always going to get downsized.\n scale = (self.collage_image_width) / (\n self.images_per_row_in_collage * frame_image_width\n )\n\n # Calculating the scaled height and width for the frame image.\n scaled_frame_image_width = ceil(frame_image_width * scale)\n 
scaled_frame_image_height = ceil(frame_image_height * scale)\n\n # Divide the number of images by images_per_row_in_collage. The later\n # was calculated by taking the square root of total number of images.\n number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)\n\n # Multiplying the height of one downsized image with number of rows.\n # Height of 1 downsized image is product of scale and frame_image_height\n # Total height is number of rows times the height of one downsized image.\n self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)\n\n # Create an image of passed collage_image_width and calculated collage_image_height.\n # The downsized images will be pasted on this new base image.\n # The image is 0,0,0 RGB(black).\n collage_image = Image.new(\n \"RGB\", (self.collage_image_width, self.collage_image_height)\n )\n\n # keep track of the x and y coordinates of the resized frame images\n i, j = (0, 0)\n\n # iterate the frames and paste them on their position on the collage_image\n for count, frame_path in enumerate(self.image_list):\n\n # Set the x coordinate to zero if we are on the first column\n # If self.images_per_row_in_collage is 4\n # then 0,4,8 and so on should have their x coordinate as 0\n if (count % self.images_per_row_in_collage) == 0:\n i = 0\n\n # open the frame image, must open it to resize it using the thumbnail method\n frame = Image.open(frame_path)\n\n # scale the opened frame images\n frame.thumbnail(\n (scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS\n )\n\n # set the value of x to that of i's value.\n # i is set to 0 if we are on the first column.\n x = i\n\n # It ensures that y coordinate stays the same for any given row.\n # The floor of a real number is the largest integer that is less\n # than or equal to the number. floor division is used because of\n # the zero based indexing, the floor of the division stays same\n # for an entier row as the decimal values are negled by the floor.\n # for the first row the result of floor division is always zero and\n # the product of 0 with scaled_frame_image_height is also zero, they\n # y coordinate for the first row is 0.\n # For the second row the result of floor division is one and the prodcut\n # with scaled_frame_image_height ensures that the y coordinate is\n # scaled_frame_image_height below the first row.\n y = (j // self.images_per_row_in_collage) * scaled_frame_image_height\n\n # paste the frame image on the newly created base image(base image is black)\n collage_image.paste(frame, (x, y))\n frame.close()\n\n # increase the x coordinate by scaled_frame_image_width\n # to get the x coordinate of the next frame. unless the next image\n # will be on the very first column this will be the x coordinate.\n i = i + scaled_frame_image_width\n\n # increase the value of j by 1, this is to calculate the y coordinate of\n # next image. 
The increased number will be floor divided by images_per_row_in_collage\n # therefore the y coordinate stays the same for any given row.\n j += 1\n\n # save the base image with all the scaled frame images embeded on it.\n collage_image.save(self.output_path)\n collage_image.close()", "def _build_meds_layout(self):\n\n\n nim = self.image_info.size\n nobj = self.obj_data.size\n\n trim_to_coadd = self.get('trim_to_coadd',False)\n if trim_to_coadd:\n print(' trimming to coadd')\n coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \\\n self._get_pos_and_bounds(self.obj_data, 0)\n in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol'])\n w_in_bnds, = np.where(in_bnds == True)\n assert w_in_bnds.size > 0,\"none found in coadd\"\n\n w_in_bnds = coadd_q[w_in_bnds]\n self.obj_data = self.obj_data[w_in_bnds]\n\n self._do_psf_setup()\n\n # box sizes are even\n half_box_size = self.obj_data['box_size']//2\n\n for file_id in range(nim):\n\n wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id)\n\n # do the test\n in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])\n q_rc, = np.where(in_bnds == True)\n print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))\n\n # now make sure everything is there\n if self['check_in_first_image']:\n if file_id == 0 and len(self.obj_data['ra']) != len(q_rc):\n raise MEDSCreationError('Not all objects were found in first image for '\n 'MEDS making (which is the coadd/detection '\n 'image by convention).')\n # compose them\n q = q[q_rc]\n\n # fill in the object_data structure\n\n # note q_rc since pos was created using obj_data[q]\n qrow = pos['zrow'][q_rc]\n qcol = pos['zcol'][q_rc]\n\n icut = self.obj_data['ncutout'][q]\n self.obj_data['file_id'][q,icut] = file_id\n self.obj_data['orig_row'][q,icut] = qrow\n self.obj_data['orig_col'][q,icut] = qcol\n\n # this results in the object center being close to\n # the natural center (dim-1.)/2.\n ostart_row = qrow.astype('i4') - half_box_size[q] + 1\n ostart_col = qcol.astype('i4') - half_box_size[q] + 1\n crow = qrow - ostart_row\n ccol = qcol - ostart_col\n\n self.obj_data['orig_start_row'][q,icut] = ostart_row\n self.obj_data['orig_start_col'][q,icut] = ostart_col\n self.obj_data['cutout_row'][q,icut] = crow\n self.obj_data['cutout_col'][q,icut] = ccol\n\n # do jacobian, in original, not-offset coords\n # note q_rc since pos was created using self.obj_data[q]\n jacob = wcs.get_jacobian(\n x=pos['wcs_col'][q_rc],\n y=pos['wcs_row'][q_rc])\n\n # jacob is a tuple of arrays\n self.obj_data['dudcol'][q,icut] = jacob[0]\n self.obj_data['dudrow'][q,icut] = jacob[1]\n self.obj_data['dvdcol'][q,icut] = jacob[2]\n self.obj_data['dvdrow'][q,icut] = jacob[3]\n\n # increment\n self.obj_data['ncutout'][q] += 1\n\n w,=np.where(self.obj_data['ncutout'] > 0)\n print('%d/%d had ncut > 0' % (w.size, self.obj_data.size))\n #self.obj_data = self.obj_data[w]\n\n self.obj_data = self._make_resized_data(self.obj_data)\n print('setting number field as sequential')\n self.obj_data['number'] = 1+np.arange(self.obj_data.size)\n\n\n self._set_start_rows_and_pixel_count()\n\n if self['survey']=='cosmos':\n self._set_psf_layout_hst()\n else:\n self._set_psf_layout_psfex()", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. 
NDVI, BWNDVI\n self.num_files_per_point = 3" ]
[ "0.62900573", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177", "0.55709535", "0.5566756", "0.5561757", "0.55550766", "0.55439216", "0.55324256", "0.5499715", "0.5493037", "0.5491024", "0.5488773", "0.54860556", "0.5479351", "0.5477189", "0.54661876", "0.54654974", "0.5462513", "0.5436562", "0.54347384", "0.5429081", "0.54147685", "0.5409667", "0.54019666", "0.5395863", "0.5394675", "0.5392269", "0.5371903", "0.53668004", "0.53408337", "0.53349805", "0.533052", "0.53242874", "0.52984256", "0.529094", "0.5273975", "0.5269214", "0.5264517", "0.5261543", "0.5260264", "0.5256314", "0.52466404", "0.52401096", "0.52370954", "0.52364236", "0.52281755", "0.52137214", "0.52120256", "0.5209313", "0.52089477", "0.5208163", "0.5206278", "0.52018976", "0.51996654", "0.51974803", "0.5184914", "0.51843166", "0.51824045", "0.5175214", "0.517447", "0.51738864", "0.517314", "0.51640254", "0.5162107", "0.51584274", "0.5151213", "0.51493704", "0.5145289", "0.5140283", "0.51401466", "0.51368606", "0.5136106", "0.51344085", "0.51342916", "0.5132771", "0.5125228", "0.5124148", "0.512393", "0.512312", "0.5120537", "0.51197904" ]
0.6761654
0
read the cosmos catalog
def _read_catalog(self, catname):
    print('loading catalog:',catname)
    with fitsio.FITS(catname,lower=True) as fits:
        #cat = fits[1][100000:110000]
        if 'object_data' in fits:
            print('reading from MEDS object data')
            ext='object_data'
        else:
            ext=1

        cat = fits[ext][:]

        # one cut here based on if we matched to the galsim cat
        w, = np.where(
            #(cat['mu_class'] < 3)
            #&
            #(cat['mask']==0)
            #&
            (cat['gscosmos_index'] >= 0)
        )
        print('initial cuts %d/%d %g%%' % (w.size,cat.size,w.size/cat.size*100))
        cat = cat[w]

    return cat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_catalog(catalog):\n with open(catalog, \"r\") as f:\n header = f.readline()\n if header.startswith('#EventID | Time | Latitude | Longitude | Depth/km'):\n catalog = _read_iris(f)\n elif header.startswith('time, latitude, longitude, depth, depthUnits, magnitude'):\n catalog = _read_sod(f)\n else:\n sys.exit(\"Unknown catalog format\")\n return catalog", "def getCatalogs():", "def getCatalog(unique_name):", "def read_catalog():\n categories = session.query(Category).all()\n items = session.query(CatalogItem).order_by(CatalogItem.id.desc())\n quantity = items.count()\n return categories, items, quantity", "def catalog(self) -> str:\n return pulumi.get(self, \"catalog\")", "def get_catalog():\n return jsonify(getCatalog())", "def sample():\n write_drive_catalog_file(\"j:\\\\\", \"SANSA2_1G\", r\"c:\\SANSA2_1G.txt\")\n write_drive_catalog_file(\"k:\\\\\", \"8GB\", r\"c:\\8GB.txt\")\n write_master_catalog_file([r\"c:\\SANSA2_1G.txt\", r\"c:\\8GB.txt\"], r\"c:\\Master_Catalog.txt\")\n entries = read_catalog_file_entries(r\"c:\\Master_Catalog.txt\")\n for entry in entries:\n println(entry_to_line(entry))", "def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)", "def _get_catalog_object(self):\n return self.cluster.catalogd.service.read_debug_webpage(\n \"catalog_object?object_type=TABLE&object_name=functional.alltypes\")", "def get_catalog(self) -> Dict[str, str]:\n return self.catalog", "def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return", "def loadData(catalog):\n return controller.loadData(catalog)", "def loadData(catalog):\n return controller.loadData(catalog)", "def test_api_ucs_get_catalog(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for elementTypes in api_data[\"json\"]:\n for element in api_data[\"json\"][str(elementTypes)]:\n api_data_c = request(\"get\", \"/catalog\",\n query={\"identifier\": element[\"relative_path\"].strip(\"/\")})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Zero catalog elements found\")\n # TO DO: deeper check on the catalog data", "def read_catalogs():\n if not StateHolder.config_parsed:\n config = YamlUtils.read(file=StateHolder.catalog_config_file, doc=Doc.CATALOGS_CONFIG)\n\n if not type(config) is dict:\n config['default'] = {}\n StateHolder.config = dict(config)\n StateHolder.config_parsed = True", "def loadData(catalog):\r\n controller.loadData(catalog)", "def readDataFromCosmosDB(self):\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaleup_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n # read all the data from cosmos DB with encrypted fields and store in a data frame\n df = spark.read.format(\"com.microsoft.azure.cosmosdb.spark\").options(\n **self.config.get_hash_readconfig()).load()\n\n # iterate over the dataframe and decrypt and replace all fields except the cosmos db system fields strating\n # with \"_\" and the key --> id field since its hashed not encrypted and also not the partition field\n df = df.repartition(160).cache()\n dec_udf 
= udf(decrypt)\n\n for columns in df.columns:\n if columns.startswith('_') or columns.startswith('id') or columns.startswith('partition'):\n print('not to be encrypted field: ' + columns)\n else:\n print('to be encrypted field: ' + columns)\n df = df.withColumn(columns, dec_udf(df[columns]))\n print(\"succesfully decrypted the fields in spark df data frame\")\n\n # Register the DataFrame as a SQL temporary view\n df = df.repartition(1).cache()\n # df.persist(StorageLevel.DISK_ONLY_2)\n df.createOrReplaceTempView(\"customer\")\n spark.sql(\"CACHE TABLE customer\").collect()\n\n print(\"succesfully read \" + str(df.count()) +\n \" records from CosmosDB and saved in spark df data frame\")\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaledown_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n return df", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def get_catalog(self) -> Catalog:\n params: Dict[str, Any] = self._status.get_status_info()\n\n response = self._client.open_api_do(\n \"GET\", \"labels/catalogs\", self.dataset_id, params=params\n ).json()\n return Catalog.loads(response[\"catalog\"])", "def print_catalog(self):\n # first download the json for the catalog\n self.download_json()\n\n # open the saved json file and load the json\n with self.file.open(\"r\") as catalog_file:\n pages = json.load(catalog_file)\n\n # the catalog json is just a list of pages\n # so we begin by iterating through the pages\n for page_num in range(len(pages)):\n # get each page\n page = pages[page_num]\n\n # get the threads on each page\n threads = page[\"threads\"]\n\n # print the page heading\n print(\"*** PAGE \", page_num + 1, \"***\")\n\n # iterate through the threads on each page\n for thread_num in range(len(threads)):\n # get each thread\n thread = threads[thread_num]\n\n # print the thread number\n num = thread[\"no\"]\n print(\"---\", \"Thread:\", num, \"---\")\n\n # not all threads have a subject or comment\n try:\n subject = thread[\"sub\"]\n comment = thread[\"com\"]\n\n print(\"Sub:\", subject)\n print(\"Comment:\", comment)\n except KeyError:\n print(\"N/A\")", "def read_combined_star_catalog(params,log):\n\n if path.isfile(params['catalog_file']) == False:\n\n return np.zeros(1)\n\n hdulist = fits.open(params['catalog_file'])\n\n data = hdulist[1].data\n\n header = hdulist[0].header\n\n star_catalog = Table(data)\n\n data = hdulist[2].data\n\n image_trios = Table(data)\n\n log.info('Read data from combined colour star catalog')\n\n return star_catalog, image_trios, header", "def list_detail_catalog(self, catalog_name):\n # list catalog\n self._list_catalog(catalog_name)\n # detail catalog\n self._details_catalog(catalog_name)", "def load_data(catalog):\n controller.load_data(catalog)", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def checkCatalogs():\n url = CHECKBASE % 'catalogs'\n catalogs = []\n try:\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n data = fh.read()\n dom = minidom.parseString(data)\n fh.close()\n 
catalog_elements = dom.getElementsByTagName('Catalog')\n for catel in catalog_elements:\n if catel.firstChild is None:\n continue\n catalog = catel.firstChild.data.strip()\n if len(catalog):\n catalogs.append(str(catalog))\n except:\n raise Exception,\"Could not open %s to search for list of catalogs\" % url\n return catalogs", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def list(logger, client):\n logger.info('Retrieving Cloudify License')\n license = client.license.list()\n print_data(LICENSE_COLUMN, license, 'Cloudify License')", "async def get_catalog(self, board_id):\n\n route = f'{board_id}/catalog'\n\n data = await self.interact(route)\n\n value = Asset(data)\n\n return value", "def get_catalog(self):\n\n rep = req.get_json(self.CATALOG)\n repo_list = rep[\"repositories\"]\n\n for repo in repo_list:\n self.list.append(Repository(repo))\n\n return self.list", "def test_catalog_opds(self):\n client = Client()\n response = client.get('/catalog.atom/')\n print 'status code for catalog in opds', response.status_code\n self.failUnlessEqual(response.status_code, 200)", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)", "def init_catalog():\n return controller.init_catalog()", "def get_catalog(self, command):\n return self._catalogs.get(str(command))", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "def test_get_catalogue(self):\n s1 = System()\n self.assertEqual(len(s1.get_catalogue()), 0)", "def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)", "def catalog(self) -> TNSCatalog:\n if not self.__catalog:\n self.__catalog = TNSCatalog.from_web(cache=True)\n return self.__catalog\n else:\n self.__catalog.refresh()\n return self.__catalog", "def dcos_aws() -> None:", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def print_catalog(self):\n for book in self.books.keys():\n print(book)", "def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))", "def test_getDigitalObjects(self):\n cases = [\n (self.test_eac + 'NE00001.xml', 0),\n (self.test_eac + 'NE00100.xml', 1),\n (self.test_eac + 'NE01101.xml', 15),\n (self.test_eac + 'NE01400.xml', 1),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n self.assertNotEqual(doc, None)\n result = doc.getDigitalObjects()\n self.assertNotEqual(result, None)\n self.assertEqual(len(result), expected)", "def catalogs(env):\n envs = environments()\n check_env(env, envs)\n\n if app.config['ENABLE_CATALOG']:\n nodenames = []\n catalog_list = []\n query = AndOperator()\n\n if env != '*':\n query.add(EqualsOperator(\"catalog_environment\", env))\n\n query.add(NullOperator(\"catalog_timestamp\", False))\n\n order_by_str = '[{\"field\": \"certname\", \"order\": \"asc\"}]'\n nodes = get_or_abort(puppetdb.nodes,\n query=query,\n with_status=False,\n order_by=order_by_str)\n nodes, temp = tee(nodes)\n\n for node in temp:\n nodenames.append(node.name)\n\n for node in nodes:\n table_row = {\n 'name': 
node.name,\n 'catalog_timestamp': node.catalog_timestamp\n }\n\n if len(nodenames) > 1:\n form = CatalogForm()\n\n form.compare.data = node.name\n form.against.choices = [(x, x) for x in nodenames\n if x != node.name]\n table_row['form'] = form\n else:\n table_row['form'] = None\n\n catalog_list.append(table_row)\n\n return render_template(\n 'catalogs.html',\n nodes=catalog_list,\n envs=envs,\n current_env=env)\n else:\n log.warn('Access to catalog interface disabled by administrator')\n abort(403)", "def retrieve_catalog(self, catalog_hash):\n return self.repository.retrieve_catalog(catalog_hash)", "def read_all():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n return data", "def get_items_for_catalog(catalog_id):\n pass", "def get(self):\n return GenericGet().get_catalogs()", "def get_scnlist_con2ard(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True,\n EDDSentinel1ASF.ARDProduct == False,\n EDDSentinel1ASF.Invalid == False).order_by(\n EDDSentinel1ASF.Acquisition_Date.asc()).all()\n\n scns2ard = list()\n if query_result is not None:\n for record in query_result:\n scns2ard.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns2ard", "def get_catalog(self, user_id, tenant_id, metadata=None):\n raise exception.NotImplemented() # pragma: no cover", "def read(self, path):\n client = self.connect(VAULT_TOKEN)\n return client.read(path)", "def read_data(term):\n course_url = '/admweb/!SWKSECX.main?term='+ term + '&title=&course=&crn=&coll=&dept=&subj='\n connection = httplib.HTTPSConnection('courses.rice.edu', 443)\n connection.connect()\n connection.request(\n 'GET',\n course_url\n )\n courses_xml = connection.getresponse()\n return courses_xml", "def final_catalogs(self, filename=None, catalog_cols=None):\n\n final_catalog = vstack([cluster_info['catalog'] for cluster_info in self._catalog_dictionary.values()])\n\n # If we request to keep only certain columns in our output\n if catalog_cols is not None:\n final_catalog.keep_columns(catalog_cols)\n\n if filename is None:\n return final_catalog\n else:\n if filename.endswith('.cat'):\n final_catalog.write(filename, format='ascii', overwrite=True)\n else:\n final_catalog.write(filename, overwrite=True)", "def getcatalogs():\n \n # default path for the gthumb catalogs of the logged in user\n gpath = os.environ['HOME'] + \"/.local/share/gthumb/catalogs\"\n\n cats = [] \n cat_list = [] \n try:\n # dir_list has all files and directories in path\n # directories are WITHOUT ending '/'\n dir_list = os.listdir(gpath)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, gpath:\", gpath\n return []\n \n # get only the directories \n for line in dir_list:\n file = gpath + \"/\" + line\n #print file \n if os.path.isdir(file):\n cats.append(file)\n else: \n # not a directory; ignore \n #print \"not a directory:\", file \n pass\n\n # now get each catalog file from each directory\n for cat in cats:\n try:\n # dir_list has all files and directories in path\n # any directory is 
WITHOUT ending '/'\n dir_list = os.listdir(cat)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, cat:\", cat\n return []\n \n for line in dir_list:\n file = cat + \"/\" + line\n #print os.path.splitext(file)[1][1:]\n # append file only if it has catalog extension\n if os.path.splitext(file)[1][1:] == \"catalog\":\n cat_list.append(file)\n \n cat_list.sort() \n\n if random_mode:\n random.shuffle(cat_list)\n \n return cat_list", "async def getCollectionDetail(self, slug=None):\n payload = {}\n \n if slug:\n payload[\"slug\"] = slug\n \n\n # Parameter validation\n schema = CatalogValidator.getCollectionDetail()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{slug}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A `slug` is a human readable, URL friendly unique identifier of an object. Pass the `slug` of the collection which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"slug\",\"description\":\"A `slug` is a human readable, URL friendly unique identifier of an object. 
Pass the `slug` of the collection which you want to retrieve.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", slug=slug)\n query_string = await create_query_string(slug=slug)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/collections/{slug}/\", slug=slug), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def catalog():\n session['target'] = \"/\"\n sqlsession = SQLSESSION()\n items = sqlsession.query(Item, Category)\\\n .join(Category).order_by(Item.create_date).limit(10)\n categories = sqlsession.query(Category).all()\n return render_template(\"catalog.html\",\n items=items,\n categories=categories,\n item_title=\"Latest Items\")", "def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)", "def getUserCatalogOnFilespace(fs_id):\n result = None\n session = Queries.createSession()\n try:\n result = session.execute(sqlalchemy.select([Catalog])\n .where(Catalog.fs_id == fs_id)\n .order_by(asc(Catalog.id))\n ).fetchone()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result", "def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def catalog_get(self, args):\n headers = DEFAULT_HEADERS.copy()\n headers.update(args.headers)\n\n if args.output_format == \"json\":\n headers[\"accept\"] = \"application/json\"\n elif args.output_format == \"json-stream\":\n headers[\"accept\"] = \"application/x-json-stream\"\n elif args.output_format == \"csv\":\n headers[\"accept\"] = \"text/csv\"\n else:\n raise UsageException(\"Unsupported output format: %s\" % args.output_format)\n\n catalog = self.server.connect_ermrest(args.id)\n try:\n if args.output_file:\n catalog.getAsFile(args.path,\n destfilename=args.output_file,\n headers=headers,\n delete_if_empty=args.auto_delete)\n else:\n pp(catalog.get(args.path, headers=headers).json())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog not found', e)\n except:\n if args.output_file and os.path.isfile(args.output_file):\n logging.info(\"Deleting empty file: %s\" % args.output_file)\n os.remove(args.output_file)\n raise", "def build_catalog_info(self, catalog_info):\n cat = SourceFactory.build_catalog(**catalog_info)\n catalog_info['catalog'] = cat\n # catalog_info['catalog_table'] =\n # Table.read(catalog_info['catalog_file'])\n 
catalog_info['catalog_table'] = cat.table\n catalog_info['roi_model'] =\\\n SourceFactory.make_fermipy_roi_model_from_catalogs([cat])\n catalog_info['srcmdl_name'] =\\\n self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name'])\n return CatalogInfo(**catalog_info)", "def test_get_hyperflex_app_catalog_list(self):\n pass", "def init():\n catalog = model.newCatalog()\n return catalog", "def read_nonsidereal_catalog(filename):\n catalog_table = ascii.read(filename, comment='#')\n\n # Check to see whether the position is in x,y or ra,dec\n pixelflag = False\n try:\n if 'position_pixels' in catalog_table.meta['comments'][0:4]:\n pixelflag = True\n except:\n pass\n\n # If present, check whether the velocity entries are pix/sec\n # or arcsec/sec.\n pixelvelflag = False\n try:\n if 'velocity_pixels' in catalog_table.meta['comments'][0:4]:\n pixelvelflag = True\n except:\n pass\n return catalog_table, pixelflag, pixelvelflag", "def read_all(self):\r\n pass", "def check_catalogs():\n for config in StateHolder.config:\n conf = StateHolder.config[config]\n if type(conf) is not dict:\n continue\n if conf.get(\"repositoryType\", \"file\") is \"file\":\n FileUtils.make_empty_file_with_empty_dict(directory=StateHolder.home_dir,\n file=conf.get('file', 'poco-catalog.yml'))", "def query_object_catalogs(self, position, catalogs=__ALL_STRING, row_limit=DEFAULT_ROW_LIMIT,\n get_query_payload=False, cache=True, verbose=False):\n return self.query_region_catalogs(position=position,\n radius=self.__ZERO_ARCMIN_STRING,\n catalogs=catalogs,\n row_limit=row_limit,\n get_query_payload=get_query_payload,\n cache=cache,\n verbose=verbose)", "def read_category_items():\n items = session.query(CatalogItem).order_by(CatalogItem.id.desc())\n return items", "def getCatalog(self, version=None, level=None, cubeInfo=True):\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv}...'.format(\n v=version, lv=level))\n self.isAPITokenSet()\n\n isFound = False\n\n # if isinstance(version, int) and isinstance(level, int):\n # print('| int')\n # if 0 < version < 3 and 0 < level < 4:\n # print('| range')\n if version == self.version and level == self.level:\n # print('| equal')\n if self.catalog is not None:\n # print('| not None')\n isFound = True\n\n if isFound:\n df = self.catalog\n\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv} found.'.format(\n v=version, lv=level))\n else:\n df = self._query_catalog(version, level)\n\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv} loaded.'.format(\n v=version, lv=level))\n\n if cubeInfo:\n cubes_measure = []\n cubes_dimension = []\n for cube_code in df['code'].values:\n cubes_measure.append(self._query_cubeMeasures(cube_code))\n cubes_dimension.append(self._query_cubeDimensions(cube_code))\n df['measure'] = cubes_measure\n df['dimension'] = cubes_dimension\n\n self.catalog = df\n return self.catalog", "def _extract_catalog(self, data):\n interface = 'public'\n catalog = data['token']['catalog']\n service_map = {}\n for service in catalog:\n service_endpoint = None\n for endpoint in service['endpoints']:\n if endpoint['interface'] == interface:\n service_endpoint = endpoint['url']\n break\n if service_endpoint:\n service_map[service['type']] = service_endpoint\n LOG.debug('Service catalog: %s' % service_map)\n return service_map", "def __init__(self, catalog_path):\n self.catalog_path = catalog_path\n self.load_catalog()\n return", "def getting_info(self, cloud_path):\n\t\telog(\"getting info on {}\".format(cloud_path))", "def get_infores_catalog(self):\n return 
self._infores_catalog", "def initCatalog():\n t = \"SINGLE_LINKED\"\n catalog = model.newCatalog(t)\n return catalog", "def read(self):", "def read(self, path: Path, oracle: Oracle) -> MagicCollection:\n with path.open(\"rt\", encoding=\"utf-8\") as csv_file:\n reader = csv.DictReader(csv_file)\n card_counts = counts.aggregate_card_counts(reader, oracle)\n return MagicCollection(oracle=oracle, counts=card_counts)", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] \n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def s3_read_data(self):\n\n self.k.open()\n self.k.read()", "def read():\n # TODO", "def read_catalog(catalogfile):\n\t#dictionary to store the url and key\n\tglobal books\n\tglobal default_books\n\tglobal default_titles\n\tglobal count_books\n\t#list to store the titles\n\tglobal titles\n\t#counter for dictionary key\n\ti = 0\n\ttry:\n\t#open file\n\t\twith open(catalogfile,'r') as catalog:\n\t\t\t#read and strip lines\n\t\t\tfor lines in catalog.readlines():\n\t\t\t\tlines_stripped = lines.strip()\n\t\t\t\t#split the lines by 
using the tag http\n\t\t\t\tvalues = lines_stripped.split(\",http://\")\n\t\t\t\t#if values is not empty and has length 2 and title and url is not empty\n\t\t\t\tif values != [''] and len(values) == 2 and values[0] != \"\" and values[1] != \"\":\n\t\t\t\t\t#add title to the list\n\t\t\t\t\ttitles.append(values[0])\n\t\t\t\t\t#add url to the dictionary\n\t\t\t\t\tbooks[i] = \"http://\" + values[1]\n\t\t\t\t\t#increase counter\n\t\t\t\t\ti += 1\n\t\t#get count of book\n\t\tcount_books = len(books)\n\n\t\t#if no books was read - terminate program\n\t\tif (count_books == 0):\n\t\t\tprint(\"File is not readable. Using default values.\")\n\t\t\t#use default values\n\t\t\tbooks = default_books\n\t\t\ttitles = default_titles\n\t\t\tcount_books = len(books)\n\t#if error in reading file - terminate\n\texcept:\n\t\tprint(\"Exception occured while reading file. Using default values\")\n\t\t#use default values\n\t\tbooks = default_books\n\t\ttitles = default_titles\n\t\tcount_books = len(books)\n\twhile True:\n\t#read all the books\n\t\tall_data = list()\n\t\tb_count = range(0,count_books)\n\t\t#for each book in the dictionary - read data\n\t\tfor book_num in b_count:\n\t\t#print(titles)\n\t\t\tcontents = read_book(books[book_num],book_num)\n\t\t\t# add contents to a list\n\t\t\tall_data.append(contents)\n\t\t#if all the URL are not readable\n\t\tempty_data = list()\n\t\tfor x in range(0,count_books):\n\t\t\tempty_data.append([])\n\n\t\tif all_data == empty_data:\n\t\t\t#use default values\n\t\t\tprint(\"No contents were read. Using default values.\")\n\t\t\tbooks = default_books\n\t\t\ttitles = default_titles\n\t\t\tcount_books = len(books)\n\t\t\tcontinue\n\t\t#create a dictionary of words in the data\n\t\tfor values in range(0,count_books):\n\t\t\tupdate_words(all_data[values],values)\n\t\t#ask user for search term\n\t\tprompt_user()\n\t\tbreak", "def fake_catalog(tenant, token):\n catalog_gen = servicecatalog.ServiceCatalogGenerator(token, tenant)\n catalog = catalog_gen.generate_full_catalog()['access']\n return access.AccessInfoV2(**catalog)", "def print_catalog(self, catalog_filter=[]):\n keys = sorted(self.catalog.keys())\n if len(catalog_filter) > 0: # pylint: disable=len-as-condition\n valid = []\n for key in keys:\n for f in catalog_filter:\n if len(self.catalog[key][f]) > 0: # pylint: disable=len-as-condition\n valid.append(key)\n keys = valid\n for key in keys:\n print(f\"Pnum: {self.catalog[key]['pnum']}\")\n print(f\"Edition: {self.catalog[key]['edition']}\")\n print(f\"Metadata: {len(self.catalog[key]['metadata']) > 0}\")\n print(f\"Transliteration: {len(self.catalog[key]['transliteration']) > 0}\")\n print(f\"Normalization: {len(self.catalog[key]['normalization']) > 0}\")\n print(f\"Translation: {len(self.catalog[key]['translation']) > 0}\")\n print()", "def get_cart_contents(db):", "def get_scnlist_datacube(self, loaded=False):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need converting to ARD.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.ARDProduct == True,\n EDDSentinel1ASF.DCLoaded == loaded).order_by(\n EDDSentinel1ASF.Acquisition_Date.asc()).all()\n scns2dcload = list()\n if query_result is not None:\n for record in query_result:\n scns2dcload.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return 
scns2dcload", "def loadSourceCatalog(self, filename):\n sourceCat = afwTable.SourceCatalog.readFits(filename)\n aliasMap = sourceCat.schema.getAliasMap()\n aliasMap.set(\"slot_ApFlux\", \"base_PsfFlux\")\n instFluxKey = sourceCat.schema[\"slot_ApFlux_instFlux\"].asKey()\n instFluxErrKey = sourceCat.schema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n # print(\"schema=\", sourceCat.schema)\n\n # Source x,y positions are ~ (500,1500) x (500,1500)\n centroidKey = sourceCat.table.getCentroidSlot().getMeasKey()\n for src in sourceCat:\n adjCentroid = src.get(centroidKey) - lsst.geom.Extent2D(500, 500)\n src.set(centroidKey, adjCentroid)\n src.set(instFluxKey, 1000)\n src.set(instFluxErrKey, 1)\n\n # Set catalog coord\n for src in sourceCat:\n src.updateCoord(self.wcs)\n return sourceCat", "def load_spitzer_catalog(): # pragma: no cover\n\n path = get_path('spitzer_example_catalog.xml', location='remote')\n table = Table.read(path)\n\n return table", "def do_ascii(catalog):\n task_str = catalog.get_current_task_str()\n\n\n # Howerton Catalog\n datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',\n 'vizier_J_MNRAS_441_1186_table1 J_MNRAS_441_1186_table3_20200403.csv')\n data = read(datafile, format='csv')\n for rrow in pbar(data, task_str):\n row = dict((x, str(rrow[x])) for x in rrow.columns)\n# if any(x in row['Notes'].lower() for x in ['artifact']):\n# continue\n# ctypes = row['Type'].split('/')\n# nonsne = False\n# for ct in ctypes:\n# if ct.replace('?', '') in catalog.nonsnetypes:\n# nonsne = True\n# else:\n# nonsne = False\n# break\n# if nonsne:\n# continue\n name, source = catalog.new_entry(\n row['CRTS'],\n srcname='CRTS',\n bibcode='2014MNRAS.441.1186D')\n# if row['IAU des.'] != '--':\n# catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,\n# row['IAU des.'], source)\n# for ct in ctypes:\n# catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, ct,\n# source)\n# catalog.entries[name].add_quantity(SUPERNOVA.DISCOVERER,\n# row['Discoverer'], source)\n# date = row['Discovery'].split('/')\n# date = '/'.join([date[-1].zfill(2), date[0].zfill(2), date[1]])\n# catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, date,\n# source)\n catalog.entries[name].add_quantity(CATACLYSMIC.VISUAL_MAG, row['Vmax'],\n source)\n catalog.entries[name].add_quantity(CATACLYSMIC.RA, row['RAJ2000'], source)\n catalog.entries[name].add_quantity(CATACLYSMIC.DEC, row['DEJ2000'], source)\n catalog.journal_entries()\n\n # Howerton Catalog", "def get_data_from_storage(data_file):\n print(f\"{CR}Yipes, I don't know how to pull data from dvc yet{C0}\")", "def _read_data(self):" ]
[ "0.69564354", "0.63602346", "0.6289939", "0.6274737", "0.6120884", "0.60243875", "0.5962401", "0.59330785", "0.5929398", "0.59037805", "0.5888229", "0.58801526", "0.58801526", "0.587579", "0.586561", "0.582441", "0.582154", "0.57935977", "0.57935977", "0.57935977", "0.57935977", "0.57935977", "0.5749884", "0.5731499", "0.5704847", "0.5690862", "0.5621293", "0.5611027", "0.5611027", "0.5611027", "0.5611027", "0.5611027", "0.55949634", "0.5548831", "0.5548831", "0.5548831", "0.55485344", "0.55387056", "0.5530069", "0.54957", "0.5495501", "0.54836243", "0.5478288", "0.5468943", "0.5468943", "0.5455843", "0.54533356", "0.54413396", "0.54113495", "0.53944564", "0.53944564", "0.53887814", "0.5357043", "0.5351722", "0.53232026", "0.5321776", "0.5314509", "0.5288348", "0.52870494", "0.5286469", "0.52845746", "0.5282257", "0.52753425", "0.52727574", "0.5256187", "0.5253637", "0.5252118", "0.52322114", "0.5211968", "0.5211652", "0.521025", "0.5209808", "0.52049756", "0.5204262", "0.52028525", "0.51889217", "0.51863503", "0.5179233", "0.51778716", "0.517136", "0.5157558", "0.5154618", "0.51533186", "0.5151255", "0.5146149", "0.5143206", "0.51393056", "0.51295197", "0.51288337", "0.512875", "0.51263404", "0.51123536", "0.5098167", "0.50966096", "0.5085958", "0.5068772", "0.505841", "0.5050331", "0.5046148", "0.50458694" ]
0.67307436
1
add fields from the cat; some will not be in the odata but some will. When copy is True, we will copy over the ones that are in both, in some cases
def _add_cat_fields(self, odata, copy=True):
    # these are required fields from get_meds_output_dtype
    # that we have put into the input catalog
    always_copy=[
        'id',
        'ra',
        'dec',
    ]

    cat = self.cat_orig

    add_dt = []
    for d in cat.dtype.descr:
        n = d[0]
        if n not in odata.dtype.names:
            add_dt.append(d)

    obj_data = eu.numpy_util.add_fields(
        odata,
        add_dt,
    )

    if copy:
        for n in always_copy:
            obj_data[n] = cat[n]

        for d in add_dt:
            n = d[0]
            if n in always_copy:
                continue

            # don't clobber things that should be left at
            # their default values
            if n not in odata.dtype.names:
                obj_data[n] = cat[n]

    return obj_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def __add__(self, other):\n return self.__class__(\n {\n name:\n self.__getattribute__(name) + other.__getattribute__(name)\n for name in self._fields\n }\n )", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def _copy(self, *, new_instance:bool=False, new_alias:bool=False,\n _context:dict=None, _deep_copy:bool=True, **kwargs):\n self_id = id(self)\n if _context is None:\n _context = {}\n elif self_id in _context:\n return _context[self_id] # I've already been copied\n\n\n existing_items = {k:getattr(self, k) for k in self._nb_attrs}\n #It's a copy so shouldn't have the same uuid\n existing_items.pop('uuid', None)\n existing_items.update(kwargs)\n\n if new_instance:\n existing_items['_ref'] = self\n elif not ('_ref' in kwargs and kwargs['_ref']) and self._ref:\n existing_items['_ref'] = self._ref._copy(_context=_context)\n\n if new_alias:\n existing_items['_alias'] = True\n\n new_obj = type(self)(**existing_items)\n\n _context[self_id] = new_obj\n\n if _deep_copy:\n for obj in self:\n new_obj._add(obj._copy(new_alias=new_alias, _context=_context))\n\n return new_obj", "def test_copy_2(dset_full):\n dset_new = copy.deepcopy(dset_full)\n\n # Test internal references in the dataset\n assert id(dset_new.site_pos.other) == id(dset_new.sat_pos)\n assert id(dset_new.site_delta.ref_pos) == id(dset_new.site_pos)\n assert id(dset_new.site_posvel.other) == id(dset_new.sat_posvel)\n assert id(dset_new.site_posvel_delta.ref_pos) == id(dset_new.site_posvel)\n\n assert id(dset_new.group.site_pos.other) == id(dset_new.group.sat_pos)\n assert id(dset_new.group.site_delta.ref_pos) == id(dset_new.group.site_pos)\n assert id(dset_new.group.site_posvel.other) == id(dset_new.group.sat_posvel)\n assert 
id(dset_new.group.site_posvel_delta.ref_pos) == id(dset_new.group.site_posvel)\n\n # Verify that new dataset have different references than original object\n for field_name, field in dset_full._fields.items():\n assert id(field.data) != id(dset_new._fields[field_name].data)\n try:\n for group_field_name, group_field in field.data._fields.items():\n assert id(group_field.data) != id(dset_new._fields[field_name].data._fields[group_field_name].data)\n except AttributeError:\n # Field is not a group\n pass", "def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new", "def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)", "def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def copyBooks(self):\n skipMods = set(('Morrowind.esm',self.fileInfo.name))\n for id,(record,modName) in (self.srcBooks.items() + self.altBooks.items()):\n if modName not in skipMods:\n self.setRecord(copy.copy(record))", "def append_ipma_metadata(orig: dict, dest: dict):\n for key in [key for key in orig.keys() if key != 'data']:\n dest[key] = orig[key]", "def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def _copy_metadata_deep(value, old_value):\n if value is None or old_value is None or value is old_value: return\n\n if isinstance(value, dict):\n for k, 
v in value.iteritems():\n _copy_metadata_deep(v, old_value[k])\n elif isinstance(value, list):\n for v, old_v in zip(value, old_value):\n _copy_metadata_deep(v, old_v)\n else:\n try:\n value.__dict__.update(old_value.__dict__)\n except AttributeError:\n pass", "def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))", "def copy_attributes(var1, var2):\n for each in var1.ncattrs():\n if each != \"_FillValue\":\n setattr(var2, each, getattr(var1, each))", "def _copy_from_doc(doc):\n d = {\"has_props\": [], \"origins\": []}\n # Complex function to grab the keys and put them in the root doc\n # if the item is a list, it makes one doc per item with those corresponding keys\n for doc_key in summary_fields:\n sub_doc = doc.get(doc_key, None)\n if isinstance(sub_doc, list) and len(sub_doc) > 0:\n d[\"has_props\"].append(doc_key)\n d[doc_key] = []\n for sub_item in sub_doc:\n temp_doc = {\n copy_key: sub_item[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_item\n }\n d[doc_key].append(temp_doc)\n elif isinstance(sub_doc, dict):\n d[\"has_props\"].append(doc_key)\n if sub_doc.get(\"origins\", None):\n d[\"origins\"].extend(sub_doc[\"origins\"])\n d.update(\n {\n copy_key: sub_doc[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_doc\n }\n )\n return d", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def mergeWith(self, others):", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def append(dest, field, value):\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]", "def merge(self, obj):\n pass", "def test__ActivityParty__copy_with__1():\n old_party_id = 'plain'\n old_size = 6\n old_max = 12\n new_party_id = 'asia'\n new_size = 1\n new_max = 8\n \n field = ActivityParty(\n party_id = old_party_id,\n size = old_size,\n max_ = old_max,\n )\n copy = field.copy_with(\n party_id = new_party_id,\n size = new_size,\n max_ = new_max,\n )\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(copy.id, 
new_party_id)\n vampytest.assert_eq(copy.size, new_size)\n vampytest.assert_eq(copy.max, new_max)", "def copy(self):", "def _copy_from_doc(doc):\n\n d = {\"has_props\": []}\n\n # Function to grab the keys and put them in the root doc\n for doc_key in summary_fields:\n sub_doc = doc.get(doc_key, None)\n if isinstance(sub_doc, list) and len(sub_doc) > 0:\n d[\"has_props\"].append(doc_key)\n for copy_key in summary_fields[doc_key]:\n d[copy_key] = dict()\n for sub_item in sub_doc:\n # In cases where multiple docs have the same properties,\n # they must differ by method\n if copy_key in sub_item and \"method\" in sub_item:\n d[copy_key][sub_item[\"method\"]] = sub_item[copy_key]\n\n elif isinstance(sub_doc, dict):\n d[\"has_props\"].append(doc_key)\n d.update(\n {\n copy_key: sub_doc[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_doc\n }\n )\n\n return d", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def copy_attributes(ncin, ncout,exclude=None, include=None):\n att_dict = odict()\n for attribute_name in ncin.ncattrs():\n if include is not None and attribute_name not in include:\n continue #if include is defined, and this attribute is not there\n if exclude is not None and attribute_name in exclude:\n continue #if exclude is defined, and this attribute is there\n att_dict[attribute_name] = ncin.getncattr(attribute_name)\n ncout.setncatts(att_dict)", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"une catégorie ne peut être fusionnée avec elle même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n if self.type != new.type:\n raise TypeError(\"pas le même type de catégorie, %s est %s alors que %s est %s\" % (\n self.nom, self.type, new.nom, new.type))\n nb_change = Echeance.objects.filter(cat=self).update(cat=new)\n nb_change += Ope.objects.filter(cat=self).update(cat=new)\n self.delete()\n return nb_change", "def merge_fields(d, new):\n if not new:\n return\n\n for k, v in new.iteritems():\n if k not in d:\n d[k] = v\n elif isinstance(v, list) and isinstance(d[k], list):\n d[k].extend(v)\n elif isinstance(v, dict) and isinstance(d[k], dict):\n d[k].update(v)\n else:\n d[k] = v", "def _handle_load_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data", "def __duplicate_o2o_fields(self, duplicate):\n for f in self._meta.related_objects:\n if f.one_to_one:\n if any(\n [\n f.name in self._clone_o2o_fields\n and f not in self._meta.concrete_fields,\n self._clone_excluded_o2o_fields\n and f.name not in self._clone_excluded_o2o_fields\n and f not in self._meta.concrete_fields,\n ]\n ):\n rel_object = getattr(self, f.name, None)\n if rel_object:\n new_rel_object = CloneMixin._create_copy_of_instance(\n rel_object,\n force=True,\n sub_clone=True,\n )\n setattr(new_rel_object, f.remote_field.name, duplicate)\n new_rel_object.save()\n\n return duplicate", "def updated_object(self):\n o = deepcopy(self.object)\n o[\"name\"] += \"-copy\"\n return o", "def test_merge_aggregate_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def conditional_copy(self, other, key, altkey=None):\n if hasattr(self, key):\n possible = getattr(self, key)\n if possible:\n usekey = {True: altkey, False: key}[altkey is 
not None]\n if hasattr(other, usekey):\n exists = getattr(other, usekey)\n if exists:\n return\n if isinstance(possible, list):\n setattr(other, usekey, [deepcopy(i) for i in possible])\n else:\n setattr(other, usekey, deepcopy(possible))", "def copy_attrs(varin,varout):\n for attr_name in varin.ncattrs():\n varout.setncattr(attr_name,varin.getncattr(attr_name))", "def test_dict_merge_immutable():\n x1 = {'one': 1, 'two': 2}\n x1_cop = x1.copy()\n ir.dict_merge(x1, {'three': 3, 'two': None})\n assert x1 == x1_cop\n ir.dict_merge({'ten': 10, 'one': '1'}, x1)\n assert x1 == x1_cop", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def _copy_catalog(self):\n\n # load the IOL into an astropy table\n # the table is in iol.catalog\n self.iol = axeiol.InputObjectList(self.in_sex)\n\n # check for an empty table\n if len(self.iol.catalog) < 1:\n _log.info(\"Empty catalog found\\n\")\n return None\n\n # create a new GOL that's a copy of the input list\n self.gol = deepcopy(self.iol.catalog) # just make a copy", "def merge_schema(self, schema):\n super(CategoricalAttributeSchema, self).merge_schema(schema)\n self.categories.update(schema.categories)", "def concat(self, other):\n self.add_rules(other.cliques)\n self.prop_names.update(other.prop_names)", "def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)", "def __listunion(self, c1, c2):\n s1 = {}\n for delta in c1:\n s1[delta] = 1\n\n\tc = c1[:]\n\tfor delta in c2:\n if not s1.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def __duplicate_m2o_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.concrete_fields:\n if f.many_to_one:\n if any(\n [\n f.name in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.name not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to one fields\n for field in fields:\n item = getattr(self, field.name)\n try:\n item_clone = item.make_clone()\n except IntegrityError:\n item_clone = item.make_clone(sub_clone=True)\n\n setattr(duplicate, field.name, item_clone)\n\n return duplicate", "def merge(self, object_class):\n other_oc = self.schema.get_object_class(object_class)\n self.required_attrs |= other_oc.required_attrs\n self.allowed_attrs |= other_oc.allowed_attrs", "def copyCommonFields(self):\n self.fetchDataToForm(self.selected_row, self.selected_column, fields = \"Recent\")", "def add_dict(dest, src):\n for key in src.keys():\n if key in dest.keys():\n dest[key] += src[key]\n else:\n dest[key] = src[key]", "def test_copy_attributes(self):\n\n v = Vector({ 'x': 3 }, { 'y': True })\n n = v.copy()\n\n self.assertEqual(v.attributes, n.attributes)\n\n v.attributes['y'] = False\n self.assertFalse(v.attributes['y'])\n self.assertTrue(n.attributes['y'])\n v.attributes['y'] = True", "def mergeMetadata(self, obj, dom): \n self.update_semantics = 'merge'\n # create a metadata dict that has all the values from obj, overridden\n # by the current dom values.\n metadata = self.getModuleMetadata(obj, {})\n metadata.update(self.getMetadata(dom, METADATA_MAPPING))\n for oerdc_name, cnx_name in METADATA_MAPPING.items():\n if cnx_name in ['keywords',]:\n old_value = 
getattr(obj, cnx_name)\n if old_value:\n current_value = list(metadata.get(cnx_name, []))\n current_value.extend(old_value)\n metadata[cnx_name] = current_value\n if metadata:\n self.validate_metadata(metadata)\n metadata = self.fixEntities(metadata, ATTRIBUTES_TO_FIX)\n if ICollection.providedBy(obj):\n obj.collection_metadata(**metadata)\n elif IModule.providedBy(obj):\n obj.update_metadata(**metadata)\n self.updateRoles(obj, dom)\n obj.reindexObject(idxs=metadata.keys())", "def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)", "def _merge(acc: Dict[str, str], cur: Any) -> Dict[str, str]:\n parsed = _parse_feature(cur)\n acc[\"timestamp\"] = parsed[\"timestamp\"]\n acc[\"lat\"] = parsed[\"lat\"]\n acc[\"lon\"] = parsed[\"lon\"]\n key = parsed[\"property\"]\n val = parsed[\"value\"]\n\n acc[key] = val\n\n return acc", "def copy(self):\n return self.update({})", "def _handle_dump_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data", "def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp", "def copy_nc_attrs(src, dest):\n with xarray.open_dataset(src) as s:\n attrs = s.attrs\n # Write empty root dataset with attributes\n ds = xarray.Dataset(attrs=attrs)\n ds.to_netcdf(dest, mode=\"a\")", "def copy_oids(fc, fld_name):\n oid_fld = arcpy.Describe(fc).OIDFieldName\n arcpy.AddField_management(fc, fld_name, 'LONG')\n arcpy.CalculateField_management(\n fc, fld_name, '!{}!'.format(oid_fld), 'PYTHON_9.3')", "def add_data(self, in_data):\n old_data = {}\n for field in self.fields:\n # ToDo - might be a better way to determine the fieldname\n if field in in_data:\n if field in self.data:\n old_data = dict(self.data)\n self.data = {}\n\n self.data[field] = in_data[field]\n self.data['usUnits'] = in_data['usUnits']\n self.data['dateTime'] = in_data['dateTime']\n return old_data", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def __add__(self, other):\r\n # Make a defaultdict of defaultdicts, the latter of which returns\r\n # None when an key is not present\r\n merged_data = 
defaultdict(lambda: defaultdict(lambda: None))\r\n\r\n # We will keep track of all unique sample_ids and metadata headers\r\n # we have seen as we go\r\n all_sample_ids = set()\r\n all_headers = set()\r\n\r\n # add all values from self into the merged_data structure\r\n for sample_id, data in self._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n merged_data[sample_id][header] = value\r\n\r\n # then add all data from other\r\n for sample_id, data in other._metadata.iteritems():\r\n all_sample_ids.add(sample_id)\r\n for header, value in data.iteritems():\r\n all_headers.add(header)\r\n # if the two mapping files have identical sample_ids and\r\n # metadata columns but have DIFFERENT values, raise a value\r\n # error\r\n if merged_data[sample_id][header] is not None and \\\r\n merged_data[sample_id][header] != value:\r\n raise ValueError(\"Different values provided for %s for \"\r\n \"sample %s in different mapping files.\"\r\n % (header, sample_id))\r\n else:\r\n merged_data[sample_id][header] = value\r\n\r\n # Now, convert what we have seen into a normal dict\r\n normal_dict = {}\r\n for sample_id in all_sample_ids:\r\n if sample_id not in normal_dict:\r\n normal_dict[sample_id] = {}\r\n\r\n for header in all_headers:\r\n normal_dict[sample_id][header] = \\\r\n merged_data[sample_id][header]\r\n\r\n # and create a MetadataMap object from it; concatenate comments\r\n return self.__class__(normal_dict, self.Comments + other.Comments)", "def deepcopy(self, **datas):\n new_obj = self.__class__(**datas)\n new_obj.save()\n\n return new_obj", "def _copy_kwargs(self, **kwargs):\n ns = self.__dict__\n for attr, kw in {'_engine': 'engine', '_format': 'format'}.items():\n assert kw not in kwargs\n if attr in ns:\n kwargs[kw] = ns[attr]\n return super()._copy_kwargs(**kwargs)", "def copy(self):\n new = super().copy()\n new.drip_cal_config = deepcopy(self.drip_cal_config)\n new.drip_config = deepcopy(self.drip_config)\n new.pipecal_config = deepcopy(self.pipecal_config)\n return new", "def difference(self, other: Catalog) -> Catalog:\n cat1 = self.copy()\n cat2 = other.copy()\n\n new_cat = Catalog()\n for a_cat1, a_cat_2 in [(cat1, cat2), (cat2, cat1)]:\n for k in a_cat1.keys():\n for ver_id, version in a_cat1[k].versions.items():\n if k not in a_cat_2 or ver_id not in a_cat_2[k]:\n new_cat[k][ver_id] = version\n return new_cat", "def add_metadata_properties(self, sentence, result):\r\n for property in sentence.properties:\r\n if property.property_metadata.is_category:\r\n result[property.name] = property.value", "def _modified_copy(cls, copy, is_offset: bool, mod_mapping: dict):\n\n for attribute, value in mod_mapping.items():\n\n path = attribute.split(\"__\")\n\n if len(path) == 1:\n # Non-nested attribute change\n cls._set_stuff(is_offset, copy, attribute, value)\n\n else:\n # Nested attribute change, e.g. 
circle__position__x=4\n *nested_attr_chain, attribute = path\n obj = copy\n for nested_attr in nested_attr_chain:\n obj = getattr(obj, nested_attr)\n cls._set_stuff(is_offset, obj, attribute, value)\n\n return copy", "def osl_fill_from(self, other):\n #TODO: What about inherited properties?\n for p in self._osl.properties:\n conditional_copy(other, self, p[0])\n return self", "def __add__(self, other):\n merged_profile = super().__add__(other)\n\n # struct specific property merging\n merged_profile.row_has_null_count = \\\n self.row_has_null_count + other.row_has_null_count\n merged_profile.row_is_null_count = \\\n self.row_is_null_count + other.row_is_null_count\n merged_profile.hashed_row_dict.update(self.hashed_row_dict)\n merged_profile.hashed_row_dict.update(other.hashed_row_dict)\n\n self_to_other_idx = self._get_and_validate_schema_mapping(self._col_name_to_idx,\n other._col_name_to_idx)\n\n # merge profiles\n for idx in range(len(self._profile)):\n other_idx = self_to_other_idx[idx]\n merged_profile._profile.append(self._profile[idx] +\n other._profile[other_idx])\n\n # schemas are asserted to be identical\n merged_profile._col_name_to_idx = copy.deepcopy(self._col_name_to_idx)\n\n # merge correlation\n if (self.options.correlation.is_enabled\n and other.options.correlation.is_enabled):\n merged_profile.correlation_matrix = self._merge_correlation(other)\n\n # recompute chi2 if needed\n if self.options.chi2_homogeneity.is_enabled and \\\n other.options.chi2_homogeneity.is_enabled:\n\n chi2_mat1 = self.chi2_matrix\n chi2_mat2 = other.chi2_matrix\n n1 = self.total_samples - self.row_is_null_count\n n2 = other.total_samples - other.row_is_null_count\n if n1 == 0:\n merged_profile.chi2_matrix = chi2_mat2\n elif n2 == 0:\n merged_profile.chi2_matrix = chi2_mat1\n elif chi2_mat1 is None or chi2_mat2 is None:\n merged_profile.chi2_matrix = None\n else:\n merged_profile.chi2_matrix = merged_profile._update_chi2()\n\n return merged_profile", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def test__ApplicationCommandOptionMetadataNested__copy_with__1():\n old_options = [\n ApplicationCommandOption('nue', 'nue', ApplicationCommandOptionType.string),\n ApplicationCommandOption('seija', 'seija', ApplicationCommandOptionType.integer),\n ]\n \n new_options = [\n ApplicationCommandOption('aya', 'ayaya', ApplicationCommandOptionType.float),\n ApplicationCommandOption('momiji', 'awoo', ApplicationCommandOptionType.user),\n ]\n \n option_metadata = ApplicationCommandOptionMetadataNested(\n options = old_options,\n )\n \n copy = option_metadata.copy_with(\n options = new_options,\n )\n \n _asert_fields_set(copy)\n vampytest.assert_is_not(option_metadata, copy)\n \n vampytest.assert_eq(copy.options, tuple(new_options))", "def copy_properties(self, from_metadata, clear_properties=False):\n if (clear_properties):\n self.clear_properties()\n\n for key, value in from_metadata.properties.items():\n self.properties[key] = copy.deepcopy(value)", "def add_other_meta_data(self, other: _MetaData) -> None:\n\n for key in other._meta_data_dict.keys():\n self.add_data(key, other._meta_data_dict[key])", "def CopyData(self, p_int, vtkDataSetAttributes, p_int_1, vtkDataSetAttributes_1, p_int_2):\n ...", "def test_merge_overwrite_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = 
\"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)", "def varcopy(self, vars):", "def merge(self, new_attributes):\n for k, v in new_attributes.items():\n setattr(self, k, v)", "def testCopyCollection(self):\n copy = self.node.copy_collection()\n\n self.assertEqual(\n self.node.type,\n copy.type\n )\n\n self.assertEqual(\n self.node.desc,\n copy.desc\n )\n\n self.assertEqual(\n self.node.input_desc,\n copy.input_desc\n )\n\n self.assertEqual(\n self.node.viewing_desc,\n copy.viewing_desc\n )\n\n self.assertEqual(\n self.node.all_children,\n copy.all_children\n )", "def copy():\n copy2(per, per_old)", "def merge_object(self, obj):\n for key, value in obj.lines.items():\n if key not in self.lines:\n self.lines[key] = value\n self.lines[key] = self.lines[key] + value", "def flatten_others(self, obj, many, **kwargs):\r\n for k, v in obj['others'].items():\r\n obj[k] = v\r\n obj.pop('others')\r\n return obj", "def combine(self, other) -> None:\n assert self.id_ == other.id_\n assert self.type_ == other.type_\n self.count += other.count", "def test_addingnewattributes(self):\n b1 = BaseModel()\n b1.name = \"Holberton\"\n b1.my_number = 89\n dictionary = b1.to_dict()\n self.assertEqual('name' in dictionary, True)\n self.assertEqual('my_number' in dictionary, True)\n b2 = BaseModel()\n dictionary2 = b2.to_dict()\n self.assertEqual('name' in dictionary2, False)\n self.assertEqual('my_number' in dictionary2, False)", "def test__ActivityParty__copy_with__0():\n party_id = 'plain'\n size = 6\n max_ = 12\n \n field = ActivityParty(\n party_id = party_id,\n size = size,\n max_ = max_,\n )\n copy = field.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(field, copy)", "def test_copy(dset):\n dset_new = copy.deepcopy(dset)\n\n for field_name, field in dset._fields.items():\n print(f\"Testing {field_name}\")\n try:\n if field.data.dtype.type is np.str_:\n assert np.char.equal(field.data, dset_new._fields[field_name].data).all()\n else:\n assert np.equal(field.data, dset_new._fields[field_name].data).all()\n except AttributeError:\n for group_field_name, group_field in field.data._fields.items():\n if group_field.data.dtype.type is np.str_:\n assert np.char.equal(\n group_field.data, dset_new._fields[field_name].data._fields[group_field_name].data\n ).all()\n else:\n assert np.equal(\n group_field.data, dset_new._fields[field_name].data._fields[group_field_name].data\n ).all()", "def copy_doclist(doclist, no_copy = []):\n\n\tcl = []\n\n\t# main doc\n\tc = Document(fielddata = doclist[0].fields.copy())\n\n\t# clear no_copy fields\n\tfor f in no_copy:\n\t\tif c.fields.has_key(f):\n\t\t\tc.fields[f] = None\n\n\tc.name = None\n\tc.save(1)\n\tcl.append(c)\n\n\t# new parent name\n\tparent = c.name\n\n\t# children\n\tfor d in doclist[1:]:\n\t\tc = Document(fielddata = d.fields.copy())\n\t\tc.name = None\n\n\t\t# clear no_copy fields\n\t\tfor f in no_copy:\n\t\t\tif c.fields.has_key(f):\n\t\t\t\tc.fields[f] = None\n\n\t\tc.parent = parent\n\t\tc.save(1)\n\t\tcl.append(c)\n\n\treturn cl", "def test_copy(self):\n\n # Copy the 'orig' data pipe to the 'new' data pipe.\n pipes.copy('orig', 'new')\n\n # Test that the new data pipe exists.\n self.assert_('new' in ds)\n\n # Test that the new data pipe has the object 'x' and that its value is 1.\n self.assertEqual(ds['new'].x, 1)\n\n # Change the value of x.\n ds['new'].x = 2\n\n # Test that the two values are different.\n self.assert_(ds['orig'].x != 
ds['new'].x)\n\n # Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.\n self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)\n\n # Change the spin system number.\n ds['new'].mol[0].res[0].spin[0].num = 2\n\n # Test that the original spin system number hasn't changed.\n self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)", "def merge(target, source):\n for key, value in source.items():\n if key not in target:\n target[key] = value\n elif type(target[key]) is dict:\n if key in self.OVERRIDE_ON_EXTENDS:\n target[key].update(value)\n else:\n merge(target[key], value)\n elif type(target[key]) is list:\n target[key] += value\n return target", "def copy(self, rhs):\n\n\t\tself.name = rhs.name\n\t\tself.file = rhs.file\n\n\t\tfor rem in rhs.remark:\n\t\t\tself.addRemark(rem)\n\n\t\tself.bk_tot = rhs.bk_tot\n\t\tself.fa_rep = rhs.fa_rep\n\t\tself.fa_atr = rhs.fa_atr", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def concatenate_data():", "def __duplicate_o2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.related_objects:\n if f.one_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2o_or_o2m_fields,\n self._clone_excluded_m2o_or_o2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2o_or_o2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone one to many fields\n for field in fields:\n for item in getattr(self, field.get_accessor_name()).all():\n try:\n item.make_clone(attrs={field.remote_field.name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field.remote_field.name: duplicate}, sub_clone=True\n )\n\n return duplicate", "def test__ActivityParty__copy():\n party_id = 'plain'\n size = 6\n max_ = 12\n \n field = ActivityParty(\n party_id = party_id,\n size = size,\n max_ = max_,\n )\n copy = field.copy()\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(field, copy)", "def add_shallow_copy_of(self, child_to_add, merged=False):\n\n new_child = self.add_child(child_to_add.text)\n\n if merged:\n new_child.instances.append({\n 'hostname': child_to_add.host.hostname,\n 'comments': child_to_add.comments,\n 'tags': child_to_add.tags})\n new_child.comments.update(child_to_add.comments)\n new_child.tags.update(child_to_add.tags)\n new_child.order_weight = child_to_add.order_weight\n\n return new_child", "def copy(self):\n return super().copy()", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied", "def transfer_missing_elements(target_dict, source_dict, transfer_type=None):\r\n\r\n if transfer_type is None:\r\n transfer_type = source_dict.get(\"_transfer_type_\", \"recursive\")\r\n\r\n for key_, val_ in source_dict.items():\r\n # print(key_,isinstance(val_, dict), val_)\r\n if isinstance(val_, dict):\r\n if key_ not in target_dict:\r\n target_dict[key_] = EasyDict()\r\n if transfer_type is None:\r\n transfer_type = val_.get(\"_transfer_type_\", \"recursive\")\r\n # print(\"*********** \",transfer_type)\r\n\r\n if transfer_type == \"recursive\":\r\n transfer_missing_elements(target_dict[key_], val_, transfer_type)\r\n elif transfer_type == \"update\":\r\n target_dict[key_].update(val_)\r\n elif transfer_type == \"overwrite\":\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n\r\n elif key_ not in target_dict:\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n # else :\r\n # target_dict[key_] = val_\r\n # target_dict[key_] 
= copy.deepcopy(source_dict[key_])\r\n\r\n\r\n # if isinstance(source_dict[key_],list) and isinstance(source_dict[key_][0],dict):\r\n # if key_ not in target_dict:\r\n # target_dict[key_] = []\r\n # for src_ in source_dict[key_]:\r\n # if not isinstance(src_,dict):\r\n # continue\r\n # match = False\r\n # for tar_ in target_dict[key_]:\r\n # # TODO make a list of bool with ID keys loaded from odb and check if any(matches):\r\n # if key_matches(\"pth_full\", src_, tar_) or key_matches(\"pth_alias\", src_, tar_) :\r\n # match = True\r\n # if not match:\r\n # temp = EasyDict()\r\n # target_dict[key_].append(temp)\r\n # transfer_missing_elements(temp, src_)\r", "def mergeWith(self, newFL):\n srcMods = self.srcMods\n for levls, newLevls in ((self.levcs,newFL.levcs),(self.levis,newFL.levis)):\n for listId, newLevl in newLevls.items():\n if listId not in srcMods: \n srcMods[listId] = [newFL.fileInfo.name]\n levl = levls[listId] = copy.deepcopy(newLevl)\n self.records.append(levl)\n else:\n srcMods[listId].append(newFL.fileInfo.name)\n levls[listId].mergeWith(newLevl)", "def add(self, other):\n\n def merge_dicts(d1, d2):\n \"\"\"\n Merge two dictionaries\n\n param d1: dictionary changed in place to have combined values\n type d1: dictionary(key -> set)\n param d2: dictioanry to be merged\n type d2: dictionary(key -> set)\n \"\"\"\n for key,value in d2.items():\n if key not in d1:\n d1[key] = value\n else:\n d1[key] |= value\n \n self.num_documents += other.num_documents\n self.num_expressions += other.num_expressions\n self.global_expressions += other.global_expressions\n self.expressions_with_e += other.expressions_with_e\n self.num_keywords += other.num_keywords\n merge_dicts(self.missing_tags, other.missing_tags)\n merge_dicts(self.problem_files, other.problem_files)", "def extend(self, other_rollout):\n\n assert not self.is_terminal()\n assert all(k in other_rollout.fields for k in self.fields)\n for k, v in other_rollout.data.items():\n self.data[k].extend(v)\n self.last_r = other_rollout.last_r" ]
[ "0.60299045", "0.5626615", "0.55989486", "0.55208635", "0.54832995", "0.54745245", "0.5468035", "0.54594445", "0.5416989", "0.54133993", "0.53944564", "0.5360663", "0.5277778", "0.5271018", "0.52541333", "0.5244835", "0.5185914", "0.518226", "0.5180149", "0.51750094", "0.5171218", "0.5156771", "0.5155819", "0.51528907", "0.5145961", "0.5141367", "0.51112574", "0.51023513", "0.50964636", "0.5067921", "0.5064287", "0.505925", "0.5057087", "0.5055498", "0.50506383", "0.50484616", "0.5046127", "0.5044431", "0.50399566", "0.50249225", "0.5022714", "0.5017102", "0.5014292", "0.50117606", "0.5003421", "0.50026613", "0.49966183", "0.49893728", "0.49813414", "0.49805868", "0.49730048", "0.4970015", "0.49678364", "0.49635708", "0.4960009", "0.4958939", "0.49501386", "0.49419808", "0.49388835", "0.49311534", "0.49293113", "0.4928486", "0.49264878", "0.49242815", "0.4921829", "0.49175835", "0.49108863", "0.48965535", "0.4896271", "0.4890504", "0.48883256", "0.4886281", "0.4872201", "0.48690856", "0.4867692", "0.48665535", "0.48660877", "0.4858043", "0.48436648", "0.48419997", "0.4839987", "0.48382923", "0.48313022", "0.48279047", "0.48185426", "0.48178884", "0.4813285", "0.48069173", "0.48068625", "0.4805273", "0.4800627", "0.47968727", "0.47960165", "0.47928083", "0.47907597", "0.47903785", "0.4789817", "0.47898117", "0.4788695", "0.47845078" ]
0.7400884
0
make a new struct with ncutout-sized arrays based on the actual maximum ncutout
def _make_resized_data(self, odata):
    nmax = odata['file_id'].shape[1]
    new_nmax = odata['ncutout'].max()
    if new_nmax < 2:
        new_nmax = 2

    temp_obj_data = odata

    nobj = temp_obj_data.size

    new_data = meds.util.get_meds_output_struct(
        nobj,
        new_nmax,
        extra_fields=self._get_fields(new_nmax),
    )
    new_data = self._add_cat_fields(new_data, copy=False)

    for name in new_data.dtype.names:
        if name in temp_obj_data.dtype.names:
            shape = new_data[name].shape
            lshape = len(shape)
            if lshape > 1 and shape[1] == new_nmax:
                new_data[name][:, :] = temp_obj_data[name][:, 0:new_nmax]
            else:
                new_data[name][:] = temp_obj_data[name][:]

    del temp_obj_data

    return new_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_maxcut_data_model():\n n = 5\n V = np.arange(0, n, 1)\n E = [(0, 1, 3.0), (1, 2, 2.0), (2, 3, 2.0), (3, 4, 3.0), (4, 0, 1.0), (0, 3, 3.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n return G", "def expanding_max_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_max_1d_nb(a[:, col], minp=minp)\n return out", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.phasedarray_sptr_max_noutput_items(self)", "def get_bins(size, n, max_value):\n bin_lims = get_bin_lims(n, max_value)\n return sort_by_rows(np.array(list(itertools.product(bin_lims, repeat=size))))", "def arrayManipulation_shortpeak(n, queries):\n a_s = []\n b_s = []\n k_s = []\n\n for i, row in enumerate(queries):\n a_s.append(row[0])\n b_s.append(row[1])\n k_s.append(row[2])\n\n # breakpoint()\n x = a_s + b_s\n all_indices = list(set(x))\n all_indices.sort()\n short_arr = [0] * len(all_indices)\n\n # mapping index of n-long array to index of shorter array\n index_lookup = {}\n for j, el in enumerate(all_indices):\n index_lookup[el] = j\n\n # breakpoint()\n for m in range(len(a_s)):\n short_arr[index_lookup[a_s[m]]] += k_s[m]\n short_arr[index_lookup[b_s[m]]] -= k_s[m]\n\n maxval = 0\n cumsum = 0\n for i, el in enumerate(short_arr):\n cumsum += el\n maxval = max(maxval, cumsum)\n\n print(f'{maxval: <15,d}: Max value')\n arr_size = short_arr.__sizeof__() / 1000000\n total = ((a_s.__sizeof__() / 1000000)\n + b_s.__sizeof__() / 1000000\n + k_s.__sizeof__() / 1000000\n + queries.__sizeof__() / 1000000\n + index_lookup.__sizeof__() / 1000000\n + short_arr.__sizeof__() / 1000000)\n print(f'{total: <15.2f}: All objects size(MB)')\n print(f'{arr_size: <15.2f}: Array size(MB)')\n return maxval, arr_size", "def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))", "def max_pooling(img):\n result_img = img.copy()\n heignt, width, _ = result_img.shape\n for h in range(0, heignt, 8):\n for w in range(0, width, 8):\n result_img[h:h+8, w:w+8, 0] = np.max(result_img[h:h+8, w:w+8, 0])\n result_img[h:h+8, w:w+8, 1] = np.max(result_img[h:h+8, w:w+8, 1])\n 
result_img[h:h+8, w:w+8, 2] = np.max(result_img[h:h+8, w:w+8, 2])\n result_img[(heignt//8)*8:heignt, :, :] = 0\n result_img[:, (width//8)*8:width, :] = 0\n return result_img", "def create_array( n ):", "def max_noutput_items(self):\n return _spacegrant_swig.invert_bit_sptr_max_noutput_items(self)", "def expanding_max_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n maxv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(maxv) or a[i] > maxv:\n maxv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = maxv\n return out", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_max_noutput_items(self)", "def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.phasedarray_sptr_set_max_noutput_items(self, m)", "def __init__(self, maxlen, dtype):\n self._start_index = np.int64(0)\n self._len = np.int64(0)\n self._maxlen = np.array(maxlen)\n initial_len = 10 if np.isinf(self._maxlen) else self._maxlen\n self._buffer = np.zeros(shape=(initial_len,), dtype=dtype)", "def _get_optimal_threshold(arr, num_bins=1001, num_quantized_bins=255):\n if not isinstance(arr, np.ndarray):\n raise TypeError('get_optimal_threshold only supports input type of np.ndarray,'\n ' while received type=%s' % (str(type(arr))))\n min_val = np.min(arr)\n max_val = np.max(arr)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))\n zero_bin_idx = num_bins // 2\n num_half_quantized_bins = num_quantized_bins // 2\n assert np.allclose(hist_edges[zero_bin_idx] + hist_edges[zero_bin_idx + 1],\n 0, rtol=1e-5, atol=1e-7)\n\n thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)\n divergence = np.zeros_like(thresholds)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)\n # i means the number of bins on half axis excluding the zero bin.\n for i in range(num_quantized_bins // 2,\n num_bins // 2 + 1):\n p_bin_idx_start = zero_bin_idx - i\n p_bin_idx_stop = zero_bin_idx + i + 1\n thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n\n # generate reference distribution p\n p = sliced_nd_hist.copy()\n assert p.size % 2 == 1\n assert p.size >= num_quantized_bins\n # put left outlier count in p[0]\n left_outlier_count = np.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n # put right outlier count in p[-1]\n right_outlier_count = np.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n # is_nonzeros[k] indicates whether hist[k] is nonzero\n is_nonzeros = (sliced_nd_hist != 0).astype(np.int32)\n\n # calculate how many bins should be merged to generate quantized distribution q\n num_merged_bins = p.size // num_quantized_bins\n # merge hist into num_quantized_bins bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + 
num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n # expand quantized_bins into p.size bins\n q = np.zeros(p.size, dtype=np.float32)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = float(quantized_bins[j]) / float(norm)\n q[sliced_nd_hist == 0] = 0\n p = _smooth_distribution(p)\n # There is a chance that q is an invalid probability distribution.\n try:\n q = _smooth_distribution(q)\n except ValueError:\n divergence[i - num_half_quantized_bins] = float(\"inf\")\n else:\n divergence[i - num_half_quantized_bins] = stats.entropy(p, q)\n quantized_bins[:] = 0\n\n min_divergence_idx = np.argmin(divergence)\n min_divergence = divergence[min_divergence_idx]\n opt_th = thresholds[min_divergence_idx]\n return min_val, max_val, min_divergence, opt_th", "def max_noutput_items(self):\n return _spacegrant_swig.general_burster_2_sptr_max_noutput_items(self)", "def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=True)\n\n # test\n assert len(output) == 3\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False)\n\n # test\n assert len(output) == 2", "def max_cut(g):\n # Write your code here.\n return []", "def max_pool2d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._pair, **locals())", "def max_pool3d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._triple, **locals())", "def create_new_targets(window, data):\n new_data = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=(rolling_window(data.astype(int), 0, window)))\n\n return new_data", "def maxpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n mp = tf.nn.max_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n # print(name + \" : \", str(mp.shape))\n return mp", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def max_noutput_items(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_max_noutput_items(self)", "def max_pool1d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._single, **locals())", "def _resize(self, maxval):\n assert maxval >= self._N\n temp = [None for i in range(maxval)] # (Item[]) new [maxval] # Item[]\n q_len = len(self._q)\n for i in range(self._N):\n temp[i] = self._q[(self._first + i) % q_len]\n self._q = temp\n self._first = 0\n self._last = self._N", "def 
create_split_bounds(N, train_pct):\n train_len = int(round(train_pct * N))\n if ((N - train_len) % 2) != 0:\n train_len += 1\n\n # NOTE: We're assume the dev and test set are equal in length.\n test_len = dev_len = int((N - train_len) / 2)\n\n assert \"Not all data points are being used. Check create_split_bounds()\", \\\n (train_len + test_len + dev_len) == N\n\n return train_len, dev_len, test_len", "def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold, confidence_threshold):\n batch = tf.unstack(inputs)\n boxes_dicts = []\n\n for boxes in batch:\n boxes = tf.boolean_mask(boxes, boxes[:, 4] > confidence_threshold)\n classes = tf.argmax(boxes[:, 5:], axis=-1)\n classes = tf.expand_dims(tf.cast(classes, tf.float32), axis=-1)\n boxes = tf.concat([boxes[:, :5], classes], axis=-1)\n\n boxes_dict = dict()\n for cls in range(n_classes):\n mask = tf.equal(boxes[:, 5], cls)\n mask_shape = mask.get_shape()\n if mask_shape.ndims != 0:\n class_boxes = tf.boolean_mask(boxes, mask)\n boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes, [4, 1, -1], axis=-1)\n boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])\n indices = tf.image.non_max_suppression(boxes_coords,\n boxes_conf_scores,\n max_output_size,\n iou_threshold)\n class_boxes = tf.gather(class_boxes, indices)\n boxes_dict[cls] = class_boxes[:, :5]\n\n boxes_dicts.append(boxes_dict)\n return boxes_dicts", "def adaptive_max_pool3d(input, output_size):\n args = utils._get_adaptive_pool_args(\n input.size()[-3:], utils._triple(output_size))\n return _pool('MAX', utils._triple, input, **args)", "def pick_largest(self, cut_off):\r\n for i in range(self.dimension):\r\n m = self.masked[int(self.rank_yx(self.rank[i])[0]) # locating the corresponding mark array\r\n ,int(self.rank_yx(self.rank[i])[1])]\r\n if m * self.image_data[i] == self.image_data[i]:\r\n if self.image_data[i] <= cut_off:\r\n print(\"Surveying completed\")\r\n return -1,-1 # returns -1,-1 if scan is completed\r\n else:\r\n return self.image_data[i], np.array(self.rank[i])", "def __len__(self):\n return max(self.A_size, self.B50_size, self.B100_size, self.B150_size)", "def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n 
raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.beamformer_sptr_max_noutput_items(self)", "def max_noutput_items(self):\n return _TestA_swig.my_qpsk_demod_cb_sptr_max_noutput_items(self)", "def max_pool(inputs):\n return tf.layers.max_pooling2d(\n inputs,\n pool_size=[2, 2],\n strides=[2, 2],\n padding='same',\n )", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.general_burster_2_sptr_set_max_noutput_items(self, m)", "def create_max_pool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "def max_noutput_items(self):\n return _TestA_swig.cleanslate_sptr_max_noutput_items(self)", "def over_classification_filter(self, in_metadata, max_classes=30):\n print('size before:', len(in_metadata))\n out_metadata = {}\n for img_id, img_dict in in_metadata.iteritems():\n if img_dict['n_labels'] <= max_classes:\n out_metadata[img_id] = img_dict\n\n print('size after:', len(out_metadata))\n return out_metadata", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.invert_bit_sptr_set_max_noutput_items(self, m)", "def set_max_noutput_items(self, m):\n return _add_vector_swig.add_vector_2_cpp_sptr_set_max_noutput_items(self, m)", "def build_convpool_max(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=3):\n convnets = []\n w_init = None\n # Build 7 parallel CNNs with shared weights\n for i in range(n_timewin):\n if i == 0:\n convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)\n else:\n convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)\n convnets.append(convnet)\n # convpooling using Max pooling over frames\n convpool = ElemwiseMergeLayer(convnets, theano.tensor.maximum)\n # A fully-connected layer of 512 units with 50% dropout on its inputs:\n convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),\n num_units=512, nonlinearity=lasagne.nonlinearities.rectify)\n # And, finally, the output layer with 50% dropout on its inputs:\n convpool = lasagne.layers.DenseLayer(lasagne.layers.dropout(convpool, p=.5),\n num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)\n return convpool", "def max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y", "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass", "def _infer_dimension_(spectrum, n_samples, n_features):\n n_spectrum = len(spectrum)\n ll = np.empty(n_spectrum)\n for rank in range(n_spectrum):\n ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)\n return ll.argmax()", "def look_for_biggest_structure(game, chunk, imgs, hmap, nmax, type_):\n for n in range(nmax,0,-1):\n i = 0\n m = parameters.MAX_VILLAGE_WIDTH * n / parameters.MAX_VILLAGE_SIZE\n while i < 
parameters.VILLAGE_TRY:\n chunkpos = np.random.randint(0,parameters.S,2)\n cx,cy = chunkpos\n h = np.sum(hmap[cx:cx+m,cy:cy+m]) / (m*m)\n if h > parameters.VILLAGE_LEVEL:\n force_build_structure(game, imgs, chunk, chunkpos, n, type_)\n return n\n i += 1\n return 0", "def _max_pool(x):\n return tf.nn.max_pool(value=x,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.doaesprit_sptr_max_noutput_items(self)", "def filter_halo_pnum(data, Ncut=1000):\n npart = np.array(data['np'][0])\n ind =np.where(npart > Ncut)[0]\n print(\"# of halos:\",len(ind))\n return ind", "def max_noutput_items(self):\n return _spacegrant_swig.NRZI_sptr_max_noutput_items(self)", "def margin_sampling(predictions, number):\n maxes = []\n maxesBis = []\n tmp = []\n for i in range(0, predictions.shape[0]):\n maxes.append(np.max(predictions[i]))\n tmp.append(np.delete(predictions[i], np.where(predictions[i] == np.max(predictions[i]))[0]))\n maxesBis.append(np.max(tmp[i]))\n\n val = np.array(maxes) - np.array(maxesBis)\n\n return __get_min_indexes(val, number)", "def cutout(a):\n x = np.arange(a.shape[0])\n c = a.shape[0] / 2\n\n if len(a.shape) == 2:\n x = x.reshape(-1, 1)\n y = x.reshape(1, -1)\n zero = ((x-c)**2 + (y-c)**2) < c**2\n elif len(a.shape) == 3:\n x = x.reshape(-1, 1, 1)\n y = x.reshape(1, -1, 1)\n z = x.reshape(1, -1, 1)\n zero = ((x-c)**2 + (y-c)**2 + (z-c)**2) < c**2\n else:\n raise ValueError(\"Cutout array must have dimension 2 or 3!\")\n a *= zero\n return a", "def __init__(self, maxSize=3):\r\n try:\r\n if maxSize % 2 == 1 and maxSize >= 3:\r\n self._maxSize = maxSize\r\n else:\r\n raise ValueError(\"maxSize must be an odd integer >= 3\")\r\n except ValueError:\r\n raise\r\n self._data = np.ndarray(0)", "def get_arraymax_section_data(key_list, max_size=1300000000):\n result_list = []\n data_key_list = DATA_BLOCK_SPEC.keys()\n for key in key_list:\n if key in data_key_list:\n result_list.append(max_size)\n else:\n result_list.append(0)\n return np.array(result_list)", "def cov_seg_max(n, L):\n n_seg = 1\n while int(n) > n_seg:\n n_seg += 1\n return int(L/n_seg) # Samples within one segment ", "def get_num_bins(train_input: np.array, max_splits: int) -> List[int]:\n num_bins = [2 for _ in train_input[0]]\n max_bins = [len(set(column)) for column in train_input.T]\n entropies = [ExamDropExtractor.__entropy(np.expand_dims(column, axis = 1), 2) for column in train_input.T]\n options = PriorityQueue()\n\n for i, data in enumerate(train_input.T):\n if max_bins[i] > 2:\n data = np.expand_dims(data, axis = 1)\n new_entropy = ExamDropExtractor.__entropy(data, 3)\n options.put((-(new_entropy - entropies[i]), i))\n\n for _ in range(max_splits):\n if options.empty():\n break\n\n entropy, i = options.get()\n num_bins[i] = num_bins[i] + 1\n entropies[i] = entropies[i] - entropy\n if num_bins[i] != max_bins[i]:\n data = np.expand_dims(train_input[:, i], axis = 1)\n new_entropy = ExamDropExtractor.__entropy(data, num_bins[i] + 1)\n options.put((-(new_entropy - entropies[i]), i))\n\n return num_bins", "def max_noutput_items(self):\n return _spacegrant_swig.DeNRZI_sptr_max_noutput_items(self)", "def __reduce__(self):\n return ImageNetDownsample, (self.cutout,)", "def rolling_max_nb(a, window, minp=None):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = rolling_max_1d_nb(a[:, col], window, minp=minp)\n return out", "def max_noutput_items(self):\n return 
_spacegrant_swig.G3RUH_descramble_sptr_max_noutput_items(self)", "def set_max_noutput_items(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr_set_max_noutput_items(self, *args, **kwargs)", "def __init__(self, size=(2, 2), **kwargs):\n super(MaxUnpooling2D, self).__init__(**kwargs)\n self.size = size", "def min_noutput_items(self) -> \"int\":\n return _beamforming_swig.phasedarray_sptr_min_noutput_items(self)", "def max_noutput_items(self):\n return _spacegrant_swig.binary_sink_sptr_max_noutput_items(self)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.binary_sink_sptr_set_max_noutput_items(self, m)", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n estimates.append(np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id]))\n return self.bandits[np.argmax(estimates)]", "def post_processing(conf_thresh, nms_thresh, output):\n # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]\n # num_anchors = 9\n # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n # strides = [8, 16, 32]\n # anchor_step = len(anchors) // num_anchors\n\n # [batch, num, 1, 4]\n box_array = output[0]\n # [batch, num, num_classes]\n confs = output[1]\n\n if type(box_array).__name__ != \"ndarray\":\n box_array = box_array.cpu().detach().numpy()\n confs = confs.cpu().detach().numpy()\n\n num_classes = confs.shape[2]\n\n # [batch, num, 4]\n box_array = box_array[:, :, 0]\n\n # [batch, num, num_classes] --> [batch, num]\n max_conf = np.max(confs, axis=2)\n max_id = np.argmax(confs, axis=2)\n\n bboxes_batch = []\n for batch in range(box_array.shape[0]):\n\n argwhere = max_conf[batch] > conf_thresh\n l_box_array = box_array[batch, argwhere, :]\n l_max_conf = max_conf[batch, argwhere]\n l_max_id = max_id[batch, argwhere]\n\n bboxes = []\n # nms for each class\n for cls_id in range(num_classes):\n\n cls_argwhere = l_max_id == cls_id\n ll_box_array = l_box_array[cls_argwhere, :]\n ll_max_conf = l_max_conf[cls_argwhere]\n ll_max_id = l_max_id[cls_argwhere]\n\n keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)\n\n if keep.size > 0:\n ll_box_array = ll_box_array[keep, :]\n ll_max_conf = ll_max_conf[keep]\n ll_max_id = ll_max_id[keep]\n\n for box in range(ll_box_array.shape[0]):\n bboxes.append(\n [\n ll_box_array[box, 0],\n ll_box_array[box, 1],\n ll_box_array[box, 2],\n ll_box_array[box, 3],\n ll_max_conf[box],\n ll_max_conf[box],\n ll_max_id[box],\n ]\n )\n\n bboxes_batch.append(bboxes)\n\n return bboxes_batch", "def nanmax_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.nanmax(a[:, col])\n return out", "def nmax2t(cls, n_max: int, oversampling: float = 10.) 
-> np.ndarray:\n critical_res = 4 * SHT.nmax2nside(n_max) - 1\n return np.linspace(-1, 1, np.ceil(oversampling * critical_res).astype(int))", "def __init__(self, max_n):\n self._max_n = max_n\n self.__pq = [0] * (max_n + 1)\n self.__qp = [-1] * (max_n + 1)\n self.__keys = [None] * (max_n + 1)\n self.__n = 0", "def adaptive_max_pool2d(input, output_size):\n args = utils._get_adaptive_pool_args(\n input.size()[-2:], utils._pair(output_size))\n return _pool('MAX', utils._pair, input, **args)", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.randomsampler_sptr_set_max_noutput_items(self, m)", "def max_pool_2x2(x):\n#{{{\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "def contraction_max_algos():\n return cutensor.contractionMaxAlgos()", "def max_output_buffer(self, i):\n return _spacegrant_swig.general_burster_2_sptr_max_output_buffer(self, i)", "def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.softmaxvalues = [0] * 10\n self.t = 0.3", "def compute(self, node, input_vals):\r\n #start = time.time()\r\n\r\n #assert len(input_vals) == 1\r\n strides = node.const_attr[1]\r\n ksize = node.const_attr[0]\r\n ish = list(input_vals[0].shape)\r\n input = input_vals[0]\r\n output = np.zeros([ish[0],(ish[1]-ksize[1])//strides[1]+1,(ish[2]-ksize[2])//strides[2]+1,ish[3]])\r\n osh = output.shape\r\n #print(osh)\r\n for i in range(osh[1]):\r\n for j in range(osh[2]):\r\n output[:,i,j,:] = np.amax(input[:,i*strides[1]:(i+1)*strides[1],j*strides[1]:(j+1)*strides[1],:],axis=(1,2))\r\n #end = time.time() \r\n #print(\"max_pool\") \r\n #print(end - start) \r\n return output\r\n \r\n #assert False\r", "def test_max_N_too_small(self):\n\t\t\n\t\t\n\t\tparams = DEFAULT_PARAMS.copy()\n\t\tparams[MAX_N] = DEFAULT_MAX_EVALS+1\n\t\t\n\t\titerator = self.watcher.make_layer_iterator(model=self.model, params=params)\n\t\tfor ww_layer in iterator:\n\t\t\tif ww_layer.N > params[MAX_N]:\n\t\t\t\tself.assertTrue(ww_layer.skipped)\n\t\t\n\t\tdetails = self.watcher.describe(max_N=DEFAULT_MAX_EVALS+1)\n\t\tprint(details[['N','M']])\n\t\tself.assertEqual(10,len(details))\n\n\t\treturn", "def constraints_max_offer_per_cust(n_row, n_col):\n constraints = np.identity(n_row * n_col)\n return constraints", "def non_maximum_suppression(prediction, iou_threshold=0.45, score_threshold=0.25):\n\n # num_classes = len(names)\n max_wh = 4096\n max_det = 300\n max_nms = 30000\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n\n for xi, x in enumerate(prediction):\n x = x[x[..., 4] > score_threshold]\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = x[:, :4]\n\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > score_threshold]\n\n # Filter by class\n # if classes is not None:\n # x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n # sort by confidence\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n\n # Batched NMS\n c = x[:, 5:6] * max_wh # classes\n # boxes (offset by class), scores\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = nms(boxes, scores, iou_threshold) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = 
x[i]\n\n return output", "def max_noutput_items(self):\n return _spacegrant_swig.hdlc_deframer_sptr_max_noutput_items(self)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.NRZI_sptr_set_max_noutput_items(self, m)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.DeNRZI_sptr_set_max_noutput_items(self, m)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.G3RUH_descramble_sptr_set_max_noutput_items(self, m)", "def MaxHks(N): \n return np.log2(N-1)/2", "def __init__(self, max_len, max_num):\n self.a = {}\n self.a[0] = [[]]\n self.a[1] = [[1]]\n\n self.max_len = max_len\n self.max_num = max_num", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = torch.Tensor(scores.size(0)).fill_(0).long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. 
after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def set_max_noutput_items(self, *args, **kwargs):\n return _TestA_swig.cleanslate_sptr_set_max_noutput_items(self, *args, **kwargs)", "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=10):\n # TOP_K was originally -1, to keep all faces, but trying to filter\n # CANDIDATE_SIZE was originally 200, trying to limit # of faces\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n \n # additional method of discrimination, only the boxes\n # with the largest areas are selected\n new_boxes = box_scores[picked, :]\n areas = []\n for box in new_boxes:\n left_top = np.asarray([box[0], box[1]])\n right_bottom = np.asarray([box[2], box[3]])\n area = area_of(left_top, right_bottom)\n areas.append(area)\n areas = np.asarray(areas)\n biggest = np.argsort(areas)\n last_index = len(biggest) - 1\n middle = max(len(biggest)// 2, 1)\n size = min(middle, candidate_size / 2)\n \n final_boxes = []\n for i in range(size):\n final_boxes.append(new_boxes[biggest[last_index-i]])\n final_boxes = np.asarray(final_boxes)\n \n return final_boxes\n #return box_scores[picked, :]", "def max_output_buffer(self, i: \"int\") -> \"long\":\n return _beamforming_swig.phasedarray_sptr_max_output_buffer(self, i)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.hdlc_deframer_sptr_set_max_noutput_items(self, m)", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.beamformer_sptr_set_max_noutput_items(self, m)", "def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude channel dimension\n return min(shape).bit_length() - 1", "def n_largest_channels(summed, n=6):\r\n\r\n rows, columns = summed.shape\r\n\r\n # n has to be smaller than the number of rows\r\n assert n <= rows\r\n\r\n n_out_of_m = np.zeros(summed.shape)\r\n\r\n for col in range(columns):\r\n column = summed[:, col]\r\n indices = np.argsort(column)[:-n]\r\n tmp = column\r\n tmp[indices] = 0\r\n n_out_of_m[:, col] = tmp\r\n\r\n return n_out_of_m", "def __init__(self, max_num_of_rounds_to_retain=100, num_of_last_check_rounds_consider=2):\n self.data = list()\n self.max_num_of_rounds_to_retain = max_num_of_rounds_to_retain\n self.num_of_last_check_rounds_consider = num_of_last_check_rounds_consider", "def nms(boxes, scores, overlap=0.5, top_k=200):\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = 
boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count", "def max_output_buffer(self, i):\n return _add_vector_swig.add_vector_2_cpp_sptr_max_output_buffer(self, i)", "def set_max_noutput_items(self, m):\n return _spacegrant_swig.hdlc_framer_sptr_set_max_noutput_items(self, m)" ]
[ "0.5717705", "0.5661527", "0.5603735", "0.5556727", "0.5533146", "0.54946077", "0.54890066", "0.5484546", "0.54764545", "0.5465916", "0.54387826", "0.54167676", "0.5408915", "0.5392852", "0.5391776", "0.53902745", "0.5386068", "0.53750616", "0.5368087", "0.5362609", "0.5357239", "0.53557456", "0.53500104", "0.5347422", "0.5346802", "0.53459895", "0.5344874", "0.53320915", "0.53192997", "0.531634", "0.531553", "0.5312746", "0.5309329", "0.5303385", "0.53004473", "0.5283153", "0.52820146", "0.5268377", "0.52659464", "0.52625155", "0.52608544", "0.5254505", "0.5253804", "0.52489066", "0.52300704", "0.52268285", "0.52228117", "0.5220608", "0.5216667", "0.5216445", "0.52157915", "0.52139103", "0.52054644", "0.52045923", "0.52042705", "0.5201294", "0.5181019", "0.5180875", "0.51801574", "0.51800007", "0.51779133", "0.51764745", "0.51695186", "0.51625425", "0.5158056", "0.51523143", "0.5150153", "0.51498204", "0.514667", "0.5145773", "0.51413447", "0.51408887", "0.5130253", "0.51254153", "0.51242703", "0.5122433", "0.51210934", "0.5105053", "0.5104855", "0.5104497", "0.5095511", "0.5094798", "0.50944513", "0.50830334", "0.5082763", "0.50778705", "0.5074727", "0.5071113", "0.5070279", "0.5069339", "0.5065598", "0.50636744", "0.50618577", "0.50573176", "0.5055059", "0.5045263", "0.50441986", "0.50359535", "0.5035791", "0.5029401" ]
0.58375233
0