Dataset schema (29 columns; ⌀ = nullable):

| column | type |
|---|---|
| hexsha | string (length 40) |
| size | int64 (4 to 1.02M) |
| ext | string (8 classes) |
| lang | string (1 class) |
| max_stars_repo_path | string (length 4 to 209) |
| max_stars_repo_name | string (length 5 to 121) |
| max_stars_repo_head_hexsha | string (length 40) |
| max_stars_repo_licenses | sequence (length 1 to 10) |
| max_stars_count | int64 (1 to 191k), nullable |
| max_stars_repo_stars_event_min_datetime | string (length 24), nullable |
| max_stars_repo_stars_event_max_datetime | string (length 24), nullable |
| max_issues_repo_path | string (length 4 to 209) |
| max_issues_repo_name | string (length 5 to 121) |
| max_issues_repo_head_hexsha | string (length 40) |
| max_issues_repo_licenses | sequence (length 1 to 10) |
| max_issues_count | int64 (1 to 67k), nullable |
| max_issues_repo_issues_event_min_datetime | string (length 24), nullable |
| max_issues_repo_issues_event_max_datetime | string (length 24), nullable |
| max_forks_repo_path | string (length 4 to 209) |
| max_forks_repo_name | string (length 5 to 121) |
| max_forks_repo_head_hexsha | string (length 40) |
| max_forks_repo_licenses | sequence (length 1 to 10) |
| max_forks_count | int64 (1 to 105k), nullable |
| max_forks_repo_forks_event_min_datetime | string (length 24), nullable |
| max_forks_repo_forks_event_max_datetime | string (length 24), nullable |
| content | string (length 4 to 1.02M) |
| avg_line_length | float64 (1.07 to 66.1k) |
| max_line_length | int64 (4 to 266k) |
| alphanum_fraction | float64 (0.01 to 1) |
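A minimal sketch of how rows with the columns above could be inspected; this assumes the Hugging Face `datasets` library and uses a placeholder dataset path, since the dump itself does not name one:

    from datasets import load_dataset

    ds = load_dataset("user/python-code-dump", split="train")  # placeholder name
    row = ds[0]
    # each row pairs the repo metadata with the file's full source in `content`
    print(row["max_stars_repo_name"], row["size"], row["avg_line_length"])

In the raw rows below, each file's source follows its metadata, and the trailing `| avg_line_length | max_line_length | alphanum_fraction |` cells close the row.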
304b8b9594f4e1bb2aaa5b3d57cf13f6031133c3 | 480 | py | Python | mdl_scrapper/models/Search.py | dragneelfps/mdl_scrapper | cf9f8892d36e82713ee24b9727a728cf7a63faac | ["MIT"] | 1 | 2020-04-30T13:27:43.000Z | 2020-04-30T13:27:43.000Z | mdl_scrapper/models/Search.py | dragneelfps/mdl_scrapper | cf9f8892d36e82713ee24b9727a728cf7a63faac | ["MIT"] | null | null | null | mdl_scrapper/models/Search.py | dragneelfps/mdl_scrapper | cf9f8892d36e82713ee24b9727a728cf7a63faac | ["MIT"] | null | null | null |

from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional
@dataclass
class SearchDrama:
id: int
title: str
cover_url: str
ranking: Optional[int]
score: Optional[float]
description: str
@dataclass
class SearchResult:
url: str
dramas: List[SearchDrama]
    timestamp: datetime = field(default_factory=datetime.now)  # per-instance; a bare datetime.now() default would be evaluated once at class definition
def is_empty(self):
return self.count() == 0
def count(self):
return len(self.dramas)
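
# A minimal usage sketch of the dataclasses above (all values are made up
# for illustration):
#
#   drama = SearchDrama(id=1, title="Signal", cover_url="http://example.com/1.jpg",
#                       ranking=None, score=8.4, description="A detective drama")
#   result = SearchResult(url="http://example.com/search?q=signal", dramas=[drama])
#   assert result.count() == 1 and not result.is_empty()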
| 17.777778 | 40 | 0.68125 |
77156a00d78d9568e8209ccfa2e1293a4d6611e8 | 5,281 | py | Python | scripts/add_comp_self.py | shaunstoltz/kunlun | 3974857ff88418db0ccc30284f67438d590546d1 | ["PostgreSQL", "Apache-2.0"] | null | null | null | scripts/add_comp_self.py | shaunstoltz/kunlun | 3974857ff88418db0ccc30284f67438d590546d1 | ["PostgreSQL", "Apache-2.0"] | null | null | null | scripts/add_comp_self.py | shaunstoltz/kunlun | 3974857ff88418db0ccc30284f67438d590546d1 | ["PostgreSQL", "Apache-2.0"] | null | null | null |

# Copyright (c) 2019 ZettaDB inc. All rights reserved.
# This source code is licensed under Apache 2.0 License,
# combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
# add one or more computing nodes
import os
import os.path
import mysql.connector
import argparse
import json
import common
import socket
import add_comp_nodes
import install_pg
import sys
import psycopg2
# config file format:
#
#[
# {
# "id":1,
# "name":"comp1",
# "ip":"127.0.0.1",
# "port":5431,
# "user":"abc",
#    "password":"abc",
# "datadir":"/data/pg_data_dir1"
# }
#]
def gethostip():
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
return ip
def checkserver(sip, sport, suser, spass, sdb):
    # psycopg2.connect raises on failure, so reaching the return proves the
    # server accepts connections; the connection is closed before it is
    # handed back and the caller only tests it for being non-None.
    conn = psycopg2.connect(host=sip, port=sport, user=suser, database=sdb, password=spass)
    conn.close()
    return conn
def add_comp_self(install_path, config_template_file, mysql_conn_params, config_path, args):
selfip = args.hostname
meta_conn = mysql.connector.connect(**mysql_conn_params)
meta_cursor = meta_conn.cursor()
meta_cursor.execute("start transaction")
stmt = "insert into comp_nodes_id_seq values();"
meta_cursor.execute(stmt)
stmt = "select last_insert_id();"
meta_cursor.execute(stmt)
row = meta_cursor.fetchone()
meta_cursor.execute("commit")
maxid = 1
if row is not None and row[0] is not None:
maxid = int(row[0])
meta_cursor.close()
meta_conn.close()
selfobj = {"id" : maxid,
"name" : "comp" + str(maxid),
"ip" : selfip,
"port" : args.port,
"user": args.user,
"password": args.password,
"datadir" : args.datadir
}
selfarr = [selfobj]
outf = open(config_path, "w")
json.dump(selfarr, outf, indent=4)
outf.close()
if args.install:
if args.docker:
# install is not performed here currently, since the meta_config file needs to
os.system("chmod a+rwx /kunlun/env.sh")
os.system("chown -R postgres:postgres /pgdatadir")
os.system("su postgres -c 'cd /kunlun && . ./env.sh; cd $PG_DIR/scripts; python2 install_pg.py --config=./%s --install_ids=%d' " % (config_path, maxid))
else:
install_pg.install_pg(config_template_file, install_path, selfobj)
conn = checkserver(selfip, args.port, args.user, args.password, 'postgres')
if conn is None:
raise Exception("Computing server is not installed correctly, please check the installation!")
add_comp_nodes.add_computing_nodes(mysql_conn_params, args, config_path, [maxid], False)
sys.stdout.flush()
sys.stderr.flush()
# Reset the comp_node_id. It should be removed when comp_node_id is removed from postgresql.conf
if not args.install:
cmd0 = "export PATH=" + install_path + "/bin:$PATH;"
cmd1 = "export LD_LIBRARY_PATH=" + install_path + "/lib:$LD_LIBRARY_PATH;"
if args.docker:
os.system("sed -i 's/comp_node_id.*=.*/comp_node_id=%d/g' %s/postgresql.conf" % (maxid, args.datadir))
os.system("su postgres -c 'cd /kunlun && . ./env.sh && pg_ctl -D %s stop -m immediate' " % args.datadir)
os.system("su postgres -c 'cd /kunlun && . ./env.sh && cd $PG_DIR/scripts && python2 start_pg.py port=%d' " % args.port)
else:
os.system("sed -i 's/comp_node_id.*=.*/comp_node_id=%d/g' %s/postgresql.conf" % (maxid, args.datadir))
os.system(cmd0 + cmd1 + "pg_ctl -D %s stop -m immediate " % args.datadir)
# start_pg.py set the env well.
os.system("python2 start_pg.py port=%d " % args.port)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add current computing node to the cluster.')
parser.add_argument('--meta_config', type=str, help="metadata cluster config file path")
parser.add_argument('--cluster_name', type=str, help = "The cluster name")
parser.add_argument('--user', type=str, help="The user name")
parser.add_argument('--password', type=str, help="The password")
parser.add_argument('--hostname', type=str, help="The hostname", default=gethostip())
parser.add_argument('--port', type=int, help="The port", default=5432)
parser.add_argument('--datadir', type=str, help="The data directory", default='/pgdatadir')
parser.add_argument('--install', help="install it first", default=False, action='store_true')
parser.add_argument('--docker', help="process is in docker container", default=False, action='store_true')
parser.add_argument('--ha_mode', type=str, default='mgr', choices=['mgr','no_rep'])
args = parser.parse_args()
meta_jsconf = open(args.meta_config)
meta_jstr = meta_jsconf.read()
meta_jscfg = json.loads(meta_jstr)
install_path = os.path.dirname(os.getcwd())
config_template_file = install_path + "/resources/postgresql.conf"
mysql_conn_params = {}
mysql_conn_params = common.mysql_shard_check(meta_jscfg, args.ha_mode)
mysql_conn_params['database'] = 'Kunlun_Metadata_DB'
add_comp_self(install_path, config_template_file, mysql_conn_params, "self.json", args)
print "Current computing node successfully added to cluster " + args.cluster_name
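
    # A hypothetical invocation sketch (the flags come from the argparse
    # definitions above; the file name and values are made up):
    #
    #   python2 add_comp_self.py --meta_config=meta.json --cluster_name=clust1 \
    #       --user=abc --password=abc --port=5431 --datadir=/data/pg_data_dir1 --install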
| 40.623077 | 164 | 0.666162 |
9cdce11ca44b41ac1ae1ca8acca27c66d63d769b | 1,393 | py | Python | snacks/test/test_nutriment.py | NicolasFlandrois/Pure-Beurre | b64db344e3eabed8b123a6127fe0d038da53ff6e | ["MIT"] | null | null | null | snacks/test/test_nutriment.py | NicolasFlandrois/Pure-Beurre | b64db344e3eabed8b123a6127fe0d038da53ff6e | ["MIT"] | 7 | 2020-02-12T03:27:56.000Z | 2022-03-12T00:12:09.000Z | snacks/test/test_nutriment.py | NicolasFlandrois/PurBeurre-LinuxDeploy | de0a6677647fd6df5f4856dc6ac42275dae6aff4 | ["MIT"] | 2 | 2020-01-17T11:23:27.000Z | 2021-02-15T10:54:19.000Z |

#!/usr/bin/python3.7
# UTF8
# Date:
# Author: Nicolas Flandrois
import json
import urllib.request
from io import BytesIO
from snacks.nutriment import nutriments
# Mock Testing Json API Open Food Facts
def test_nutriments(monkeypatch):
results = {"product":
{"nutriments": {
"sugars_unit": "g",
"energy_100g": 1660,
"proteins_100g": 8.97,
"saturated-fat_unit": "g",
"saturated-fat_100g": 1.28,
"sodium_100g": 0.819,
"proteins_unit": "g",
"salt_100g": 2.05,
"nutrition-score-fr_100g": 16,
"fiber_unit": "g",
"fiber_100g": 0,
"energy_unit": "kcal",
"sodium_unit": "g",
"fat_unit": "g",
"salt_unit": "g",
"sugars_100g": 12.8,
"fat_100g": 8.97,
"carbohydrates_100g": 70.5
},
},
}
def mockreturn(request):
return BytesIO(json.dumps(results, sort_keys=True, indent=4,
separators=(',', ': ')).encode())
monkeypatch.setattr(urllib.request, 'urlopen', mockreturn)
test_ean = '1234567890123'
assert results == nutriments(test_ean)
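
# This test relies on pytest's monkeypatch fixture, so it is meant to be
# collected and run by pytest, e.g. (command is illustrative):
#
#   pytest -q snacks/test/test_nutriment.py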
| 28.428571 | 68 | 0.470208 |
d5228f3d01182dd44e6d2a8a4e01af9a2e060943 | 11,918 | py | Python | posthog/api/dashboard.py | alx-a/posthog | a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e | ["MIT"] | null | null | null | posthog/api/dashboard.py | alx-a/posthog | a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e | ["MIT"] | null | null | null | posthog/api/dashboard.py | alx-a/posthog | a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e | ["MIT"] | null | null | null |

import secrets
from typing import Any, Dict, Optional
import posthoganalytics
from django.db.models import Model, Prefetch, QuerySet
from django.db.models.query_utils import Q
from django.http import HttpRequest
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.views.decorators.clickjacking import xframe_options_exempt
from rest_framework import authentication, response, serializers, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import AuthenticationFailed, NotFound
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from posthog.api.routing import StructuredViewSetMixin
from posthog.api.shared import UserBasicSerializer
from posthog.auth import PersonalAPIKeyAuthentication, PublicTokenAuthentication
from posthog.helpers import create_dashboard_from_template
from posthog.models import Dashboard, DashboardItem, Team
from posthog.permissions import ProjectMembershipNecessaryPermissions
from posthog.tasks.update_cache import update_dashboard_item_cache, update_dashboard_items_cache
from posthog.utils import get_safe_cache, render_template, str_to_bool
class DashboardSerializer(serializers.ModelSerializer):
items = serializers.SerializerMethodField()
created_by = UserBasicSerializer(read_only=True)
use_template = serializers.CharField(write_only=True, allow_blank=True, required=False)
class Meta:
model = Dashboard
fields = [
"id",
"name",
"description",
"pinned",
"items",
"created_at",
"created_by",
"is_shared",
"share_token",
"deleted",
"creation_mode",
"use_template",
"filters",
"tags",
]
read_only_fields = ("creation_mode",)
def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Dashboard:
request = self.context["request"]
validated_data["created_by"] = request.user
team = Team.objects.get(id=self.context["team_id"])
use_template: str = validated_data.pop("use_template", None)
creation_mode = "template" if use_template else "default"
dashboard = Dashboard.objects.create(team=team, creation_mode=creation_mode, **validated_data)
if use_template:
try:
create_dashboard_from_template(use_template, dashboard)
except AttributeError:
raise serializers.ValidationError({"use_template": "Invalid value provided."})
elif request.data.get("items"):
for item in request.data["items"]:
DashboardItem.objects.create(
**{key: value for key, value in item.items() if key not in ("id", "deleted", "dashboard", "team")},
dashboard=dashboard,
team=team,
)
posthoganalytics.capture(
request.user.distinct_id,
"dashboard created",
{**dashboard.get_analytics_metadata(), "from_template": bool(use_template), "template_key": use_template},
)
return dashboard
def update(self, instance: Dashboard, validated_data: Dict, *args: Any, **kwargs: Any,) -> Dashboard:
validated_data.pop("use_template", None) # Remove attribute if present
if validated_data.get("is_shared") and not instance.share_token:
instance.share_token = secrets.token_urlsafe(22)
instance = super().update(instance, validated_data)
if "request" in self.context:
posthoganalytics.capture(
self.context["request"].user.distinct_id, "dashboard updated", instance.get_analytics_metadata()
)
return instance
def get_items(self, dashboard: Dashboard):
if self.context["view"].action == "list":
return None
if self.context["request"].GET.get("refresh"):
update_dashboard_items_cache(dashboard)
items = dashboard.items.filter(deleted=False).order_by("order").all()
self.context.update({"dashboard": dashboard})
return DashboardItemSerializer(items, many=True, context=self.context).data
class DashboardsViewSet(StructuredViewSetMixin, viewsets.ModelViewSet):
legacy_team_compatibility = True # to be moved to a separate Legacy*ViewSet Class
queryset = Dashboard.objects.all()
serializer_class = DashboardSerializer
authentication_classes = [
PublicTokenAuthentication,
PersonalAPIKeyAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication,
]
# Empty list means we can allow users to not be authenticated.
permission_classes = [] # type: ignore
def get_queryset(self) -> QuerySet:
queryset = super().get_queryset().order_by("name")
if self.action == "list":
queryset = queryset.filter(deleted=False)
queryset = queryset.prefetch_related(
Prefetch("items", queryset=DashboardItem.objects.filter(deleted=False).order_by("order"),)
)
if self.request.GET.get("share_token"):
return queryset.filter(share_token=self.request.GET["share_token"])
elif self.request.user.is_authenticated and not self.request.user.team:
raise NotFound()
elif not self.request.user.is_authenticated or "team_id" not in self.get_parents_query_dict():
            raise AuthenticationFailed(detail="You're not logged in, and no share_token was provided.")
return queryset
def retrieve(self, request: Request, *args: Any, **kwargs: Any) -> response.Response:
pk = kwargs["pk"]
queryset = self.get_queryset()
dashboard = get_object_or_404(queryset, pk=pk)
dashboard.last_accessed_at = now()
dashboard.save()
serializer = DashboardSerializer(dashboard, context={"view": self, "request": request})
return response.Response(serializer.data)
def get_parents_query_dict(self) -> Dict[str, Any]: # to be moved to a separate Legacy*ViewSet Class
if not self.request.user.is_authenticated or "share_token" in self.request.GET or not self.request.user.team:
return {}
return {"team_id": self.request.user.team.id}
class DashboardItemSerializer(serializers.ModelSerializer):
result = serializers.SerializerMethodField()
last_refresh = serializers.SerializerMethodField()
_get_result: Optional[Dict[str, Any]] = None
class Meta:
model = DashboardItem
fields = [
"id",
"short_id",
"name",
"description",
"filters",
"filters_hash",
"order",
"deleted",
"dashboard",
"layouts",
"color",
"last_refresh",
"refreshing",
"result",
"is_sample",
"saved",
"created_at",
"created_by",
]
def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> DashboardItem:
request = self.context["request"]
team = Team.objects.get(id=self.context["team_id"])
validated_data.pop("last_refresh", None) # last_refresh sometimes gets sent if dashboard_item is duplicated
if not validated_data.get("dashboard", None):
dashboard_item = DashboardItem.objects.create(team=team, created_by=request.user, **validated_data)
return dashboard_item
elif validated_data["dashboard"].team == team:
created_by = validated_data.pop("created_by", request.user)
dashboard_item = DashboardItem.objects.create(
team=team, last_refresh=now(), created_by=created_by, **validated_data
)
return dashboard_item
else:
raise serializers.ValidationError("Dashboard not found")
def update(self, instance: Model, validated_data: Dict, **kwargs) -> DashboardItem:
# Remove is_sample if it's set as user has altered the sample configuration
validated_data.setdefault("is_sample", False)
return super().update(instance, validated_data)
def get_result(self, dashboard_item: DashboardItem):
# If it's more than a day old, don't return anything
if dashboard_item.last_refresh and (now() - dashboard_item.last_refresh).days > 0:
return None
if not dashboard_item.filters_hash:
return None
if self.context["request"].GET.get("refresh"):
update_dashboard_item_cache(dashboard_item, None)
result = get_safe_cache(dashboard_item.filters_hash)
if not result or result.get("task_id", None):
return None
return result.get("result")
def get_last_refresh(self, dashboard_item: DashboardItem):
if self.get_result(dashboard_item):
return dashboard_item.last_refresh
dashboard_item.last_refresh = None
dashboard_item.save()
return None
def to_representation(self, instance):
representation = super().to_representation(instance)
representation["filters"] = instance.dashboard_filters(dashboard=self.context.get("dashboard"))
return representation
class DashboardItemsViewSet(StructuredViewSetMixin, viewsets.ModelViewSet):
legacy_team_compatibility = True # to be moved to a separate Legacy*ViewSet Class
queryset = DashboardItem.objects.all()
serializer_class = DashboardItemSerializer
permission_classes = [IsAuthenticated, ProjectMembershipNecessaryPermissions]
def get_queryset(self) -> QuerySet:
queryset = super().get_queryset()
if self.action == "list":
queryset = queryset.filter(deleted=False)
queryset = self._filter_request(self.request, queryset)
order = self.request.GET.get("order", None)
if order:
queryset = queryset.order_by(order)
else:
queryset = queryset.order_by("order")
return queryset
def _filter_request(self, request: Request, queryset: QuerySet) -> QuerySet:
filters = request.GET.dict()
for key in filters:
if key == "saved":
if str_to_bool(request.GET["saved"]):
queryset = queryset.filter(Q(saved=True) | Q(dashboard__isnull=False))
else:
queryset = queryset.filter(Q(saved=False))
elif key == "user":
queryset = queryset.filter(created_by=request.user)
elif key == "insight":
queryset = queryset.filter(filters__insight=request.GET["insight"])
return queryset
@action(methods=["patch"], detail=False)
def layouts(self, request, **kwargs):
team_id = self.team_id
for data in request.data["items"]:
self.queryset.filter(team_id=team_id, pk=data["id"]).update(layouts=data["layouts"])
serializer = self.get_serializer(self.queryset.filter(team_id=team_id), many=True)
return response.Response(serializer.data)
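
    # A sketch of the PATCH body this action expects, inferred from the loop
    # above (the id and layout values are illustrative only):
    #
    #   {"items": [{"id": 42, "layouts": {...}}]}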
def retrieve(self, request: Request, *args: Any, **kwargs: Any) -> response.Response:
pk = kwargs["pk"]
queryset = self.get_queryset()
dashboard_item = get_object_or_404(queryset, pk=pk)
serializer = self.get_serializer(dashboard_item, context={"view": self, "request": request})
return response.Response(serializer.data)
@xframe_options_exempt
def shared_dashboard(request: HttpRequest, share_token: str):
dashboard = get_object_or_404(Dashboard, is_shared=True, share_token=share_token)
return render_template(
"shared_dashboard.html", request=request, context={"dashboard": dashboard, "team_name": dashboard.team.name},
)
| 40.675768 | 119 | 0.663031 |
d707507a091303ce0392fb98cf16d6d15cc75e07 | 13,428 | py | Python | wavepal/timefreq_analysis_prelims.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | ["MIT"] | 22 | 2017-03-09T20:46:31.000Z | 2022-02-16T08:18:30.000Z | wavepal/timefreq_analysis_prelims.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | ["MIT"] | 6 | 2017-03-19T20:22:33.000Z | 2021-01-05T11:41:31.000Z | wavepal/timefreq_analysis_prelims.py | metegenez/WAVEPAL | fa2bb91e2c7e63681ae4592929215c96bc523597 | ["MIT"] | 9 | 2017-11-06T18:00:31.000Z | 2021-07-21T19:20:09.000Z |

import numpy as np
from tqdm import trange
from dt_central import dt_central
import copy
def timefreq_analysis_prelims(time,tau,scale,w0,gauss_spread,eps,dt_GCD,shannonnyquistexclusionzone,weighted_CWT,smoothing_coeff,smoothing_type):
""" timefreq_analysis_prelims returns some variables for the time-frequency analysis in 'Wavepal' class.
Inputs:
- time [1-dim numpy array of floats]: the times of the time series, distinct and in ascending order.
- tau [1-dim numpy array of floats]: the times at which the CWT is to be computed, distinct and in ascending order.
- scale [1-dim numpy array of floats]: the scales, distinct and in ascending order.
- w0 [float]: the usual parameter for the Morlet wavelet controlling the time-frequency resolution. Minimal allowed value is w0=5.5.
- gauss_spread [float]: parameter for the spread of gaussian. 2*gauss_spread*std (where std is the standard dev. of the gaussian) is the approximate SUPPORT (i.e. where the function is not zero) of a gaussian. Typical values are gauss_spread=3.0 (conservative choice) or sqrt(2.0) (value taken in Torrence and Compo, 1998, and some subsequent papers). This is used for the computation of the cone of influence and for the max. allowed scale.
- eps [float]: parameter controlling the flexibility on the value of the border of the Shannon-Nyquist exclusion zone. Typical value is eps=1.0e-06.
- dt_GCD [float]: the greatest common divisor of the time steps of the times in 'time'.
- shannonnyquistexclusionzone [str - value=True or False]: activate or not the Shannon-Nyquist exclusion zone.
- weighted_CWT [str - value=True or False]: True if weighted scalogram, or False if not.
- smoothing_coeff [float]: smoothing the CWT is performed this way: at a given (tau,scale), the CWT_smoothed is the average of the CWT along neighbouring values of tau (at the same scale). The interval of those neighbouring values of tau is delimited by: tau-smoothing_coeff*std and tau+smoothing_coeff*std, where std is the standard deviation of the gaussian. Note that:
-> the std depends on the scale.
-> Near the edges of the time series or close the the Shannon-Nyquist exclusion zone, the full interval over tau-smoothing_coeff*std to tau+smoothing_coeff*std cannot be considered. In such case, the CWT_smoothed at (tau,scale) is either ignored (if smoothing_type='fixed') or the interval around tau is shortened (if smoothing_type='variable').
- smoothing_type [str - value='fixed' or 'variable']: See above the explanations for 'smoothing_coeff'.
Outputs:
- scale [1-dim numpy array of floats]: the scales on which the analysis is to be performed.
- coi1 [1-dim numpy array of floats - size=scale.size]: cone of influence - left part
- coi2 [1-dim numpy array of floats - size=scale.size]: cone of influence - right part
- coi1_smooth [1-dim numpy array of floats - size=scale.size]: border of the forbidden zone on the left side. Only used if smoothing_type='fixed'.
- coi2_smooth [1-dim numpy array of floats - size=scale.size]: border of the forbidden zone on the right side. Only used if smoothing_type='fixed'.
- coi_smooth_ind [1-dim numpy array of ints - size=tau.size]: indices, of the vector 'tau', corresponding to coi1_smooth and coi2_smooth.
- weight_cwt [numpy array of floats - dim=(tau.size,scale.size)]: the weights for the weighted scalogram.
- scalelim1_ind [1-dim numpy array of ints - size=tau.size]: scale indices at the border of the Shannon-Nyquist exclusion zone. For a given tau, this is the first scale index for which the CWT is to be computed.
- scalelim1_smooth [1-dim numpy array of floats - size=tau.size]: scales at the border of the Shannon-Nyquist exclusion zone when there is smoothing. N.B.: if smoothing_type='fixed' and if there is smoothing (smoothing_coeff>0), the Shannon zone is refined.
- scalelim1_ind_smooth [1-dim numpy array of ints - size=tau.size]: indices of the scales in scalelim1_smooth, plus 1. For a given tau, this is the first scale index for which the CWT_smoothed is drawn.
- Qmax [int]: maximum length over which smoothing is to be performed, expressed in number of indices of tau.
- n_outside_scalelim1 [1-dim numpy array of ints - size=scale.size]: number of tau's outside the Shannon-Nyquist exclusion zone, for each scale.
-----------------------------
This is part of WAVEPAL
(C) 2016 G. Lenoir"""
N=time.size
J=scale.size
Q=tau.size
# Cone of influence
coi1=time[0]+gauss_spread*w0*scale
coi2=time[-1]-gauss_spread*w0*scale
# Central time step
dt_centr=dt_central(time)
# Normal time step and timemid
dt=np.zeros(N-1)
timemid=np.zeros(N-1)
for k in range(N-1):
dt[k]=time[k+1]-time[k]
timemid[k]=(time[k]+time[k+1])/2.
# Weights for the CWT squared norm and Shannon-Nyquist exclusion zone
print "Weights for the scalogram and Shannon-Nyquist exclusion zone:"
dcon=1./2./w0**2
scalelim1=np.ones(Q)*dt_GCD/np.pi*(1.+eps)
scalelim1_ind=np.zeros(Q,dtype=int)
scalelim1_ind_ghost=np.zeros(Q,dtype=int)
weight_cwt=-np.ones((Q,J))
if shannonnyquistexclusionzone is True and weighted_CWT is True:
for k in trange(Q):
tau_k=tau[k]
time_ind0=0
time_ind1=N-1
for l in range(J-1,-1,-1):
scale_l=scale[l]
time_ind0=np.argmin(np.absolute(time[time_ind0:]-tau_k+3.*w0*scale_l))+time_ind0
time_ind1=np.argmin(np.absolute(time[:(time_ind1+1)]-tau_k-3.*w0*scale_l))
ind0=max(time_ind0-1,0) # take the index minus 1
ind1=min(time_ind1+1,N-1)+1 # take the index plus 1
mydt=dt[ind0:ind1-1]
mytime=timemid[ind0:ind1-1]-tau_k
hvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp1=sum(hvec*mydt)/sum(hvec)
mytime=time[ind0:ind1]-tau_k
mydt=dt_centr[ind0:ind1]
gvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp2=sum(gvec*mydt)/sum(gvec)
dtmp=max(dtmp1,dtmp2)
if scale_l<=dtmp/np.pi*(1.+eps):
scalelim1[k]=scale[l]
scalelim1_ind[k]=l+1
break
weight_cwt[k,l]=np.sqrt(2.*sum(gvec**2)/sum(gvec)**2)
elif shannonnyquistexclusionzone is False and weighted_CWT is True:
for k in trange(Q):
tau_k=tau[k]
time_ind0=0
time_ind1=N-1
Ipass=0
for l in range(J-1,-1,-1):
scale_l=scale[l]
time_ind0=np.argmin(np.absolute(time[time_ind0:]-tau_k+3.*w0*scale_l))+time_ind0
time_ind1=np.argmin(np.absolute(time[:(time_ind1+1)]-tau_k-3.*w0*scale_l))
ind0=max(time_ind0-1,0) # take the index minus 1
ind1=min(time_ind1+1,N-1)+1 # take the index plus 1
mydt=dt[ind0:ind1-1]
mytime=timemid[ind0:ind1-1]-tau_k
hvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp1=sum(hvec*mydt)/sum(hvec)
mytime=time[ind0:ind1]-tau_k
mydt=dt_centr[ind0:ind1]
gvec=np.exp(-dcon/scale_l**2*mytime**2)
weight_cwt[k,l]=np.sqrt(2.*sum(gvec**2)/sum(gvec)**2)
dtmp2=sum(gvec*mydt)/sum(gvec)
dtmp=max(dtmp1,dtmp2)
if scale_l<=dtmp/np.pi*(1.+eps) and Ipass==0:
scalelim1[k]=scale[l]
scalelim1_ind_ghost[k]=l+1
Ipass=1
elif shannonnyquistexclusionzone is True and weighted_CWT is False:
for k in trange(Q):
tau_k=tau[k]
time_ind0=0
time_ind1=N-1
for l in range(J-1,-1,-1):
scale_l=scale[l]
time_ind0=np.argmin(np.absolute(time[time_ind0:]-tau_k+3.*w0*scale_l))+time_ind0
time_ind1=np.argmin(np.absolute(time[:(time_ind1+1)]-tau_k-3.*w0*scale_l))
ind0=max(time_ind0-1,0) # take the index minus 1
ind1=min(time_ind1+1,N-1)+1 # take the index plus 1
mydt=dt[ind0:ind1-1]
mytime=timemid[ind0:ind1-1]-tau_k
hvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp1=sum(hvec*mydt)/sum(hvec)
mytime=time[ind0:ind1]-tau_k
mydt=dt_centr[ind0:ind1]
gvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp2=sum(gvec*mydt)/sum(gvec)
dtmp=max(dtmp1,dtmp2)
if scale_l<=dtmp/np.pi*(1.+eps):
scalelim1[k]=scale[l]
scalelim1_ind[k]=l+1
break
weight_cwt[k,l]=1.
elif shannonnyquistexclusionzone is False and weighted_CWT is False:
for k in trange(Q):
tau_k=tau[k]
time_ind0=0
time_ind1=N-1
Ipass=0
for l in range(J-1,-1,-1):
scale_l=scale[l]
time_ind0=np.argmin(np.absolute(time[time_ind0:]-tau_k+3.*w0*scale_l))+time_ind0
time_ind1=np.argmin(np.absolute(time[:(time_ind1+1)]-tau_k-3.*w0*scale_l))
ind0=max(time_ind0-1,0) # take the index minus 1
ind1=min(time_ind1+1,N-1)+1 # take the index plus 1
mydt=dt[ind0:ind1-1]
mytime=timemid[ind0:ind1-1]-tau_k
hvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp1=sum(hvec*mydt)/sum(hvec)
mytime=time[ind0:ind1]-tau_k
mydt=dt_centr[ind0:ind1]
gvec=np.exp(-dcon/scale_l**2*mytime**2)
dtmp2=sum(gvec*mydt)/sum(gvec)
dtmp=max(dtmp1,dtmp2)
weight_cwt[k,l]=1.
if scale_l<=dtmp/np.pi*(1.+eps) and Ipass==0:
scalelim1[k]=scale[l]
scalelim1_ind_ghost[k]=l+1
Ipass=1
# redefine the scale and other variables because new min scale
min_scalelim1_ind=min(scalelim1_ind)
scale=copy.copy(scale[min_scalelim1_ind:])
coi1=copy.copy(coi1[min_scalelim1_ind:])
coi2=copy.copy(coi2[min_scalelim1_ind:])
weight_cwt=copy.copy(weight_cwt[:,min_scalelim1_ind:])
scalelim1_ind[:]=scalelim1_ind[:]-min_scalelim1_ind
# Smoothing the CWT => parameters
#print "Parameters for smoothing the CWT:"
if shannonnyquistexclusionzone is True:
if smoothing_type=="fixed":
scalelim1_ind_max=np.amax(scalelim1_ind)-1
scalelim1_smooth=copy.copy(scalelim1)
scalelim1_ind_smooth=copy.copy(scalelim1_ind)
for k in range(Q):
tau_k=tau[k]
for l in range(scalelim1_ind_max,-1,-1):
scale_l=scale[l]
ind_left=np.argmin(np.absolute(tau-(tau_k-smoothing_coeff*w0*scale_l)))
ind_right=np.argmin(np.absolute(tau-(tau_k+smoothing_coeff*w0*scale_l)))
if np.sum(scalelim1[ind_left:(ind_right+1)]>=scale_l)>0:
scalelim1_smooth[k]=scale[l]
scalelim1_ind_smooth[k]=l+1
break
# redefine the scale and other variables because new min scale
min_scalelim1_ind_smooth=min(scalelim1_ind_smooth)
scale=copy.copy(scale[min_scalelim1_ind_smooth:])
weight_cwt=copy.copy(weight_cwt[:,min_scalelim1_ind_smooth:])
scalelim1_ind[:]=scalelim1_ind[:]-min_scalelim1_ind_smooth
scalelim1_ind_smooth[:]=scalelim1_ind_smooth[:]-min_scalelim1_ind_smooth
# Redefine the cone of influence
coi1_smooth=tau[0]+smoothing_coeff*w0*scale
coi2_smooth=tau[-1]-smoothing_coeff*w0*scale
coi1=time[0]+(smoothing_coeff+gauss_spread)*w0*scale
coi2=time[-1]-(smoothing_coeff+gauss_spread)*w0*scale
# Indices for cwt coi1_smooth and coi2_smooth
J=scale.size
coi_smooth_ind=np.zeros(Q,dtype=int)
for k in range(Q):
tau_k=tau[k]
for l in range(J-1,scalelim1_ind_smooth[k]-1,-1):
if tau_k>coi1_smooth[l] and tau_k<coi2_smooth[l]:
coi_smooth_ind[k]=l
break
elif smoothing_type=="variable":
scalelim1_smooth=copy.copy(scalelim1)
scalelim1_ind_smooth=copy.copy(scalelim1_ind)
coi1_smooth=time[0]*np.ones(coi1.size)
coi2_smooth=time[-1]*np.ones(coi2.size)
J=scale.size
coi_smooth_ind=np.ones(Q,dtype=int)*(J-1)
elif shannonnyquistexclusionzone is False:
if smoothing_type=="fixed":
scalelim1_ind_smooth=copy.copy(scalelim1_ind)
scalelim1_ind_max=np.amax(scalelim1_ind_ghost)-1
scalelim1_smooth=copy.copy(scalelim1)
for k in range(Q):
tau_k=tau[k]
for l in range(scalelim1_ind_max,-1,-1):
scale_l=scale[l]
ind_left=np.argmin(np.absolute(tau-(tau_k-smoothing_coeff*w0*scale_l)))
ind_right=np.argmin(np.absolute(tau-(tau_k+smoothing_coeff*w0*scale_l)))
if np.sum(scalelim1[ind_left:(ind_right+1)]>=scale_l)>0:
scalelim1_smooth[k]=scale[l]
break
# Redefine the cone of influence
coi1_smooth=tau[0]+smoothing_coeff*w0*scale
coi2_smooth=tau[-1]-smoothing_coeff*w0*scale
coi1=time[0]+(smoothing_coeff+gauss_spread)*w0*scale
coi2=time[-1]-(smoothing_coeff+gauss_spread)*w0*scale
# Indices for cwt coi1_smooth and coi2_smooth
J=scale.size
coi_smooth_ind=np.zeros(Q,dtype=int)
for k in range(Q):
tau_k=tau[k]
for l in range(J-1,scalelim1_ind_smooth[k]-1,-1):
if tau_k>coi1_smooth[l] and tau_k<coi2_smooth[l]:
coi_smooth_ind[k]=l
break
elif smoothing_type=="variable":
scalelim1_smooth=copy.copy(scalelim1)
scalelim1_ind_smooth=copy.copy(scalelim1_ind)
coi1_smooth=time[0]*np.ones(coi1.size)
coi2_smooth=time[-1]*np.ones(coi2.size)
J=scale.size
coi_smooth_ind=np.ones(Q,dtype=int)*(J-1)
# Computes the maximum length over which smoothing is to be performed - That naturally occurs at the highest scale
Qmax=0
for k in range(Q):
tau_k=tau[k]
# Indices for cwt smoothing
ind_left=np.argmin(np.absolute(tau-(tau_k-smoothing_coeff*w0*scale[-1])))
ind_right=np.argmin(np.absolute(tau-(tau_k+smoothing_coeff*w0*scale[-1])))
if smoothing_type=="fixed":
# average the cwt over range(ind_left,ind_right+1)
Qmax=np.maximum(Qmax,ind_right+1-ind_left)
if smoothing_type=="variable":
			Qmax=np.maximum(Qmax,np.sum(scalelim1[ind_left:(ind_right+1)]<scale[-1]))  # highest scale, as in ind_left/ind_right above; the bare scale_l left over from the earlier loops would be stale here
# number of tau[k]'s on which the CWT is to be computed (for each scale), i.e. outside the Shannon-Nyquist exclusion zone.
n_outside_scalelim1=np.ones(J,dtype=int)*Q
J=scale.size
for l in range(J):
count=0
for k in range(Q):
if l>=scalelim1_ind[k]:
count+=1
n_outside_scalelim1[l]=count
return scale,coi1,coi2,coi1_smooth,coi2_smooth,coi_smooth_ind,weight_cwt,scalelim1_ind,scalelim1_smooth,scalelim1_ind_smooth,Qmax,n_outside_scalelim1
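
# A minimal illustrative sketch (not part of WAVEPAL) of the Gaussian-weighted
# effective time step computed inside the loops above; with dcon = 1/(2*w0**2)
# as defined in the function, the weight is exp(-(t - tau)**2 / (2*(w0*scale)**2)).
# All values below are made up:
#
#   import numpy as np
#   w0, scale_l, tau_k = 5.5, 2.0, 10.0          # hypothetical values
#   t = np.linspace(0.0, 20.0, 41)               # hypothetical sampling times
#   dt_c = np.gradient(t)                        # stand-in for dt_central(time)
#   g = np.exp(-(t - tau_k)**2 / (2.0 * (w0 * scale_l)**2))
#   dt_eff = np.sum(g * dt_c) / np.sum(g)        # effective local time step
#   excluded = scale_l <= dt_eff / np.pi         # Shannon-Nyquist test (eps omitted)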
| 47.957143 | 443 | 0.725573 |
fd8f527b50d298fdffebd4917eb84703639dddbb | 3,206 | py | Python | nikola/plugins/task/tagcloud.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | ["MIT"] | 1 | 2021-01-03T01:54:37.000Z | 2021-01-03T01:54:37.000Z | nikola/plugins/task/tagcloud.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | ["MIT"] | 13 | 2021-01-21T04:54:51.000Z | 2022-03-21T04:15:56.000Z | nikola/plugins/task/tagcloud.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | ["MIT"] | null | null | null |

# -*- coding: utf-8 -*-
# Copyright © 2012-2018 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render the tag cloud."""
import json
import os
from nikola.plugin_categories import Task
from nikola import utils
class RenderTagCloud(Task):
"""Classify the posts by tags."""
name = "render_tag_cloud"
def gen_tasks(self):
"""Render the tag cloud."""
self.site.scan_posts()
yield self.group_task()
# Tag cloud json file
tag_cloud_data = {}
for tag, posts in self.site.posts_per_tag.items():
if tag in self.site.config['HIDDEN_TAGS']:
continue
tag_posts = dict(posts=[{'title': post.meta[post.default_lang]['title'],
'date': post.date.strftime('%m/%d/%Y'),
'isodate': post.date.isoformat(),
'url': post.permalink(post.default_lang)}
for post in reversed(sorted(self.site.timeline, key=lambda post: post.date))
if tag in post.alltags])
tag_cloud_data[tag] = [len(posts), self.site.link(
'tag', tag, self.site.config['DEFAULT_LANG']), tag_posts]
output_name = os.path.join(self.site.config['OUTPUT_FOLDER'],
'assets', 'js', 'tag_cloud_data.json')
def write_tag_data(data):
"""Write tag data into JSON file, for use in tag clouds."""
utils.makedirs(os.path.dirname(output_name))
with open(output_name, 'w+') as fd:
json.dump(data, fd, sort_keys=True)
if self.site.config['WRITE_TAG_CLOUD']:
task = {
'basename': str(self.name),
'name': str(output_name)
}
task['uptodate'] = [utils.config_changed(tag_cloud_data, 'nikola.plugins.task.tags:tagdata')]
task['targets'] = [output_name]
task['actions'] = [(write_tag_data, [tag_cloud_data])]
task['clean'] = True
yield utils.apply_filters(task, self.site.config['FILTERS'])
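
        # Shape of the emitted tag_cloud_data.json, as assembled above (the
        # tag name, count and link value are illustrative):
        #
        #   {"python": [2, "<link to tag page>", {"posts": [
        #       {"title": "...", "date": "01/31/2021", "isodate": "...", "url": "..."}]}]}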
| 40.582278 | 112 | 0.620711 |
c762fdb9bc53742643ba4cd3dc217fe009af4d8c | 4,390 | py | Python | seapy/couplings/couplingsurfaceplateacoustical.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | ["BSD-3-Clause"] | 8 | 2015-07-02T13:34:06.000Z | 2021-05-17T21:34:07.000Z | seapy/couplings/couplingsurfaceplateacoustical.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | ["BSD-3-Clause"] | 2 | 2015-11-09T17:16:07.000Z | 2020-02-19T14:00:20.000Z | seapy/couplings/couplingsurfaceplateacoustical.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | ["BSD-3-Clause"] | 2 | 2021-02-03T08:56:10.000Z | 2022-01-22T02:21:43.000Z |

import numpy as np
from .coupling import Coupling
def radiation_efficiency(coupling, component):
"""
Radiation efficiency of a plate to a cavity and vice versa.
Where:
* area of plate :math:`S = L_x L_y`
* circumference of plate :math:`U = 2 (L_x + L_y)`
* :math:`\\alpha = \\sqrt{\\frac{f}{f_c}}`
When :math:`f < 0.5 f_c`:
    .. math:: g_1 = \\frac{4}{\\pi^4} (1 - 2 \\alpha^2) (1 - \\alpha^2)^{-0.5}
When :math:`f > 0.5 f_c`:
.. math:: g_1 = 0
    See TA2 Radiation plateapp_ta2
"""
# component = self.subsystem_from.component
f = np.array(component.frequency.center, dtype=complex)
"""Cast to complex numbers to prevent errors with sqrt further down."""
fc = coupling.critical_frequency
lc = coupling.critical_wavelength
Lx = component.length
Ly = component.width
S = component.area
U = 2.0 * (Lx + Ly)
fc_band = (fc > coupling.frequency.lower) * (fc < coupling.frequency.upper)
f_lower = fc > coupling.frequency.upper
f_upper = fc < coupling.frequency.lower
alpha = np.sqrt(f / fc)
g1 = (
4.0 / np.pi ** 4.0 * (1.0 - 2.0 * alpha ** 2.0) / np.sqrt(1.0 - alpha ** 2.0)
) * (f < 0.5 * fc)
g2 = (
1.0
/ (4.0 * np.pi ** 4.0)
* ((1.0 - alpha ** 2) * np.log((1.0 + alpha) / (1.0 - alpha)) + 2.0 * alpha)
/ (1.0 - alpha ** 2.0) ** 1.5
)
sigma1 = lc ** 2.0 / S * (2.0 * g1 + U / lc * g2)
sigma2 = np.sqrt(Lx / lc) + np.sqrt(Ly / lc)
sigma3 = (1.0 - fc / f) ** (-0.5)
sigma1 = np.nan_to_num(sigma1)
sigma2 = np.nan_to_num(sigma2)
sigma3 = np.nan_to_num(sigma3)
"""Replace NaN with zeros"""
sigma = sigma1 * f_lower + sigma2 * fc_band + sigma3 * f_upper * (sigma3 < sigma2)
sigma = np.real(np.nan_to_num(sigma))
return sigma
def critical_frequency(subsystem_plate, subsystem_cavity):
"""
Critical frequency.
    .. math:: f_c = \\frac{ c_0^2 \\sqrt{3} }{ \\pi c_L h }
.. math:: f_c = \\frac{f c_0^2}{c_B^2}
See Craik, table 3.3, page 51.
"""
return (
subsystem_plate.frequency.center
* (
subsystem_cavity.soundspeed_group
/ subsystem_plate.component.subsystem_bend.soundspeed_phase
)
** 2.0
)
# return subsystem_cavity.soundspeed_group**2.0 / (1.81818181 * subsystem_plate.component.subsystem_long.soundspeed_group * subsystem_plate.component.height)
class CouplingSurfacePlateAcoustical(Coupling):
"""
A model describing the coupling between a plate and a cavity.
"""
@property
def impedance_from(self):
return self.subsystem_from.impedance
@property
def impedance_to(self):
return self.subsystem_to.impedance
@property
def critical_frequency(self):
"""
Critical frequency.
.. math:: f_c = \\frac{ c_0^2 } {1.8 c_L t}
See BAC, 3.2.2 script.
"""
return critical_frequency(self.subsystem_from, self.subsystem_to)
@property
def critical_wavelength(self):
"""
Wavelength belonging to critical frequency.
.. math:: \\lambda_c = c_{g} / f_c
"""
try:
return self.subsystem_to.soundspeed_group / self.critical_frequency
except FloatingPointError:
return np.zeros(len(self.frequency))
@property
def radiation_efficiency(self):
"""
Radiation efficiency of a plate for bending waves.
"""
return radiation_efficiency(self, self.subsystem_from.component)
@property
def clf(self):
"""
Coupling loss factor for plate to cavity radiation.
.. math:: \\eta_{plate, cavity} = \\frac{\\rho_0 c_0 \\sigma}{\\omega m^{''}}
.. attention::
Which speed of sound???
See BAC, equation 3.6
"""
try:
return (
self.subsystem_from.component.material.density
* self.subsystem_to.soundspeed_group
* self.radiation_efficiency
/ (self.frequency.angular * self.subsystem_from.component.mass_per_area)
)
except (ZeroDivisionError, FloatingPointError):
return np.zeros(len(self.frequency))
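
# A quick numeric sanity check of the critical-frequency formula in the
# docstrings above (material values are hypothetical, not taken from seapy;
# the factor 1.8 approximates pi/sqrt(3)):
#
#   c0 = 343.0     # speed of sound in air [m/s]
#   cL = 5000.0    # longitudinal wave speed in the plate [m/s]
#   t  = 0.01      # plate thickness [m]
#   fc = c0**2 / (1.8 * cL * t)   # ~1307 Hz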
| 27.78481 | 161 | 0.569248 |
8a087ddd3b369759e6f991f6d256278369f48fa7 | 1,970 | py | Python | features/steps/picture.py | just4jc/python-pptx | ec433085d84d48b5539c379e52eb3c279ab2cbc0 | ["MIT"] | 1 | 2019-01-24T11:50:05.000Z | 2019-01-24T11:50:05.000Z | features/steps/picture.py | just4jc/python-pptx | ec433085d84d48b5539c379e52eb3c279ab2cbc0 | ["MIT"] | 4 | 2021-03-18T20:28:17.000Z | 2022-03-11T23:18:51.000Z | features/steps/picture.py | just4jc/python-pptx | ec433085d84d48b5539c379e52eb3c279ab2cbc0 | ["MIT"] | 2 | 2016-02-11T20:12:33.000Z | 2016-02-11T20:50:03.000Z |

# encoding: utf-8
"""
Gherkin step implementations for picture-related features.
"""
from __future__ import absolute_import
from behave import given, when, then
from pptx import Presentation
from pptx.compat import BytesIO
from pptx.package import Package
from pptx.util import Inches
from helpers import saved_pptx_path, test_image, test_pptx
# given ===================================================
@given('a picture of known position and size')
def given_a_picture_of_known_position_and_size(context):
prs = Presentation(test_pptx('shp-pos-and-size'))
context.picture = prs.slides[1].shapes[0]
# when ====================================================
@when('I add the image {filename} using shapes.add_picture()')
def when_I_add_the_image_filename_using_shapes_add_picture(context, filename):
shapes = context.slide.shapes
shapes.add_picture(test_image(filename), Inches(1.25), Inches(1.25))
@when('I add the stream image {filename} using shapes.add_picture()')
def when_I_add_the_stream_image_filename_using_add_picture(context, filename):
shapes = context.slide.shapes
with open(test_image(filename), 'rb') as f:
stream = BytesIO(f.read())
shapes.add_picture(stream, Inches(1.25), Inches(1.25))
# then ====================================================
@then('a {ext} image part appears in the pptx file')
def step_then_a_ext_image_part_appears_in_the_pptx_file(context, ext):
pkg = Package().open(saved_pptx_path)
partnames = [part.partname for part in pkg.parts]
image_partname = '/ppt/media/image1.%s' % ext
assert image_partname in partnames, (
'got %s' % [p for p in partnames if 'image' in p]
)
@then('the picture appears in the slide')
def step_then_picture_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
slide = prs.slides[0]
shapes = slide.shapes
cls_names = [sp.__class__.__name__ for sp in shapes]
assert 'Picture' in cls_names
| 31.774194 | 78 | 0.683756 |
f6298b954d59ba2b3b924307468efe27e4705e59 | 1,661 | py | Python | comparison/compare_sORF.py | Sung-Huan/ANNOgesic | af3de26f6c5ff9d2218f18a84bbc863a1bb95550 | ["0BSD"] | 26 | 2016-02-25T19:27:55.000Z | 2022-01-22T09:54:59.000Z | comparison/compare_sORF.py | Sung-Huan/ANNOgesic | af3de26f6c5ff9d2218f18a84bbc863a1bb95550 | ["0BSD"] | 28 | 2018-11-22T19:51:06.000Z | 2022-03-20T23:02:13.000Z | comparison/compare_sORF.py | Sung-Huan/ANNOgesic | af3de26f6c5ff9d2218f18a84bbc863a1bb95550 | ["0BSD"] | 18 | 2016-06-01T11:53:45.000Z | 2021-12-27T03:41:03.000Z |

#!/usr/bin/python
import os
import sys
import csv
import argparse
from gff3 import Gff3Parser
__author__ = "Sung-Huan Yu <sung-huan.yu@uni-wuerzburg.de>"
__email__ = "sung-huan.yu@uni-wuerzburg.de"
parser = argparse.ArgumentParser()
parser.add_argument("-k","--benchmark_file",help="the benchmarking set of sORF")
parser.add_argument("-p","--predict_file",help="ANNOgesic predicted sORF file")
args = parser.parse_args()
def main():
sorfs = []
pres = []
num_ref = 0
detect = 0
for sorf in Gff3Parser().entries(open(args.benchmark_file)):
num_ref += 1
sorfs.append(sorf)
for pre in Gff3Parser().entries(open(args.predict_file)):
pres.append(pre)
for sorf in sorfs:
for pre in pres:
if pre.strand == sorf.strand:
if ((pre.start >= sorf.start) and (
pre.end <= sorf.end)) or (
(pre.start <= sorf.start) and (
pre.end >= sorf.end)) or (
(pre.start >= sorf.start) and (
pre.start <= sorf.end) and (
pre.end >= sorf.end)) or (
(pre.start <= sorf.start) and (
pre.end >= sorf.start) and (
pre.end <= sorf.end)):
detect += 1
sorf.attributes["detect"] = True
break
print("the number of known sORFs which can be detected by ANNOgesic:" + str(detect))
print("the total number of known sORFs:" + str(num_ref))
print("the detection rate:"+ str(float(detect) / float(num_ref)))
if __name__ == "__main__":
main()
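
# A hypothetical invocation sketch (the flags come from the argparse
# definitions above; the GFF file names are made up):
#
#   python compare_sORF.py -k known_sORFs.gff -p ANNOgesic_sORFs.gff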
| 33.22 | 88 | 0.551475 |
60c1a18a03c2400c7a2fb81e76cd31900e92a194 | 4,412 | py | Python | mh4.py | hanjae1122/Boggle | 0921ac2ab916652fa176037c96c981625211e038 | [
"MIT"
] | null | null | null | mh4.py | hanjae1122/Boggle | 0921ac2ab916652fa176037c96c981625211e038 | [
"MIT"
] | null | null | null | mh4.py | hanjae1122/Boggle | 0921ac2ab916652fa176037c96c981625211e038 | [
"MIT"
] | null | null | null | import sys
import re
import random
import numpy as np
import time
DIM = 4
DICE = 'rifobx ifehey denows utoknd hmsrao lupets acitoa ylgkue qbmjoa \
ehispn vetign baliyt ezavnd ralesc uwilrg pacemd'.split()
ALL_WORDS = []
DATAPATH = 'data/words_alpha.txt'
# DATAPATH = 'data/new_dictionary.txt'
try:
TRIALS = int(sys.argv[1])
except (IndexError, ValueError):
print("\nSetting number of trials to default of 1000")
TRIALS = 1000
def score(roll):
def solve():
for y, row in enumerate(s):
for x, letter in enumerate(row):
for result in extending(letter, ((x, y),)):
yield result
def extending(prefix, path):
if prefix in words:
yield (prefix, path)
for (nx, ny) in neighbors(*path[-1]):
if (nx, ny) not in path:
prefix1 = prefix + s[ny][nx]
if prefix1 in prefixes:
for result in extending(prefix1, path + ((nx, ny),)):
yield result
def neighbors(x, y):
for nx in range(max(0, x-1), min(x+2, DIM)):
for ny in range(max(0, y-1), min(y+2, DIM)):
yield (nx, ny)
s = [roll[i:i+DIM] for i in range(0, DIM**2, DIM)]
alphabet = ''.join(set(roll))
valid_word = re.compile('[' + alphabet + ']{3,}$', re.I).match
words = []
for word in ALL_WORDS:
if valid_word(word):
words.append(word)
prefixes = set(w[:i] for w in words for i in range(2, len(w)+1))
ans = []
for result in solve():
ans.append(result[0])
ans = set(ans)
return len(ans), ans
def swap(dice, roll, num):
new_roll = roll.copy()
new_dice = dice.copy()
swaps = random.sample(range(DIM**2), num)
for i in range(num-1):
new_dice[swaps[i]] = dice[swaps[i+1]]
new_dice[swaps[-1]] = dice[swaps[0]]
for i in swaps:
new_roll[i] = random.sample(new_dice[i], 1)[0]
return new_dice, new_roll
def sample_rolls(k):
samples = {}
for i in range(k):
roll = []
dice = random.sample(DICE, DIM ** 2)
for d in dice:
roll.append(random.sample(d, 1)[0])
samples[''.join(roll)] = score(roll)
return samples
def print_roll(r):
for row in [r[i:i+DIM] for i in range(0, DIM**2, DIM)]:
print(row)
def main():
print('\nRunning algorithm for {0} steps'.format(TRIALS))
print('\nReading in dictionary from {0}...'.format(DATAPATH))
with open(DATAPATH) as f:
for word in f:
w = word.rstrip()
length = len(w)
if length >= 3 and length <= DIM**2:
# check if all q's are qu's
if 'q' in w:
is_q = False
# if q appears as last letter, ignore
for m in re.finditer('q', w):
i = m.start()
if w[i:(i+2)] != 'qu':
is_q = True
break
if is_q:
continue
else:
w = re.sub('qu', 'q', w)
ALL_WORDS.append(w)
print('Finished reading {0} words'.format(len(ALL_WORDS)))
scores = {}
max_words = []
dice = random.sample(DICE, DIM ** 2)
roll = [random.sample(d, 1)[0] for d in dice]
scores[''.join(roll)] = old = score(roll)
start = time.time()
msg_time = time.time()
for i in range(TRIALS):
if i == 0 or time.time() - msg_time >= 5:
print('\nTrial {0}, Time elapsed: {1:.2f}'
.format(i, time.time() - start))
print('Number of words found: {0:g}'.format(old[0]))
print_roll(roll)
msg_time = time.time()
new_dice, new_roll = swap(dice, roll, 3)
roll_key = ''.join(new_roll)
if roll_key in scores:
new = scores[roll_key]
else:
scores[roll_key] = new = score(new_roll)
if random.uniform(0, 1) < (np.exp(new[0] - old[0])):
max_words.append(new[0])
dice = new_dice
roll = new_roll
old = new
roll_key = ''.join(roll)
ans = scores[roll_key]
print('\nFinal board arrangement has {0} words:'
.format(ans[0]))
print_roll(roll)
print(ans[1])
if __name__ == '__main__':
main()
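
# A hypothetical invocation sketch: the single positional argument is the
# trial count read from sys.argv[1] above (it defaults to 1000 when omitted):
#
#   python mh4.py 5000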
| 28.101911 | 73 | 0.513826 |
664dc6d0aff4ffd0c6626424ce015c7f14dbc7b2 | 21,642 | py | Python | test/functional/test_framework/util.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | ["MIT"] | 17 | 2018-01-17T01:39:21.000Z | 2021-12-23T10:23:38.000Z | test/functional/test_framework/util.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | ["MIT"] | 4 | 2015-09-22T03:51:39.000Z | 2017-12-28T21:03:49.000Z | test/functional/test_framework/util.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | ["MIT"] | 5 | 2016-06-25T10:13:37.000Z | 2021-02-19T15:37:06.000Z |

#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
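
# A usage sketch for the helper above (the RPC call, error code and message
# are illustrative, not taken from this file):
#
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000)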
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
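# Sketch of a direct call (credentials and port are made up for illustration):
#
#   node0 = get_rpc_proxy("http://user:pass@127.0.0.1:16008", 0, timeout=30)
#   node0.getblockcount()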
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
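# Worked example of the port layout: with PortSeed.n = 1 and MAX_NODES = 8,
# p2p_port(0) = 11000 + 0 + (8 * 1) % 4991 = 11008 and rpc_port(0) = 16008,
# so the p2p and rpc ranges cannot collide for any node index.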
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node" + str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node" + str(n_node), "regtest", logname)
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
from_connection.disconnectnode(nodeid=peer_id)
for _ in range(50):
if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
break
time.sleep(0.1)
else:
raise AssertionError("timed out waiting for disconnect")
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
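# Example (amounts are illustrative): gather at least 1.5 BTC of inputs.
#
#   total_in, inputs = gather_inputs(node, Decimal("1.5"))
#   # total_in >= Decimal("1.5"); inputs holds {"txid", "vout", "address"} dicts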
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
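# Worked example (assumed values): with amount_in=10, amount_out=2 and fee=0.01,
# change is 7.99 > 2 * 2.01, so it is split across two fresh addresses rather
# than left as a single large, easily-linkable change output.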
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
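# Size sketch: each txout above is 8 (value) + 3 (varint, fd0402 = 516) + 516
# (script) = 527 bytes, so 128 of them add roughly 66KB to a transaction --
# which is where the 14-transactions-per-block figure in mine_large_block
# below comes from.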
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
| 38.035149 | 119 | 0.650818 |
6b285890452b336c5e167cef2dd4926d1425af55 | 1,450 | py | Python | vsutillib/vsutillib-media/setup.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-media/setup.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | vsutillib/vsutillib-media/setup.py | akai10tsuki/vsutillib | 6d623171cc2a5c66a94fb508bfc312abeab49ff2 | [
"MIT"
] | null | null | null | """
setup for vsutillib-media
"""
import io
import os
import shutil
import sys
from pathlib import Path
from setuptools import setup
from vsutillib import config
sys.path.insert(0, os.path.abspath("../.."))
ROOT = os.path.abspath(os.path.dirname(__file__))
PACKAGE = "media"
def removeTmpDirs():
"""
    Delete leftover egg-info and build directories; without this, setup
    was including files from previous builds.
"""
p = Path(".")
eggDirs = [x for x in p.glob("*.egg-info") if x.is_dir()]
eggDirs.append(Path("build"))
for d in eggDirs:
if d.is_dir():
shutil.rmtree(d)
def readme():
"""get README.rst"""
try:
with io.open(os.path.join(ROOT, "README.rst"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = "vsutillib." + PACKAGE + " sub package part of vsutillib"
return long_description
setup(
name=config.NAME + "-" + PACKAGE,
version=config.MEDIA_VERSION,
description="vsutillib." + PACKAGE + " sub package part of vsutillib",
long_description=readme(),
author=config.AUTHOR,
author_email=config.EMAIL,
license="MIT",
packages=["vsutillib." + PACKAGE, "vsutillib." + PACKAGE + ".classes"],
install_requires=["pymediainfo" + config.PYMEDIAINFO_VERSION],
zip_safe=False,
url="https://pypi.org/project/vsutillib-" + PACKAGE + "/",
python_requires=config.PYTHON_VERSION,
)
removeTmpDirs()
| 23.770492 | 84 | 0.653793 |
02c7e260c64622b87b0cfc88669da2e31bce7fe1 | 5,066 | py | Python | tests/toranj/test-009-insecure-traffic-join.py | JeffreyHayes/openthread | 0dde90edcb0aafef5f4b5fc3d30e19f756e27ee4 | [
"BSD-3-Clause"
] | 2 | 2018-08-24T05:14:27.000Z | 2018-09-25T03:02:36.000Z | tests/toranj/test-009-insecure-traffic-join.py | JeffreyHayes/openthread | 0dde90edcb0aafef5f4b5fc3d30e19f756e27ee4 | [
"BSD-3-Clause"
] | 4 | 2016-09-09T17:10:04.000Z | 2016-09-29T05:18:09.000Z | tests/toranj/test-009-insecure-traffic-join.py | turon/openthread | 20145cb42fca90d791c4918475db28b7b91290d6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Check insecure data transmission during joining.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
node1 = wpan.Node()
node2 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
node1.form("insec-join-test")
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
insecure_port = 1234
NUM_MSGS = 4
# Make node1 joinable and set the insecure port
node1.permit_join(duration_sec='100', port=str(insecure_port))
verify(node1.get(wpan.WPAN_NETWORK_ALLOW_JOIN) == 'true')
# Join node1 network from node2 without setting the key
node2.join_node(node1, should_set_key=False)
verify(node2.get(wpan.WPAN_STATE) == wpan.STATE_CREDENTIALS_NEEDED)
verify(node2.get(wpan.WPAN_NAME) == node1.get(wpan.WPAN_NAME))
verify(node2.get(wpan.WPAN_PANID) == node1.get(wpan.WPAN_PANID))
verify(node2.get(wpan.WPAN_XPANID) == node1.get(wpan.WPAN_XPANID))
ll1 = node1.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]
ll2 = node2.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]
# Send insecure traffic from node2 to node1 using link-local IP address
# for src/dst and insecure port number
sender = node2.prepare_tx(ll2, (ll1, insecure_port), "Hi (insecure)", NUM_MSGS)
recver = node1.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful)
verify(recver.was_successful)
# Get the random src port number used by node1 and ensure node2 allows
# insecure rx traffic on that port
rx_port = recver.all_rx_msg[0][1][1]
node2.permit_join(duration_sec='100', port=str(rx_port))
# Send insecure reply from node1 to node2
sender2 = node1.prepare_tx(
(ll1, insecure_port), (ll2, rx_port), "Hi back! (insecure)", NUM_MSGS
)
recver2 = node2.prepare_rx(sender2)
wpan.Node.perform_async_tx_rx()
verify(sender2.was_successful)
verify(recver2.was_successful)
# Now node2 fully joins the network (set the network key), check all
# secure traffic exchange between the nodes
node2.set(wpan.WPAN_KEY, node1.get(wpan.WPAN_KEY)[1:-1], binary_data=True)
verify(node2.is_associated())
node1.permit_join('0')
sender = node2.prepare_tx(
ll2, (ll1, insecure_port), "Hi (now secure)", NUM_MSGS
)
recver = node1.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful)
verify(recver.was_successful)
node2.permit_join('0')
sender2 = node1.prepare_tx(
(ll1, insecure_port), (ll2, rx_port), "Hi back! (secure now)", NUM_MSGS
)
recver2 = node2.prepare_rx(sender2)
wpan.Node.perform_async_tx_rx()
verify(sender2.was_successful)
verify(recver2.was_successful)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| 38.090226 | 121 | 0.656139 |
2590a825306e38455546da346a2aa06f48352c75 | 585 | py | Python | 13. group_by/group_by.py | jeury301/python-morsels | fdbe0b1c80120e2d1388808816538fea5dab8892 | [
"MIT"
] | 2 | 2018-08-21T10:29:57.000Z | 2019-04-17T07:05:17.000Z | 13. group_by/group_by.py | jeury301/python-morsels | fdbe0b1c80120e2d1388808816538fea5dab8892 | [
"MIT"
] | null | null | null | 13. group_by/group_by.py | jeury301/python-morsels | fdbe0b1c80120e2d1388808816538fea5dab8892 | [
"MIT"
] | null | null | null | def default(n):
return n
def group_by(numbers, key_func=default):
"""Return a dict-like object containing items in the list grouped by key.
Args:
numbers (iterator): items to group by key
key_func (function): function to group items by
Returns:
Dictionary of numbers grouped by key_func
"""
groups = {}
    for number in numbers:
        # setdefault hands back the (possibly new) list, so the key
        # function only runs once per item
        groups.setdefault(key_func(number), []).append(number)
return groups
if __name__ == "__main__":
grouped = group_by([1, 2, 1, 3, 2, 1])
print(grouped)
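    # Grouping with an explicit key function -- parity here, purely illustrative:
    print(group_by([1, 2, 3, 4], key_func=lambda n: n % 2))
    # -> {1: [1, 3], 0: [2, 4]}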
| 25.434783 | 77 | 0.639316 |
e0d90dea5d8b95c3b844f22be12409961876d024 | 293 | py | Python | recaptcha-python-2/mysite/core/models.py | sibtc/simple-recaptcha | 224ef3994a398b04f9405bcdaeef339af31c7305 | [
"MIT"
] | 16 | 2017-02-21T13:57:26.000Z | 2021-03-17T15:25:16.000Z | recaptcha-python-3/mysite/core/models.py | Cleyson/simple-recaptcha | 224ef3994a398b04f9405bcdaeef339af31c7305 | [
"MIT"
] | null | null | null | recaptcha-python-3/mysite/core/models.py | Cleyson/simple-recaptcha | 224ef3994a398b04f9405bcdaeef339af31c7305 | [
"MIT"
] | 8 | 2018-03-21T04:11:52.000Z | 2021-08-23T17:04:16.000Z | from __future__ import unicode_literals
from django.db import models
class Comment(models.Model):
text = models.TextField(max_length=1000)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'comment'
verbose_name_plural = 'comments' | 24.416667 | 56 | 0.737201 |
3febdbed2cceee5ffe77784387590de0447f0839 | 1,475 | py | Python | src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py | treid5/probnum | fabb51243d0952fbd35e542aeb5c2dc9a449ec81 | [
"MIT"
] | 1 | 2021-04-16T14:45:26.000Z | 2021-04-16T14:45:26.000Z | src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py | simeoncarstens/probnum | b69587b07e2fffbdcd4c850acc98bb3de97a6e0b | [
"MIT"
] | 42 | 2021-03-08T07:20:40.000Z | 2022-03-28T05:04:48.000Z | src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py | JonathanWenger/probnum | 1c5499883672cfa029c12045848ea04491c69e08 | [
"MIT"
] | null | null | null | """Output of PerturbedStepSolver."""
from typing import List, Optional
import numpy as np
from scipy.integrate._ivp import rk
from probnum import randvars
from probnum.diffeq import _odesolution
from probnum.typing import FloatArgType
class PerturbedStepSolution(_odesolution.ODESolution):
"""Probabilistic ODE solution corresponding to the :class:`PerturbedStepSolver`."""
def __init__(
self,
scales: List[float],
locations: np.ndarray,
states: randvars._RandomVariableList,
interpolants: List[rk.DenseOutput],
):
self.scales = scales
self.interpolants = interpolants
super().__init__(locations, states)
def interpolate(
self,
t: FloatArgType,
previous_index: Optional[FloatArgType] = None,
next_index: Optional[FloatArgType] = None,
):
# For the first state, no interpolation has to be performed.
if t == self.locations[0]:
return self.states[0]
if t == self.locations[-1]:
return self.states[-1]
else:
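            # The perturbed solver rescales each step, so map t back into the
            # unperturbed time frame of the stored dense-output interpolant
            # before evaluating it.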
interpolant = self.interpolants[previous_index]
relative_time = (t - self.locations[previous_index]) * self.scales[
previous_index
]
previous_time = self.locations[previous_index]
evaluation = interpolant(previous_time + relative_time)
res_as_rv = randvars.Constant(evaluation)
return res_as_rv
| 31.382979 | 87 | 0.648814 |
94c70ca440553f23d2b57a652e2a3bae54cb0a39 | 3,105 | py | Python | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/sb_namespace.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/sb_namespace.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/sb_namespace.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class SBNamespace(TrackedResource):
"""Description of a namespace resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. The Geo-location where the resource lives
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param sku: Properties of Sku
:type sku: ~azure.mgmt.servicebus.models.SBSku
:ivar provisioning_state: Provisioning state of the namespace.
:vartype provisioning_state: str
:ivar created_at: The time the namespace was created.
:vartype created_at: datetime
:ivar updated_at: The time the namespace was updated.
:vartype updated_at: datetime
:ivar service_bus_endpoint: Endpoint you can use to perform Service Bus
operations.
:vartype service_bus_endpoint: str
:ivar metric_id: Identifier for Azure Insights metrics
:vartype metric_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
'service_bus_endpoint': {'readonly': True},
'metric_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SBSku'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
'metric_id': {'key': 'properties.metricId', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SBNamespace, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.provisioning_state = None
self.created_at = None
self.updated_at = None
self.service_bus_endpoint = None
self.metric_id = None
| 37.865854 | 88 | 0.603865 |
712331465a4a56407fea5b488c35beb2415ee02f | 24,197 | py | Python | horizon/horizon/base.py | citrix-openstack/horizon | 7987e68f135895728f891c2377b589f701d8106e | [
"Apache-2.0"
] | 2 | 2015-05-18T13:50:21.000Z | 2015-05-18T14:47:47.000Z | horizon/horizon/base.py | andrewsmedina/horizon | 6892653c0573a6a55f359cce6c1796053ef65cbf | [
"Apache-2.0"
] | null | null | null | horizon/horizon/base.py | andrewsmedina/horizon | 6892653c0573a6a55f359cce6c1796053ef65cbf | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import copy
import inspect
import logging
from django.conf import settings
from django.conf.urls.defaults import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.functional import SimpleLazyObject
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.translation import ugettext as _
from horizon.decorators import require_roles, _current_component
LOG = logging.getLogger(__name__)
# Default configuration dictionary. Do not mutate directly. Use copy.copy().
HORIZON_CONFIG = {
# Allow for ordering dashboards; list or tuple if provided.
'dashboards': None,
# Name of a default dashboard; defaults to first alphabetically if None
'default_dashboard': None,
'user_home': None,
}
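# A project would typically override these via settings.HORIZON_CONFIG; the
# dashboard slugs below are hypothetical:
#
#   HORIZON_CONFIG = {
#       'dashboards': ('nova', 'syspanel', 'settings'),
#       'default_dashboard': 'nova',
#       'user_home': 'dashboard.views.get_user_home',
#   }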
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
class NotRegistered(Exception):
pass
class HorizonComponent(object):
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __unicode__(self):
return getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
cls = self._registry.get(cls, None)
if cls:
return cls
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.name})
else:
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.'
% {"type": class_name, "slug": cls})
class Panel(HorizonComponent):
""" A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing role-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute: roles
A list of role names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any roles required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.__unicode__()
def get_absolute_url(self):
""" Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
class Dashboard(Registry, HorizonComponent):
""" A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing role-based
access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a dictionary of tuples which define groups of panels
as in the following example::
class Syspanel(horizon.Dashboard):
panels = {'System Panel': ('overview', 'instances', ...)}
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute: roles
A list of role names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any roles required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
Optional boolean to control whether or not this dashboard should
appear in automatically-generated navigation. Default: ``True``.
.. attribute:: supports_tenants
Optional boolean that indicates whether or not this dashboard includes
support for projects/tenants. If set to ``True`` this dashboard's
naviagtion will include a UI element that allows the user to select
project/tenant. Default: ``False``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
supports_tenants = False
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.__unicode__()
def get_panel(self, panel):
"""
Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""
Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order.
"""
registered = copy.copy(self._registry)
if isinstance(self.panels, dict):
panels = {}
for heading, items in self.panels.iteritems():
panels.setdefault(heading, [])
for item in items:
panel = self._registered(item)
panels[heading].append(panel)
registered.pop(panel.__class__)
if len(registered):
panels.setdefault(_("Other"), []).extend(registered.values())
else:
panels = []
for item in self.panels:
panel = self._registered(item)
panels.append(panel)
registered.pop(panel.__class__)
panels.extend(registered.values())
return panels
def get_absolute_url(self):
""" Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
urlpatterns += patterns('',
url(r'^%s/' % panel.slug, include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'', include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, login_required)
# Apply access controls to all views in the patterns
roles = getattr(self, 'roles', [])
_decorate_urlconf(urlpatterns, require_roles, roles)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
""" Discovers panels to register from the current dashboard module. """
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
panels = []
if isinstance(self.panels, dict):
[panels.extend(values) for values in self.panels.values()]
else:
panels = self.panels
for panel in panels:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
@classmethod
def register(cls, panel):
""" Registers a :class:`~horizon.Panel` with this dashboard. """
from horizon import Horizon
return Horizon.register_panel(cls, panel)
@classmethod
def unregister(cls, panel):
""" Unregisters a :class:`~horizon.Panel` from this dashboard. """
from horizon import Horizon
return Horizon.unregister_panel(cls, panel)
class Workflow(object):
    def __init__(self, *args, **kwargs):
raise NotImplementedError()
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is None:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is None:
self._setup()
return reversed(self._wrapped)
class Site(Registry, HorizonComponent):
""" The core OpenStack Dashboard class. """
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.__unicode__()
@property
def _conf(self):
conf = copy.copy(HORIZON_CONFIG)
conf.update(getattr(settings, 'HORIZON_CONFIG', {}))
return conf
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
""" Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
""" Unregisters a :class:`~horizon.Dashboard` from Horizon. """
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
""" Returns the specified :class:`~horizon.Dashboard` instance. """
return self._registered(dashboard)
def get_dashboards(self):
""" Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``settings.HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``settings.HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = registered.values()
extra.sort()
dashboards.extend(extra)
return dashboards
else:
dashboards = self._registry.values()
dashboards.sort()
return dashboards
def get_default_dashboard(self):
""" Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``settings.HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
""" Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``settings.HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
This can be useful if the default dashboard may not be accessible
to all users.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, basestring):
# Assume we've got a URL if there's a slash in it
if user_home.find("/") != -1:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
""" Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
""" Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
""" Constructs the URLconf for Horizon from registered Dashboards. """
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Add in each dashboard's views.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug, include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.defaults.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
""" Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# The one true Horizon
Horizon = Site()
| 36.773556 | 79 | 0.616481 |
9d9a5b6d966a834ecdf85bf14d043892172391e8 | 4,745 | py | Python | test_end_to_end_diff_text.py | jayantabh/Real-Time-Voice-Cloning | 503f866c01a42feedad20ce7afa1a9f32abf8c31 | [
"MIT"
] | null | null | null | test_end_to_end_diff_text.py | jayantabh/Real-Time-Voice-Cloning | 503f866c01a42feedad20ce7afa1a9f32abf8c31 | [
"MIT"
] | null | null | null | test_end_to_end_diff_text.py | jayantabh/Real-Time-Voice-Cloning | 503f866c01a42feedad20ce7afa1a9f32abf8c31 | [
"MIT"
] | null | null | null | from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import librosa
from tqdm import tqdm
import os
import pickle
import torch
from Trained_Models.model import Network
from sklearn.metrics import accuracy_score as acc
my_encoder = Network().cuda()
my_encoder.load_state_dict(torch.load("Trained_Models/Kev_Model.pwf"))
encoder_weights = Path("encoder/saved_models/pretrained.pt")
vocoder_weights = Path("vocoder/saved_models/pretrained/pretrained.pt")
syn_dir = Path("synthesizer/saved_models/logs-pretrained/taco_pretrained")
encoder.load_model(encoder_weights)
synthesizer = Synthesizer(syn_dir)
vocoder.load_model(vocoder_weights)
## Audio
mel_window_length = 25 # In milliseconds
mel_window_step = 10 # In milliseconds
mel_n_channels = 40
sampling_rate = 16000
partials_n_frames = 160 # 1600 ms
inference_n_frames = 80 # 800 ms
vad_window_length = 30 # In milliseconds
vad_moving_average_width = 8
vad_max_silence_length = 6
print( " \n\n ##### Loaded Weights #####")
c_text = 0
c_audio = 0
npy_ctr = 1
all_ctr = 0
speaker = -1
y_pred = []
y = []
dataset_path = 'LibriSpeech/train-clean-100/'
for i in tqdm(sorted(os.listdir(dataset_path))):
speaker += 1
# print( i, " : ", c_audio)
    if all_ctr == 100:
        break
    if i == '103':
speaker -= 1
print("Skipped")
continue
print(i, speaker)
path_i = dataset_path + i
for j in os.listdir(path_i):
path_j = path_i + "/"+ j
sorted_paths = sorted(os.listdir(path_j))
## Text Part ##
texts_list = []
text_file_path = path_j+ "/" + sorted_paths[-1]
with open(text_file_path) as fp:
line = fp.readline()
while line:
#print(line.split('-')[2][5:])
texts_list.append(line)
line = fp.readline()
c_text += 1
## Audio Part ##
text_idx = 0
for num, k in enumerate(sorted_paths[:-1]):
            if num == 5:
# print(num,k,text_idx)
with torch.no_grad():
text = texts_list[text_idx]
in_fpath = path_j + "/" + k
original_wav, sampling_rate = librosa.load(in_fpath)
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
'''
###
frames = librosa.feature.melspectrogram(preprocessed_wav,sampling_rate,n_fft=int(sampling_rate * mel_window_length / 1000),hop_length=int(sampling_rate * mel_window_step / 1000),n_mels=mel_n_channels)
frames = frames.astype(np.float32).T[:160,:]
t = torch.Tensor(frames).unsqueeze(0).unsqueeze(0)
embed = my_encoder(t.cuda(), embed=True)
# y.append(embed.argmax().item())
# y_pred.append(speaker)
embed_my = embed.cpu().numpy()
# print("Embed.shape:",embed.shape, torch.type(embed))
# print("Og : ",op.shape, op.argmax(), speaker)
###
'''
embed = encoder.embed_utterance(preprocessed_wav)
# print("Type : ",(embed.max(),embed.min()),(embed_my.max(),embed_my.min()))
# specs = synthesizer.synthesize_spectrograms([text], [np.abs(embed_my)/100])
specs = synthesizer.synthesize_spectrograms([text], [embed])
generated_wav = vocoder.infer_waveform(specs[0])
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
preprocessed_wav = encoder.preprocess_wav(generated_wav, synthesizer.sample_rate)
frames = librosa.feature.melspectrogram(generated_wav,sampling_rate,n_fft=int(sampling_rate * mel_window_length / 1000),hop_length=int(sampling_rate * mel_window_step / 1000),n_mels=mel_n_channels)
frames = frames.astype(np.float32).T[:160,:]
t = torch.Tensor(frames).unsqueeze(0).unsqueeze(0)
op = my_encoder(t.cuda())
print("Gen : ",op.shape, op.argmax(), speaker)
y.append(op.argmax().item())
y_pred.append(speaker)
                    all_ctr += 1
                    c_audio += 1
                break
            text_idx += 1
        break
print(c_text, c_audio)
print(y,y_pred)
print(acc(y,y_pred))
| 37.362205 | 220 | 0.577871 |
847e08af317a561fa921458c0937f885e375b73b | 10,059 | py | Python | mecc/apps/utils/documents_generator/models/preview_mecc.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | mecc/apps/utils/documents_generator/models/preview_mecc.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | 3 | 2021-03-19T10:36:10.000Z | 2021-09-08T01:37:47.000Z | mecc/apps/utils/documents_generator/models/preview_mecc.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | [
"Apache-2.0"
] | null | null | null | import re
from django.db.models import Q
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import cm
from reportlab.platypus import Paragraph, Table, CondPageBreak
from mecc.apps.rules.models import Rule
from mecc.apps.training.models import SpecificParagraph, AdditionalParagraph
from mecc.apps.utils.documents_generator.utils.pdf import filter_content
from .preview_mecctable import PreviewMeccTable, LandscapeLeftNumberedCanvas
class PreviewMecc(PreviewMeccTable):
def __init__(self, trainings=None, reference='both'):
super().__init__(trainings, reference)
self.title_header = "Prévisualisation des MECC"
def set_doc_title(self):
self.title = "Previsualisation des MECC"
def make_styles(self):
super().make_styles()
self.styles.add(ParagraphStyle(
name='H1',
fontSize=12,
fontName="Helvetica-Bold",
textColor=colors.steelblue,
spaceBefore=5,
spaceAfter=5,
))
self.styles.add(ParagraphStyle(
name='H2',
fontSize=11,
fontName="Helvetica-Bold",
textColor=colors.steelblue,
spaceBefore=3,
spaceAfter=3,
))
self.styles.add(ParagraphStyle(
name='IndentedText',
leftIndent=20
))
self.styles.add(ParagraphStyle(
name='CenterNormal',
fontSize=10,
alignment=TA_CENTER
))
self.styles.add(ParagraphStyle(
name='Justify',
alignment=TA_JUSTIFY
))
self.styles.add(ParagraphStyle(
name='Bullet_1',
bulletIndent=25,
bulletText="•"
))
def build_doc(self):
self.write_preview_header()
self.write_landscape_training_infos()
self.write_derogs_and_adds()
self.story.append(CondPageBreak(8*cm))
self.write_table_title()
self.write_mecctable()
self.document.build(
self.story,
canvasmaker=LandscapeLeftNumberedCanvas
)
pdf = self.buffer.getvalue()
self.buffer.close()
self.response.write(pdf)
return self.response
def write_derogs_and_adds(self, motivations=True):
derogs = SpecificParagraph.objects.\
filter(training_id=self.training).\
order_by('paragraph_gen_id')
adds = AdditionalParagraph.objects.filter(training_id=self.training)
rules = Rule.objects.\
filter(code_year=self.training.code_year).\
filter(
Q(id__in=[derog.rule_gen_id for derog in derogs]) \
| \
Q(id__in=[add.rule_gen_id for add in adds])
)
shared_adds = adds.filter(rule_gen_id__in=[derog.rule_gen_id for derog in derogs])
table_derogs = []
table_derogs_style = [
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('TOPPADDING', (0, 0), (-1, -1), 0),
('BOTTOMPADDING', (0, 0), (-1, -1), 0),
('LEFTPADDING', (0, 0), (-1, -1), 0),
('RIGHTPADDING', (0, 0), (-1, -1), 0),
# ('GRID', (0, 0), (-1, -1), 0.5, colors.green),
]
subtable_style = [
('TOPPADDING', (0, 0), (-1, -1), 0),
('BOTTOMPADDING', (0, 0), (-1, -1), 0),
('LEFTPADDING', (0, 0), (-1, -1), 0),
('RIGHTPADDING', (0, 0), (-1, -1), 0),
# ('GRID', (0, 0), (-1, -1), 0.5, colors.red),
]
count_lines = -1
self.story.append(
Paragraph(
"<para>Dérogations et alinéas additionnels</para>",
self.styles['H1']
)
)
if rules:
if derogs:
for rule in rules.filter(id__in=[derog.rule_gen_id for derog in derogs]):
count_lines += 1
table_derogs.append(
[Paragraph(
rule.label,
style=self.styles['H2']
)]
)
table_derogs_style.extend([
('SPAN', (0, count_lines), (-1, count_lines)),
('TOPPADDING', (0, count_lines), (-1, count_lines), 0),
('BOTTOMPADDING', (0, count_lines), (-1, count_lines), 0),
])
for derog in derogs.filter(rule_gen_id=rule.id):
count_lines += 1
table_derogs.append(
[
Paragraph(
"<para textColor=steelblue><b>(D)</b></para>",
style=self.styles['CenterNormal']
),
Table(
[[self.clean_up(derog.text_specific_paragraph)]],
style=subtable_style,
),
Paragraph(
"<para textColor=red><u>Motif de la dérogation</u> : %s" \
% filter_content(derog.text_motiv),
style=self.styles['Normal']
) if motivations else ''
]
)
table_derogs_style.extend([
('TOPPADDING', (0, count_lines), (-1, count_lines), 3),
('BOTTOMPADDING', (0, count_lines), (-1, count_lines), 3),
('RIGHTPADDING', (1, count_lines), (1, count_lines), 3),
('LEFTPADDING', (2, count_lines), (2, count_lines), 3),
])
if motivations:
table_derogs_style.append(
('LINEAFTER', (1, count_lines), (1, count_lines), 0.5, colors.red),
)
if shared_adds and rule.id in [add.rule_gen_id for add in shared_adds]:
add = shared_adds.get(rule_gen_id=derog.rule_gen_id).text_additional_paragraph
count_lines += 1
table_derogs.append([
Paragraph(
"<para textColor=green><b>(A)</b></para>",
self.styles['CenterNormal']
),
Table(
[[self.clean_up(add)]],
style=subtable_style,
),
""
])
table_derogs_style.extend([
('BOTTOMPADDING', (0, count_lines), (-1, count_lines), 3),
('RIGHTPADDING', (1, count_lines), (1, count_lines), 3),
])
if adds:
if shared_adds:
adds = adds.exclude(id__in=[add.id for add in shared_adds])
for rule in rules.filter(id__in=[add.rule_gen_id for add in adds]):
count_lines += 1
table_derogs.append(
[Paragraph(
rule.label,
style=self.styles['H2']
)]
)
table_derogs_style.extend([
('SPAN', (0, count_lines), (-1, count_lines)),
('TOPPADDING', (0, count_lines), (-1, count_lines), 0),
('BOTTOMPADDING', (0, count_lines), (-1, count_lines), 0),
])
for add in adds.filter(rule_gen_id=rule.id):
count_lines += 1
table_derogs.append(
[
Paragraph(
"<para textColor=green><b>(A)</b></para>",
style=self.styles['CenterNormal']
),
Table(
[[self.clean_up(add.text_additional_paragraph)]],
style=subtable_style,
),
""
]
)
table_derogs_style.extend([
('TOPPADDING', (0, count_lines), (-1, count_lines), 3),
('BOTTOMPADDING', (0, count_lines), (-1, count_lines), 3),
])
self.story.append(
Table(
table_derogs,
style=table_derogs_style,
colWidths=[1*cm, 16.30*cm, 8.35*cm]
)
)
else:
self.story.append(
Paragraph(
"Néant",
self.styles['IndentedText']
)
)
def clean_up(self, text, style=''):
"""
        Return the text as a list of reportlab Paragraph flowables,
        rendering <li> fragments as bulleted items
"""
text = filter_content(text)
text = text.replace('\\r\\n', '<br/>')
reg = re.compile(r'>(.*?)</(p|li)>')
r = reg.findall(text.replace('r\\n\\', '<br><\\br>'))
_list = []
for t, v in r:
if v == 'li':
_list.append(Paragraph(
"<para %s leftIndent=40>%s</para>" % (
style, t), self.styles['Bullet_1']))
else:
_list.append(Paragraph(
"<para %s >%s</para>" % (
style, t), self.styles['Justify']))
return _list
| 40.236 | 102 | 0.435729 |
a781d77b7ecfd3c85b1653b281d14c4c1b29125c | 2,455 | py | Python | influxdb_client/domain/xy_geom.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | influxdb_client/domain/xy_geom.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | influxdb_client/domain/xy_geom.py | Rajpratik71/influxdb-client-python | ae537018b638600552b3ac11f1b070c048719910 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Influx API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class XYGeom(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
LINE = "line"
STEP = "step"
STACKED = "stacked"
BAR = "bar"
MONOTONEX = "monotoneX"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501,D401,D403
"""XYGeom - a model defined in OpenAPI.""" # noqa: E501 self.discriminator = None
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, XYGeom):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
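if __name__ == "__main__":
    # Minimal usage sketch: XYGeom only carries enum-style constants, so
    # to_dict() yields an empty mapping and any two instances compare equal.
    geom = XYGeom()
    print(XYGeom.MONOTONEX)   # -> monotoneX
    print(geom.to_dict())     # -> {}
    print(geom == XYGeom())   # -> True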
| 26.117021 | 120 | 0.550713 |
3244b045194193b124f57ca4278cb005038d6b3b | 5,331 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ppp_ma_ssrp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | ["ECL-2.0", "Apache-2.0"] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ppp_ma_ssrp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | ["ECL-2.0", "Apache-2.0"] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ppp_ma_ssrp_cfg.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
""" Cisco_IOS_XR_ppp_ma_ssrp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ppp\-ma\-ssrp package configuration.
This module contains definitions
for the following management objects\:
ssrp\: Shared plane SSRP configuration data
This YANG module augments the
Cisco\-IOS\-XR\-config\-mda\-cfg,
Cisco\-IOS\-XR\-ifmgr\-cfg
modules with configuration data.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ssrp(Entity):
"""
Shared plane SSRP configuration data
.. attribute:: profiles
Table of SSRP Profiles
**type**\: :py:class:`Profiles <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ma_ssrp_cfg.Ssrp.Profiles>`
"""
_prefix = 'ppp-ma-ssrp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(Ssrp, self).__init__()
self._top_entity = None
self.yang_name = "ssrp"
self.yang_parent_name = "Cisco-IOS-XR-ppp-ma-ssrp-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("profiles", ("profiles", Ssrp.Profiles))])
self._leafs = OrderedDict()
self.profiles = Ssrp.Profiles()
self.profiles.parent = self
self._children_name_map["profiles"] = "profiles"
self._segment_path = lambda: "Cisco-IOS-XR-ppp-ma-ssrp-cfg:ssrp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ssrp, [], name, value)
class Profiles(Entity):
"""
Table of SSRP Profiles
.. attribute:: profile
SSRP Profile configuration
**type**\: list of :py:class:`Profile <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ppp_ma_ssrp_cfg.Ssrp.Profiles.Profile>`
"""
_prefix = 'ppp-ma-ssrp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(Ssrp.Profiles, self).__init__()
self.yang_name = "profiles"
self.yang_parent_name = "ssrp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("profile", ("profile", Ssrp.Profiles.Profile))])
self._leafs = OrderedDict()
self.profile = YList(self)
self._segment_path = lambda: "profiles"
self._absolute_path = lambda: "Cisco-IOS-XR-ppp-ma-ssrp-cfg:ssrp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ssrp.Profiles, [], name, value)
class Profile(Entity):
"""
SSRP Profile configuration
.. attribute:: name (key)
The name of the profile
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: max_hops
This specifies the maximum number of hops for packets on the SSO channel
**type**\: int
**range:** 1..255
.. attribute:: peer_ipv4_address
This specifies the remote end's IPv4\-address for the SSO channel
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ppp-ma-ssrp-cfg'
_revision = '2015-11-09'
def __init__(self):
super(Ssrp.Profiles.Profile, self).__init__()
self.yang_name = "profile"
self.yang_parent_name = "profiles"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('max_hops', (YLeaf(YType.uint32, 'max-hops'), ['int'])),
('peer_ipv4_address', (YLeaf(YType.str, 'peer-ipv4-address'), ['str'])),
])
self.name = None
self.max_hops = None
self.peer_ipv4_address = None
self._segment_path = lambda: "profile" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-ppp-ma-ssrp-cfg:ssrp/profiles/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ssrp.Profiles.Profile, ['name', 'max_hops', 'peer_ipv4_address'], name, value)
def clone_ptr(self):
self._top_entity = Ssrp()
return self._top_entity
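if __name__ == "__main__":
    # Minimal configuration sketch (values are illustrative): populate one
    # SSRP profile the way ydk-py callers normally fill a YList entry.
    ssrp = Ssrp()
    profile = Ssrp.Profiles.Profile()
    profile.name = "ssrp-profile-1"
    profile.max_hops = 10
    profile.peer_ipv4_address = "192.0.2.1"
    ssrp.profiles.profile.append(profile)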
| 32.705521 | 177 | 0.561433 |
9e6b461323d942e40f831c38a8163856c696185a | 252 | py | Python | src/apps/search/views.py | snicoper/snicoper.com | 22c17b5ead6096227a3415770c0cbd2923f2f14a | ["MIT"] | 2 | 2017-04-22T11:35:21.000Z | 2017-09-01T19:49:59.000Z | src/apps/search/views.py | snicoper/snicoper.com | 22c17b5ead6096227a3415770c0cbd2923f2f14a | ["MIT"] | null | null | null | src/apps/search/views.py | snicoper/snicoper.com | 22c17b5ead6096227a3415770c0cbd2923f2f14a | ["MIT"] | null | null | null |
from haystack.generic_views import SearchView
from haystack.query import SearchQuerySet
class ArticleSearchView(SearchView):
template_name = 'search/search_article.html'
queryset = SearchQuerySet().order_by('-create_at')
paginate_by = 10
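# Hypothetical URLconf wiring for this view (the route and name below are
# assumptions, not part of this app):
#
#   from django.urls import path
#   from .views import ArticleSearchView
#
#   urlpatterns = [
#       path('search/', ArticleSearchView.as_view(), name='search'),
#   ]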
| 28 | 54 | 0.789683 |
8d9f11f2d6136cc94ddbd690ac99c4fb28ba63a8 | 4,407 | py | Python | sts_wrldom/utils.py | BigBossAnwer/STS-Pipeline | 952d2c577dd4b8a66c99b80a24589a98e20c2e60 | ["MIT"] | null | null | null | sts_wrldom/utils.py | BigBossAnwer/STS-Pipeline | 952d2c577dd4b8a66c99b80a24589a98e20c2e60 | ["MIT"] | null | null | null | sts_wrldom/utils.py | BigBossAnwer/STS-Pipeline | 952d2c577dd4b8a66c99b80a24589a98e20c2e60 | ["MIT"] | null | null | null |
import sys
from pathlib import Path
import numpy as np
import pandas as pd
def log_frame(df, name, tag):
"""Logs a dataframe as a .csv to $cwd/log
Args:
df: dataframe to log
name (str): the name of the logged .csv
tag (str): the tag of the logged .csv
"""
try:
Path("log").mkdir(exist_ok=True)
df.to_csv(str(Path("log", name + "_" + tag + ".csv")), index=False)
except IOError:
print("Error: Log write failed")
except:
print("Unexpected error: ", sys.exc_info()[0])
raise
def write_results(df, name, tag):
"""Writes results (predictions) in the format requested as a .txt in $cwd/results
Args:
df: the results dataframe
name (str): the name of the written .txt
tag (str): the tag of the written .txt
"""
try:
Path("results").mkdir(exist_ok=True)
df.to_csv(str(Path("results", name + "_" + tag + ".txt")), sep="\t", index=False)
except IOError:
print("Error: Log write failed")
except:
print("Unexpected error: ", sys.exc_info()[0])
raise
def rmse(predictions, targets):
"""Computes Root Mean Squared Error
Args:
predictions (list): a list of predicted labels
targets (list): a list of gold labels
Returns:
numpy.float64: the RMSE between the predictions and the gold labels
"""
assert len(predictions) == len(targets)
return np.sqrt(((predictions - targets) ** 2).mean())
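# Worked example (illustrative values; the inputs must support elementwise
# subtraction, e.g. numpy arrays): predictions [1, 2, 3] against targets
# [1, 2, 5] give errors (0, 0, -2), so RMSE = sqrt((0 + 0 + 4) / 3) ~= 1.155.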
def accuracy(predictions, targets):
"""Computes raw accuracy (True Predictions) / (All Predictions)
Args:
predictions (list): a list of predicted labels
targets (list): a list of gold labels
Returns:
float: the raw accuracy between the predictions and the gold labels
"""
assert len(predictions) == len(targets)
count_pos = 0
for predic, gold in zip(predictions, targets):
if predic == gold:
count_pos += 1
return float(count_pos) / len(targets)
def get_scores(predictions, targets, prec=3):
"""Returns a dictionary containing overall and for each label their respective
recall, precision, and F1 score
Args:
predictions (list): a list of predicted labels
        targets (list): a list of gold labels
prec (int, optional): precision of metric rounding. Defaults to 3.
Returns:
dict: a dictionary of metrics like:
{
"micro": {
"recall": float,
"precision": float,
"fscore": float
}
"1": ...
...
"5": ...
}
"""
label_set = [1, 2, 3, 4, 5]
classification_report = {}
classification_report["micro"] = {"recall": 0.0, "precision": 0.0, "fscore": 0.0}
for label in label_set:
classification_report[label] = {"recall": 0.0, "precision": 0.0, "fscore": 0.0}
tp, fp, fn = 0, 0, 0
for idx, gold in enumerate(targets):
prediction = predictions[idx]
if gold == prediction:
if prediction == label:
tp += 1
else:
if prediction == label:
fp += 1
else:
fn += 1
try:
recall = float(tp) / (tp + fn)
except ZeroDivisionError:
recall = 0.0
try:
precision = float(tp) / (tp + fp)
except ZeroDivisionError:
precision = 0.0
try:
fscore = 2 * precision * recall / (precision + recall)
except ZeroDivisionError:
fscore = 0.0
classification_report[label]["recall"] = round(recall, prec)
classification_report[label]["precision"] = round(precision, prec)
classification_report[label]["fscore"] = round(fscore, prec)
classification_report["micro"]["recall"] += recall
classification_report["micro"]["precision"] += precision
classification_report["micro"]["fscore"] += fscore
for key in classification_report["micro"].keys():
classification_report["micro"][key] /= len(label_set)
classification_report["micro"][key] = round(
classification_report["micro"][key], prec
)
return classification_report
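if __name__ == "__main__":
    # Quick self-check with illustrative labels: two of three predictions
    # match gold, so accuracy is 2/3, RMSE reflects the single miss, and
    # get_scores() reports the label-averaged metrics under its "micro" key.
    gold = [1, 3, 5]
    pred = [1, 3, 4]
    print(accuracy(pred, gold))                  # 0.666...
    print(rmse(np.array(pred), np.array(gold)))  # ~0.577
    print(get_scores(pred, gold)["micro"])       # averaged recall/precision/fscore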
| 30.818182 | 89 | 0.561833 |
645ab4c0295f939b3b914e8132c093916b82f6d4 | 13,506 | py | Python | python/ccxt/async_support/base/exchange.py | chiragmatkar/ccxt | d04a286a802fa1a59048a51eb91913bedd0dfdc6 | ["MIT"] | null | null | null | python/ccxt/async_support/base/exchange.py | chiragmatkar/ccxt | d04a286a802fa1a59048a51eb91913bedd0dfdc6 | ["MIT"] | null | null | null | python/ccxt/async_support/base/exchange.py | chiragmatkar/ccxt | d04a286a802fa1a59048a51eb91913bedd0dfdc6 | ["MIT"] | 1 | 2020-10-19T11:49:50.000Z | 2020-10-19T11:49:50.000Z |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.34.5'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
self.markets_loading = None
self.reloading_markets = False
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop, enable_cleanup_closed=True)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle(self.rateLimit)
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets_helper(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def load_markets(self, reload=False, params={}):
if (reload and not self.reloading_markets) or not self.markets_loading:
self.reloading_markets = True
coroutine = self.load_markets_helper(reload, params)
# coroutines can only be awaited once so we wrap it in a task
self.markets_loading = asyncio.ensure_future(coroutine)
try:
result = await self.markets_loading
except Exception as e:
self.reloading_markets = False
self.markets_loading = None
raise e
self.reloading_markets = False
return result
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
async def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
async def fetch_deposit_address(self, code=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposit_address() is not supported yet')
async def sleep(self, milliseconds):
return await asyncio.sleep(milliseconds / 1000)
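if __name__ == '__main__':
    # Minimal lifecycle sketch for a concrete subclass (the exchange name is
    # illustrative): load_markets() lazily opens the aiohttp session, and
    # close() releases it, as the __del__ warning above requires.
    import ccxt.async_support as ccxt_async
    async def _demo():
        exchange = ccxt_async.binance({'enableRateLimit': True})
        try:
            markets = await exchange.load_markets()
            print(len(markets), 'markets loaded')
        finally:
            await exchange.close()
    asyncio.get_event_loop().run_until_complete(_demo())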
| 42.471698 | 355 | 0.619725 |
ebaf6935a513a784b5d5b2c2f1a680b651f64d21 | 15,142 | py | Python | goose/extractors/images.py | chyt/gisty-goose | b1ac57a8d859d3f34fec6d8e5062f9601cdf0fda | ["Apache-2.0", "MIT"] | 1 | 2021-12-13T18:14:02.000Z | 2021-12-13T18:14:02.000Z | goose/extractors/images.py | chyt/gisty-goose | b1ac57a8d859d3f34fec6d8e5062f9601cdf0fda | ["Apache-2.0", "MIT"] | null | null | null | goose/extractors/images.py | chyt/gisty-goose | b1ac57a8d859d3f34fec6d8e5062f9601cdf0fda | ["Apache-2.0", "MIT"] | 1 | 2021-12-13T18:14:17.000Z | 2021-12-13T18:14:17.000Z |
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import os
from six.moves.urllib.parse import urlparse, urljoin
from goose.extractors import BaseExtractor
from goose.image import Image
from goose.utils import FileHelper
from goose.utils.images import ImageUtils
KNOWN_IMG_DOM_NAMES = [
"yn-story-related-media",
"cnn_strylccimg300cntr",
"big_photo",
"ap-smallphoto-a",
]
class DepthTraversal(object):
def __init__(self, node, parent_depth, sibling_depth):
self.node = node
self.parent_depth = parent_depth
self.sibling_depth = sibling_depth
class ImageExtractor(BaseExtractor):
def __init__(self, fetcher, config, article):
super(ImageExtractor, self).__init__(config, article)
self.fetcher = fetcher
self.custom_site_mapping = {}
self.load_customesite_mapping()
        # the minimum number of bytes for an image we'd accept
self.images_min_bytes = 4000
# the webpage url that we're extracting content from
self.target_url = article.final_url
# stores a hash of our url for
# reference and image processing
self.link_hash = article.link_hash
# this lists all the known bad button names that we have
self.badimages_names_re = re.compile(
".html|.gif|.ico|button|twitter.jpg|facebook.jpg|ap_buy_photo"
"|digg.jpg|digg.png|delicious.png|facebook.png|reddit.jpg"
"|doubleclick|diggthis|diggThis|adserver|/ads/|ec.atdmt.com"
"|mediaplex.com|adsatt|view.atdmt"
)
def get_best_image(self, doc, topNode):
image = self.check_known_elements()
if image:
return image
image = self.check_large_images(topNode, 0, 0)
if image:
return image
image = self.check_meta_tag()
if image:
return image
return Image()
def check_meta_tag(self):
# check link tag
image = self.check_link_tag()
if image:
return image
# check opengraph tag
image = self.check_opengraph_tag()
if image:
return image
def check_large_images(self, node, parent_depth_level, sibling_depth_level):
"""\
        although slow, the best way to determine the best image is to download
        them and check the actual dimensions of each image on disk,
        so we'll go through a phased approach...
1. get a list of ALL images from the parent node
2. filter out any bad image names that we know of (gifs, ads, etc..)
3. do a head request on each file to make sure it meets
our bare requirements
4. any images left over let's do a full GET request,
download em to disk and check their dimensions
5. Score images based on different factors like height/width
and possibly things like color density
"""
good_images = self.get_image_candidates(node)
if good_images:
scored_images = self.fetch_images(good_images, parent_depth_level)
if scored_images:
highscore_image = sorted(scored_images.items(),
key=lambda x: x[1], reverse=True)[0][0]
main_image = Image()
main_image.src = highscore_image.src
main_image.width = highscore_image.width
main_image.height = highscore_image.height
main_image.extraction_type = "bigimage"
main_image.confidence_score = 100 / len(scored_images) \
if len(scored_images) > 0 else 0
return main_image
depth_obj = self.get_depth_level(node, parent_depth_level, sibling_depth_level)
if depth_obj:
return self.check_large_images(depth_obj.node,
depth_obj.parent_depth, depth_obj.sibling_depth)
return None
def get_depth_level(self, node, parent_depth, sibling_depth):
MAX_PARENT_DEPTH = 2
if parent_depth > MAX_PARENT_DEPTH:
return None
else:
sibling_node = self.parser.previousSibling(node)
if sibling_node is not None:
return DepthTraversal(sibling_node, parent_depth, sibling_depth + 1)
elif node is not None:
parent = self.parser.getParent(node)
if parent is not None:
return DepthTraversal(parent, parent_depth + 1, 0)
return None
def fetch_images(self, images, depth_level):
"""\
        download the images to temp disk and set their dimensions
        - we score the images in the order in which they appear, so images
          higher up in the DOM carry more importance
        - the area of the 1st image sets the baseline; each later image is
          scored by how much larger or smaller it is than that baseline
        - we also try to weed out banner-type ad blocks that have big widths
          and small heights or vice versa
        - so if an image is the 3rd one found in the DOM, its sequence score
          is 1 / 3 = .33, multiplied by its area ratio to the first image
"""
image_results = {}
initial_area = float(0.0)
total_score = float(0.0)
cnt = float(1.0)
MIN_WIDTH = 50
for image in images[:30]:
src = self.parser.getAttribute(image, attr='src')
src = self.build_image_path(src)
local_image = self.get_local_image(src)
width = local_image.width
height = local_image.height
src = local_image.src
file_extension = local_image.file_extension
            # skip gifs and files whose extension could not be determined
            if file_extension not in ('.gif', 'NA'):
if (depth_level >= 1 and local_image.width > 300) or depth_level < 1:
if not self.is_banner_dimensions(width, height):
if width > MIN_WIDTH:
sequence_score = float(1.0 / cnt)
area = float(width * height)
total_score = float(0.0)
if initial_area == 0:
initial_area = area * float(1.48)
total_score = 1
else:
area_difference = float(area / initial_area)
total_score = sequence_score * area_difference
image_results.update({local_image: total_score})
cnt += 1
return image_results
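    # Worked example of the scoring above (illustrative sizes): a first image
    # of 600x400 sets initial_area = 240000 * 1.48 = 355200 and scores 1.0;
    # a second 300x200 image scores (1/2) * (60000 / 355200) ~= 0.08, so the
    # earlier, larger image wins unless a later one is dramatically bigger.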
def get_image(self, element, src, score=100, extraction_type="N/A"):
# build the Image object
image = Image()
image.src = self.build_image_path(src)
image.extraction_type = extraction_type
image.confidence_score = score
# check if we have a local image
# in order to add more information
# on the Image object
local_image = self.get_local_image(image.src)
if local_image:
image.bytes = local_image.bytes
image.height = local_image.height
image.width = local_image.width
# return the image
return image
def is_banner_dimensions(self, width, height):
"""\
returns true if we think this is kind of a bannery dimension
like 600 / 100 = 6 may be a fishy dimension for a good image
"""
if width == height:
return False
if width > height:
            # use true division so the ratio isn't truncated on Python 2
            diff = float(width) / height
if diff > 5:
return True
if height > width:
            diff = float(height) / width
if diff > 5:
return True
return False
def get_node_images(self, node):
images = self.parser.getElementsByTag(node, tag='img')
if images is not None and len(images) < 1:
return None
return images
def filter_bad_names(self, images):
"""\
takes a list of image elements
and filters out the ones with bad names
"""
good_images = []
for image in images:
if self.is_valid_filename(image):
good_images.append(image)
return good_images if len(good_images) > 0 else None
def is_valid_filename(self, imageNode):
"""\
will check the image src against a list
of bad image files we know of like buttons, etc...
"""
src = self.parser.getAttribute(imageNode, attr='src')
if not src:
return False
if self.badimages_names_re.search(src):
return False
return True
def get_image_candidates(self, node):
good_images = []
filtered_images = []
images = self.get_node_images(node)
if images:
filtered_images = self.filter_bad_names(images)
if filtered_images:
good_images = self.get_images_bytesize_match(filtered_images)
return good_images
def get_images_bytesize_match(self, images):
"""\
loop through all the images and find the ones
that have the best bytez to even make them a candidate
"""
cnt = 0
MAX_BYTES_SIZE = 15728640
good_images = []
        # iterate over a copy so the remove() below can't skip elements
        for image in list(images):
if cnt > 30:
return good_images
src = self.parser.getAttribute(image, attr='src')
src = self.build_image_path(src)
local_image = self.get_local_image(src)
if local_image:
bytes = local_image.bytes
if (bytes == 0 or bytes > self.images_min_bytes) \
and bytes < MAX_BYTES_SIZE:
good_images.append(image)
else:
images.remove(image)
cnt += 1
return good_images if len(good_images) > 0 else None
def get_node(self, node):
return node if node else None
def check_link_tag(self):
"""\
checks to see if we were able to
find open link_src on this page
"""
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')
for item in meta:
src = self.parser.getAttribute(item, attr='href')
if src:
return self.get_image(item, src, extraction_type='linktag')
return None
def check_opengraph_tag(self):
"""\
checks to see if we were able to
find open graph tags on this page
"""
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='meta', attr='property', value='og:image')
for item in meta:
src = self.parser.getAttribute(item, attr='content')
if src:
return self.get_image(item, src, extraction_type='opengraph')
return None
def get_local_image(self, src):
"""\
returns the bytes of the image file on disk
"""
return ImageUtils.store_image(self.fetcher, self.link_hash, src, self.config)
def get_clean_domain(self):
if self.article.domain:
return self.article.domain.replace('www.', '')
return None
def check_known_elements(self):
"""\
        in here we check for known image containers from sites
        we've already vetted, like yahoo, techcrunch, etc., that have
        known places to look for good images.
        TODO: enable this to use a series of settings files
        so people can define what the image ids/classes
        are on specific sites
"""
domain = self.get_clean_domain()
if domain in self.custom_site_mapping.keys():
classes = self.custom_site_mapping.get(domain).split('|')
for classname in classes:
KNOWN_IMG_DOM_NAMES.append(classname)
image = None
doc = self.article.raw_doc
def _check_elements(elements):
image = None
for element in elements:
tag = self.parser.getTag(element)
if tag == 'img':
image = element
return image
else:
images = self.parser.getElementsByTag(element, tag='img')
if images:
image = images[0]
return image
return image
# check for elements with known id
for css in KNOWN_IMG_DOM_NAMES:
elements = self.parser.getElementsByTag(doc, attr="id", value=css)
image = _check_elements(elements)
if image is not None:
src = self.parser.getAttribute(image, attr='src')
if src:
return self.get_image(image, src, score=90, extraction_type='known')
# check for elements with known classes
for css in KNOWN_IMG_DOM_NAMES:
elements = self.parser.getElementsByTag(doc, attr='class', value=css)
image = _check_elements(elements)
if image is not None:
src = self.parser.getAttribute(image, attr='src')
if src:
return self.get_image(image, src, score=90, extraction_type='known')
return None
def build_image_path(self, src):
"""\
        This method takes an image path and builds
        out the absolute path to that image,
        using the initial url we crawled
        so we can still find a link to the image
        if they use relative urls like ../myimage.jpg
"""
o = urlparse(src)
# we have a full url
if o.hostname:
return o.geturl()
# we have a relative url
return urljoin(self.target_url, src)
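    # e.g. (hypothetical values) with target_url 'http://example.com/news/a.html',
    # build_image_path('../img/pic.jpg') -> 'http://example.com/img/pic.jpg',
    # while absolute urls like 'http://cdn.example.com/pic.jpg' pass through.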
def load_customesite_mapping(self):
# TODO
path = os.path.join('images', 'known-image-css.txt')
data_file = FileHelper.loadResourceFile(path)
lines = data_file.splitlines()
for line in lines:
domain, css = line.split('^')
self.custom_site_mapping.update({domain: css})
| 36.138425 | 96 | 0.592128 |
13a5047e089e6ae401f20aaae852d450fb4928d3 | 135,485 | py | Python | salt/state.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | 1 | 2020-10-19T11:49:49.000Z | 2020-10-19T11:49:49.000Z | salt/state.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | null | null | null | salt/state.py | diego-treitos/salt | d2aec156ff2ef48ac21b4db211efb43220c6465c | [
"Apache-2.0"
] | 1 | 2020-10-19T11:49:50.000Z | 2020-10-19T11:49:50.000Z | # -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
import copy
import site
import fnmatch
import logging
import datetime
import traceback
# Import salt libs
import salt.utils
import salt.loader
import salt.minion
import salt.pillar
import salt.fileclient
import salt.utils.event
import salt.utils.url
import salt.syspaths as syspaths
from salt.utils import context, immutabletypes
from salt.template import compile_template, compile_template_str
from salt.exceptions import SaltRenderError, SaltReqTimeoutError, SaltException
from salt.utils.odict import OrderedDict, DefaultOrderedDict
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset([
'onchanges',
'onfail',
'prereq',
'prerequired',
'watch',
'require',
'listen',
])
STATE_REQUISITE_IN_KEYWORDS = frozenset([
'onchanges_in',
'onfail_in',
'prereq_in',
'watch_in',
'require_in',
'listen_in',
])
STATE_RUNTIME_KEYWORDS = frozenset([
'fun',
'state',
'check_cmd',
'failhard',
'onlyif',
'unless',
'order',
'prereq',
'prereq_in',
'prerequired',
'reload_modules',
'reload_grains',
'reload_pillar',
'fire_event',
'saltenv',
'use',
'use_in',
'__env__',
'__sls__',
'__id__',
'__pub_user',
'__pub_arg',
'__pub_jid',
'__pub_fun',
'__pub_tgt',
'__pub_ret',
'__pub_pid',
'__pub_tgt_type',
'__prereq__',
])
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(STATE_REQUISITE_IN_KEYWORDS).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
'''
Take a low tag and split it back into the low dict that it came from
'''
state, id_, name, fun = tag.split('_|-')
return {'state': state,
'__id__': id_,
'name': name,
'fun': fun}
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
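# For example, the low chunk {'state': 'pkg', '__id__': 'vim', 'name': 'vim',
# 'fun': 'installed'} yields the tag 'pkg_|-vim_|-vim_|-installed', which
# split_low_tag() above turns back into the same dict.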
def _l_tag(name, id_):
low = {'name': 'listen_{0}'.format(name),
'__id__': 'listen_{0}'.format(id_),
'state': 'Listen_Error',
'fun': 'Listen_Error'}
return _gen_tag(low)
def trim_req(req):
'''
Trim any function off of a requisite
'''
reqfirst = next(iter(req))
if '.' in reqfirst:
return {reqfirst.split('.')[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
'''
Scan high data for the id referencing the given name
'''
ext_id = ''
if name in high:
ext_id = name
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(
high[nid][state],
list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id = nid
return ext_id
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], six.string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([('old' in x and 'new' in x)
for x in six.itervalues(chg)]):
msg = 'Made the following changes:\n'
for pkg in chg:
old = chg[pkg]['old']
if not old and old not in (False, None):
old = 'absent'
new = chg[pkg]['new']
if not new and new not in (False, None):
new = 'absent'
msg += '{0} changed from {1} to ' \
'{2}\n'.format(pkg, old, new)
if not msg:
msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
class StateError(Exception):
'''
Custom exception class.
'''
pass
class Compiler(object):
'''
Class used to compile and manage the High Data structure
'''
def __init__(self, opts):
self.opts = opts
self.rend = salt.loader.render(self.opts, {})
# We need __setstate__ and __getstate__ to avoid pickling errors since
# 'self.rend' contains a function reference which is not picklable.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts}
def render_template(self, template, **kwargs):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'], **kwargs)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
'''
Turns dot delimited function refs into function strings
'''
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# Is this is a short state? It needs to be padded!
if '.' in high[name]:
comps = high[name].split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if not isinstance(name, six.string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but is '
'a {2}'.format(name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if not isinstance(body[state], list):
errors.append(
'State {0!r} in SLS {1!r} is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ('require', 'watch', 'prereq'):
if not isinstance(arg[argfirst], list):
errors.append(('The {0}'
' statement in state {1!r} in SLS {2!r} '
'needs to be formed as a list').format(
argfirst,
name,
body['__sls__']
))
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type {0!r} '
'in state {1!r}, in SLS '
'{2!r}. Requisite types must '
'not contain dots, did you '
'mean {3!r}?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'is SLS {1}\n'
).format(
str(req_val),
body['__sls__']))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(('Multiple dictionaries '
'defined in argument of state {0!r} in SLS'
' {1!r}').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(('No function declared in state {0!r} in'
' SLS {1!r}').format(state, body['__sls__']))
elif fun > 1:
errors.append(
'Too many functions declared in state {0!r} in '
'SLS {1!r}'.format(state, body['__sls__'])
)
return errors
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
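    # Worked example (illustrative): given chunks with orders [5, None,
    # 'last'], the explicit 5 raises cap to 105, the unordered chunk is
    # assigned 105, and 'last' becomes 105 + 1000000, so the final sort is
    # 5, 105, 1000105 (name_order nudges ties by fractions of 1/10000).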
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
live.update(entry[low_name][0])
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
class State(object):
'''
Class used to execute salt states
'''
def __init__(self, opts, pillar=None, jid=None):
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self._pillar_override = pillar
self.opts['pillar'] = self._gather_pillar()
self.state_con = {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
def _gather_pillar(self):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillar=self._pillar_override,
pillarenv=self.opts.get('pillarenv')
)
ret = pillar.compile_pillar()
if self._pillar_override and isinstance(self._pillar_override, dict):
ret.update(self._pillar_override)
return ret
def _mod_init(self, low):
'''
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
minit = '{0}.mod_init'.format(low['state'])
if low['state'] not in self.mod_init:
if minit in self.states:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def _mod_aggregate(self, low, running, chunks):
'''
Execute the aggregation systems to runtime modify the low chunk
'''
agg_opt = self.functions['config.option']('state_aggregate')
if low.get('aggregate') is True:
agg_opt = low['aggregate']
if agg_opt is True:
agg_opt = [low['state']]
else:
return low
if low['state'] in agg_opt and not low.get('__agg__'):
agg_fun = '{0}.mod_aggregate'.format(low['state'])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low['__agg__'] = True
except TypeError:
log.error('Failed to execute aggregate for state {0}'.format(low['state']))
return low
def _run_check(self, low_data):
'''
Check that unless doesn't return 0, and that onlyif returns a 0.
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
if 'onlyif' in low_data:
if not isinstance(low_data['onlyif'], list):
low_data_onlyif = [low_data['onlyif']]
else:
low_data_onlyif = low_data['onlyif']
for entry in low_data_onlyif:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd != 0 and ret['result'] is False:
ret.update({'comment': 'onlyif execution failed',
'skip_watch': True,
'result': True})
return ret
elif cmd == 0:
ret.update({'comment': 'onlyif execution succeeded', 'result': False})
return ret
if 'unless' in low_data:
if not isinstance(low_data['unless'], list):
low_data_unless = [low_data['unless']]
else:
low_data_unless = low_data['unless']
for entry in low_data_unless:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'unless execution succeeded',
'skip_watch': True,
'result': True})
elif cmd != 0:
ret.update({'comment': 'unless execution failed', 'result': False})
return ret
# No reason to stop, return ret
return ret
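        # Semantics implemented above: `onlyif` lets the state run only when
        # its command exits 0, while `unless` skips the state (reporting
        # success with skip_watch set) when its command exits 0.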
def _run_check_cmd(self, low_data):
'''
Alter the way a successful state run is determined
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
for entry in low_data['check_cmd']:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'check_cmd determined the state succeeded', 'result': True})
elif cmd != 0:
ret.update({'comment': 'check_cmd determined the state failed', 'result': False})
return ret
return ret
def load_modules(self, data=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, self.state_con, utils=self.utils)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
providers = [{data['state']: data['provider']}]
elif isinstance(data['provider'], list):
providers = data['provider']
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(self.opts,
provider[mod],
self.functions)
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(
mod,
func[func.rindex('.'):]
)
self.functions[f_key] = funcs[func]
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def module_refresh(self):
'''
Refresh all the modules
'''
log.debug('Refreshing modules...')
if self.opts['grains'].get('os') != 'MacOS':
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload(site)
except RuntimeError:
log.error('Error encountered during module reload. Modules were not reloaded.')
self.load_modules()
if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
self.functions['saltutil.refresh_modules']()
def check_refresh(self, data, ret):
'''
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
something. If the file function is managed check to see if the file is a
possible module type, e.g. a python, pyx, or .so. Always refresh if the
function is recurse, since that can lay down anything.
'''
_reload_modules = False
if data.get('reload_grains', False):
log.debug('Refreshing grains...')
self.opts['grains'] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get('reload_pillar', False):
log.debug('Refreshing pillar...')
self.opts['pillar'] = self._gather_pillar()
_reload_modules = True
if data.get('reload_modules', False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if not ret['changes']:
return
if data['state'] == 'file':
if data['fun'] == 'managed':
if data['name'].endswith(
('.py', '.pyx', '.pyo', '.pyc', '.so')):
self.module_refresh()
elif data['fun'] == 'recurse':
self.module_refresh()
elif data['fun'] == 'symlink':
if 'bin' in data['name']:
self.module_refresh()
elif data['state'] in ('pkg', 'ports'):
self.module_refresh()
def verify_ret(self, ret):
'''
Verify the state return data
'''
if not isinstance(ret, dict):
raise SaltException(
'Malformed state return, return must be a dict'
)
bad = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in ret:
bad.append(val)
if bad:
raise SaltException(
('The following keys were not present in the state '
'return: {0}'
).format(','.join(bad)))
def verify_data(self, data):
'''
Verify the data, return an error statement if something is wrong
'''
errors = []
if 'state' not in data:
errors.append('Missing "state" data')
if 'fun' not in data:
errors.append('Missing "fun" data')
if 'name' not in data:
errors.append('Missing "name" data')
if data['name'] and not isinstance(data['name'], six.string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but is '
'a {2}'.format(
data['name'], data['__sls__'], type(data['name']).__name__)
)
if errors:
return errors
full = data['state'] + '.' + data['fun']
if full not in self.states:
if '__sls__' in data:
errors.append(
'State \'{0}\' was not found in SLS \'{1}\''.format(
full,
data['__sls__']
)
)
else:
errors.append(
'Specified state \'{0}\' was not found'.format(
full
)
)
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
'Missing parameter {0} for state {1}'.format(
aspec.args[ind],
full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ''
if 'require' in data:
reqdec = 'require'
if 'watch' in data:
            # Check to see if the state has a mod_watch function; if it does
            # not, turn the watch into a require by extending the require
            # statement with the contents of watch, so that mod_watch is not
            # called but the requisite behavior is preserved
if '{0}.mod_watch'.format(data['state']) not in self.states:
if 'require' in data:
data['require'].extend(data.pop('watch'))
else:
data['require'] = data.pop('watch')
reqdec = 'require'
else:
reqdec = 'watch'
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data['state'] == reqfirst:
if (fnmatch.fnmatch(data['name'], req[reqfirst])
or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
err = ('Recursive require detected in SLS {0} for'
' require {1} in ID {2}').format(
data['__sls__'],
req,
data['__id__'])
errors.append(err)
return errors
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
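        # A minimal sketch of the high data shape this method validates
        # (illustrative example, not taken from a real SLS tree):
        #
        #   {'/etc/motd': {'file': ['managed', {'source': 'salt://motd'}],
        #                  '__sls__': 'motd', '__env__': 'base'}}
        #
        # Each ID declaration maps state module names to a list holding the
        # function name plus single-key argument dicts.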
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
try:
if name.startswith('__'):
continue
except AttributeError:
pass
if not isinstance(name, six.string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but '
'is a {2}. It may need to be quoted.'.format(
name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if body[state] is None:
errors.append(
'ID {0!r} in SLS {1!r} contains a short declaration '
'({2}) with a trailing colon. When not passing any '
'arguments to a state, the colon must be omitted.'
.format(name, body['__sls__'], state)
)
continue
if not isinstance(body[state], list):
errors.append(
'State {0!r} in SLS {1!r} is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == 'names':
if not isinstance(arg[argfirst], list):
errors.append(
'The \'names\' argument in state '
'{0!r} in SLS {1!r} needs to be '
'formed as a list'
.format(name, body['__sls__'])
)
if argfirst in ('require', 'watch', 'prereq'):
if not isinstance(arg[argfirst], list):
errors.append(
'The {0} statement in state {1!r} in '
'SLS {2!r} needs to be formed as a '
'list'.format(argfirst,
name,
body['__sls__'])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type {0!r} '
'in state {1!r}, in SLS '
'{2!r}. Requisite types must '
'not contain dots, did you '
'mean {3!r}?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'please check your syntax.\n'
).format(str(req_val)))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
'Multiple dictionaries defined in '
'argument of state {0!r} in SLS {1!r}'
.format(name, body['__sls__'])
)
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(
'No function declared in state {0!r} in SLS {1!r}'
.format(state, body['__sls__'])
)
elif fun > 1:
errors.append(
'Too many functions declared in state {0!r} in '
'SLS {1!r}'.format(state, body['__sls__'])
)
return errors
def verify_chunks(self, chunks):
'''
Verify the chunks in a list of low data structures
'''
err = []
for chunk in chunks:
err += self.verify_data(chunk)
return err
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
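        # Illustrative ordering behaviour (assumed inputs): a chunk with
        # {'order': 'last'} sorts after every numbered chunk, a negative
        # order is rebased past the cap, and chunks without an explicit
        # order all receive the current cap value.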
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
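        # A single high entry such as (illustrative example)
        #   {'vim': {'pkg': ['installed'], '__sls__': 'editors'}}
        # compiles to roughly
        #   {'state': 'pkg', 'name': 'vim', '__id__': 'vim',
        #    'fun': 'installed', '__sls__': 'editors'}
        # before order_chunks sorts the result.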
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
elif key == 'state':
# Don't pass down a state override
continue
elif (key == 'name' and
not isinstance(val, six.string_types)):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
live.update(entry[low_name][0])
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
'''
Pull the extend data and add it to the respective high data
'''
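        # When present, __extend__ is a list of dicts shaped like regular
        # high data, e.g. (assumed example):
        #   [{'apache': {'service': [{'watch': [{'file': '/etc/httpd.conf'}]}],
        #                '__sls__': 'www', '__env__': 'base'}}]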
errors = []
if '__extend__' not in high:
return high, errors
ext = high.pop('__extend__')
for ext_chunk in ext:
for name, body in six.iteritems(ext_chunk):
if name not in high:
state_type = next(
x for x in body if not x.startswith('__')
)
# Check for a matching 'name' override in high data
id_ = find_name(name, state_type, high)
if id_:
name = id_
else:
errors.append(
'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not '
'part of the high state.\n'
'This is likely due to a missing include statement '
'or an incorrectly typed ID.\nEnsure that a '
'state with an ID of \'{0}\' is available\nin '
'environment \'{1}\' and to SLS \'{2}\''.format(
name,
body.get('__env__', 'base'),
body.get('__sls__', 'base'))
)
continue
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if (isinstance(arg, six.string_types) and
isinstance(high[name][state][hind], six.string_types)):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if (isinstance(arg, dict) and
isinstance(high[name][state][hind], dict)):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if (argfirst ==
next(iter(high[name][state][hind]))):
# They match, check if the option is a
# watch or require, append, otherwise
# replace
if (argfirst == 'require' or
argfirst == 'watch'):
# Extend the list
(high[name][state][hind][argfirst]
.extend(arg[argfirst]))
update = True
else:
# Replace the value
high[name][state][hind] = arg
update = True
if (argfirst == 'name' and
next(iter(high[name][state][hind])) == 'names'):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
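        # __exclude__ entries may be bare sls names or explicit dicts, e.g.
        # (assumed example): ['webserver', {'sls': 'database'}, {'id': 'vim'}]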
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
            if isinstance(exc, six.string_types):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
sls = body.get('__sls__', '')
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
'''
Extend the data reference with requisite_in arguments
'''
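        # Sketch of the rewrite performed here (assumed example): a chunk
        # declaring {'require_in': [{'service': 'httpd'}]} produces an
        # __extend__ entry that gives the httpd service a matching
        # {'require': [{<this state>: <this id>}]} requisite.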
req_in = set([
'require_in',
'watch_in',
'onfail_in',
'onchanges_in',
'use',
'use_in',
'prereq',
'prereq_in',
])
req_in_all = req_in.union(
set([
'require',
'watch',
'onfail',
'onchanges',
]))
extend = {}
errors = []
for id_, body in six.iteritems(high):
if not isinstance(body, dict):
continue
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
rkey = key.split('_')[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in six.iteritems(items):
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = {}
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS {3!r}. Requisites must '
'not contain dots, did you mean {4!r}?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
if isinstance(items, list):
# Formed as a list of requisite additions
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
continue
if len(ind) < 1:
continue
_state = next(iter(ind))
name = ind[_state]
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS {3!r}. Requisites must '
'not contain dots, did you mean {4!r}?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if key == 'prereq_in':
# Add prerequired to origin
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{'prerequired': [{_state: name}]}
)
if key == 'prereq':
# Add prerequired to prereqs
ext_id = find_name(name, _state, high)
if not ext_id:
continue
if ext_id not in extend:
extend[ext_id] = {}
if _state not in extend[ext_id]:
extend[ext_id][_state] = []
extend[ext_id][_state].append(
{'prerequired': [{state: id_}]}
)
continue
if key == 'use_in':
# Add the running states args to the
# use_in states
ext_id = find_name(name, _state, high)
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = {}
if _state not in extend[ext_id]:
extend[ext_id][_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[ext_id][_state].append(arg)
continue
if key == 'use':
# Add the use state's args to the
# running state
ext_id = find_name(name, _state, high)
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = {}
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
high['__extend__'] = []
for key, val in six.iteritems(extend):
high['__extend__'].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def call(self, low, chunks=None, running=None):
'''
Call a state directly with the low data structure, verify data
before processing.
'''
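        # The low data handed in is a single compiled chunk, e.g.
        # (illustrative): {'state': 'pkg', 'name': 'vim', 'fun': 'installed',
        #                  '__id__': 'vim', '__sls__': 'editors'}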
start_time = datetime.datetime.now()
log.info('Running state [{0}] at time {1}'.format(low['name'], start_time.time().isoformat()))
errors = self.verify_data(low)
if errors:
ret = {
'result': False,
'name': low['name'],
'changes': {},
'comment': '',
}
for err in errors:
ret['comment'] += '{0}\n'.format(err)
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {'result': False, 'name': low['name'], 'changes': {}}
if not low.get('__prereq__'):
log.info(
'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(
low
)
)
if 'provider' in low:
self.load_modules(low)
state_func_name = '{0[state]}.{0[fun]}'.format(low)
cdata = salt.utils.format_call(
self.states[state_func_name],
low,
initial_ret={'full': state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
'__low__': immutabletypes.freeze(low),
'__running__': immutabletypes.freeze(running) if running else {},
'__instance_id__': self.instance_id,
'__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
}
if low.get('__prereq__'):
test = sys.modules[self.states[cdata['full']].__module__].__opts__['test']
sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
# If the state function accepts an 'env' keyword argument, it
# allows the state to be overridden(we look for that in cdata). If
# that's not found in cdata, we look for what we're being passed in
# the original data, namely, the special dunder __env__. If that's
# not found we default to 'base'
if ('unless' in low and '{0[state]}.mod_run_check'.format(low) not in self.states) or \
('onlyif' in low and '{0[state]}.mod_run_check'.format(low) not in self.states):
ret.update(self._run_check(low))
if 'saltenv' in low:
inject_globals['__env__'] = str(low['saltenv'])
elif isinstance(cdata['kwargs'].get('env', None), six.string_types):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals['__env__'] = str(cdata['kwargs']['env'])
elif '__env__' in low:
# The user is passing an alternative environment using __env__
# which is also not the appropriate choice, still, handle it
inject_globals['__env__'] = str(low['__env__'])
else:
# Let's use the default environment
inject_globals['__env__'] = 'base'
if 'result' not in ret or ret['result'] is False:
with context.func_globals_inject(self.states[cdata['full']],
**inject_globals):
ret = self.states[cdata['full']](*cdata['args'],
**cdata['kwargs'])
if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
ret.update(self._run_check_cmd(low))
self.verify_ret(ret)
except Exception:
trb = traceback.format_exc()
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
if len(cdata['args']) > 0:
name = cdata['args'][0]
elif 'name' in cdata['kwargs']:
name = cdata['kwargs']['name']
else:
name = low.get('name', low.get('__id__'))
ret = {
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
}
finally:
if low.get('__prereq__'):
sys.modules[self.states[cdata['full']].__module__].__opts__[
'test'] = test
# If format_call got any warnings, let's show them to the user
if 'warnings' in cdata:
ret.setdefault('warnings', []).extend(cdata['warnings'])
if 'provider' in low:
self.load_modules()
if low.get('__prereq__'):
low['__prereq__'] = False
return ret
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
finish_time = datetime.datetime.now()
ret['start_time'] = start_time.time().isoformat()
delta = (finish_time - start_time)
# duration in milliseconds.microseconds
ret['duration'] = (delta.seconds * 1000000 + delta.microseconds)/1000.0
log.info('Completed state [{0}] at time {1}'.format(low['name'], finish_time.time().isoformat()))
return ret
def call_chunks(self, chunks):
'''
Iterate over a list of chunks and call them, checking for requires.
'''
running = {}
for low in chunks:
if '__FAILHARD__' in running:
running.pop('__FAILHARD__')
return running
tag = _gen_tag(low)
if tag not in running:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
return running
def check_failhard(self, low, running):
'''
Check if the low data chunk should send a failhard signal
'''
tag = _gen_tag(low)
        if ((low.get('failhard', False) or self.opts['failhard'])
                and tag in running):
            return not running[tag]['result']
return False
def check_requisite(self, low, running, chunks, pre=False):
'''
Look into the running data to check the status of all requisite
states
'''
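        # Returns a (status, reqs) pair: status is one of 'met', 'unmet',
        # 'fail', 'pre', 'onfail', 'onchanges' or 'change', and reqs maps
        # each requisite type to the chunks that satisfy it.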
present = False
# If mod_watch is not available make it a require
if 'watch' in low:
if '{0}.mod_watch'.format(low['state']) not in self.states:
if 'require' in low:
low['require'].extend(low.pop('watch'))
else:
low['require'] = low.pop('watch')
else:
present = True
if 'require' in low:
present = True
if 'prerequired' in low:
present = True
if 'prereq' in low:
present = True
if 'onfail' in low:
present = True
if 'onchanges' in low:
present = True
if not present:
return 'met', ()
reqs = {
'require': [],
'watch': [],
'prereq': [],
'onfail': [],
'onchanges': []}
if pre:
reqs['prerequired'] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
for req in low[r_state]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
found = True
reqs[r_state].append(chunk)
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
found = True
reqs[r_state].append(chunk)
if not found:
return 'unmet', ()
fun_stats = set()
for r_state, chunks in six.iteritems(reqs):
if r_state == 'prereq':
run_dict = self.pre
else:
run_dict = running
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
fun_stats.add('unmet')
continue
if r_state == 'onfail':
if run_dict[tag]['result'] is True:
fun_stats.add('onfail')
continue
else:
if run_dict[tag]['result'] is False:
fun_stats.add('fail')
continue
if r_state == 'onchanges':
if not run_dict[tag]['changes']:
fun_stats.add('onchanges')
continue
if r_state == 'watch' and run_dict[tag]['changes']:
fun_stats.add('change')
continue
if r_state == 'prereq' and run_dict[tag]['result'] is None:
fun_stats.add('premet')
                if r_state == 'prereq' and run_dict[tag]['result'] is not None:
fun_stats.add('pre')
else:
fun_stats.add('met')
if 'unmet' in fun_stats:
status = 'unmet'
elif 'fail' in fun_stats:
status = 'fail'
elif 'pre' in fun_stats:
if 'premet' in fun_stats:
status = 'met'
else:
status = 'pre'
elif 'onfail' in fun_stats:
status = 'onfail'
elif 'onchanges' in fun_stats:
status = 'onchanges'
elif 'change' in fun_stats:
status = 'change'
else:
status = 'met'
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
'''
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
If the `state_events` is set to True in the config, then after the
chunk is evaluated an event will be set up to the master with the
results.
'''
if not self.opts.get('local') and (self.opts.get('state_events', True) or fire_event) and self.opts.get('master_uri'):
ret = {'ret': chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(chunk_ret['name'])], 'state_result'
)
elif isinstance(fire_event, six.string_types):
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(fire_event)], 'state_result'
)
else:
tag = salt.utils.event.tagify(
[self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])], 'job'
)
ret['len'] = length
preload = {'jid': self.jid}
self.functions['event.fire_master'](ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
'''
Check if a chunk has any requires, execute the requires and then
the chunk
'''
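        # Dispatch sketch: check_requisite yields a status that selects a
        # branch below -- 'unmet' recurses into the required chunks first,
        # 'met' runs the state, 'fail' records the requisite failure,
        # 'change' routes through mod_watch, and 'pre'/'onfail'/'onchanges'
        # record placeholder results without running the state.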
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get('prerequired'):
self.active.add(tag)
requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges']
if not low.get('__prereq__'):
requisites.append('prerequired')
status, reqs = self.check_requisite(low, running, chunks, True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == 'unmet':
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
if requisite == 'prereq':
chunk['__prereq__'] = True
reqs.append(chunk)
found = True
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
if requisite == 'prereq':
chunk['__prereq__'] = True
elif requisite == 'prerequired':
chunk['__prerequired__'] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'):
comment = 'The following requisites were not found:\n'
for requisite, lreqs in six.iteritems(lost):
if not lreqs:
continue
comment += \
'{0}{1}:\n'.format(' ' * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += \
'{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val)
running[tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get('__prerequired__'):
                        # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low['__prereq__'] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error('Recursive requisite found')
running[tag] = {
'changes': {},
'result': False,
'comment': 'Recursive requisite found',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
if low.get('__prereq__'):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]['changes'] and status == 'change':
self.pre[tag]['changes'] = {'watch': 'watch'}
self.pre[tag]['result'] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == 'fail':
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]['__run_num__'] = self.__run_num
running[tag]['__sls__'] = low['__sls__']
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
                # a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in six.itervalues(reqs):
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret['result'] is False:
                            # use SLS.ID for the key -- so it's easier to find
key = '{sls}.{_id}'.format(sls=req_low['__sls__'],
_id=req_low['__id__'])
failed_requisites.add(key)
                _cmt = 'One or more requisites failed: {0}'.format(
', '.join(str(i) for i in failed_requisites)
)
running[tag] = {
'changes': {},
'result': False,
'comment': _cmt,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']
}
self.__run_num += 1
elif status == 'change' and not low.get('__prereq__'):
ret = self.call(low, chunks, running)
if not ret['changes'] and not ret.get('skip_watch', False):
low = low.copy()
low['sfun'] = low['fun']
low['fun'] = 'mod_watch'
low['__reqs__'] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
pre_ret = {'changes': {},
'result': True,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == 'onfail':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because onfail req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'onchanges':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because onchanges req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
else:
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
def call_listen(self, chunks, running):
'''
Find all of the listen routines and call the associated mod_watch runs
'''
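        # Sketch (assumed SLS usage): a state carrying
        #   listen: [{'file': '/etc/httpd/httpd.conf'}]
        # has its mod_watch run after the listed state reports changes;
        # listen_in declares the same relationship from the watched side.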
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk['state'], chunk['name'])] = chunk
crefs[(chunk['state'], chunk['__id__'])] = chunk
if 'listen' in chunk:
listeners.append({(chunk['state'], chunk['name']): chunk['listen']})
if 'listen_in' in chunk:
for l_in in chunk['listen_in']:
for key, val in six.iteritems(l_in):
listeners.append({(key, val): [{chunk['state']: chunk['name']}]})
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in six.iteritems(l_dict):
for listen_to in val:
if not isinstance(listen_to, dict):
continue
for lkey, lval in six.iteritems(listen_to):
if (lkey, lval) not in crefs:
rerror = {_l_tag(lkey, lval):
{
'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
'name': 'listen_{0}:{1}'.format(lkey, lval),
'result': False,
'changes': {}
}}
errors.update(rerror)
continue
to_tag = _gen_tag(crefs[(lkey, lval)])
if to_tag not in running:
continue
if running[to_tag]['changes']:
if key not in crefs:
rerror = {_l_tag(key[0], key[1]):
{'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
'name': 'listen_{0}:{1}'.format(key[0], key[1]),
'result': False,
'changes': {}}}
errors.update(rerror)
continue
chunk = crefs[key]
low = chunk.copy()
low['sfun'] = chunk['fun']
low['fun'] = 'mod_watch'
low['__id__'] = 'listener_{0}'.format(low['__id__'])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]['__run_num__'] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high):
'''
Process a high data call and ensure the defined states.
'''
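        # Pipeline: reconcile __extend__, verify the structure, resolve
        # requisite_in declarations, apply excludes, compile the high data
        # into low chunks, then execute them and fire any listen handlers.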
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors += ext_errors
errors += self.verify_high(high)
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors += req_in_errors
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high)
# Check for any disabled states
disabled = {}
if 'state_runs_disabled' in self.opts['grains']:
_chunks = copy.deepcopy(chunks)
for low in _chunks:
state_ = '{0}.{1}'.format(low['state'], low['fun'])
for pat in self.opts['grains']['state_runs_disabled']:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
'to re-enable, run state.enable {1}.'
).format(
state_,
pat,
)
_tag = _gen_tag(low)
disabled[_tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
chunks.remove(low)
break
        # Abort if any errors surfaced while compiling the chunks
if errors:
return errors
ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items()))
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
salt.utils.get_accumulator_dir(self.opts['cachedir']),
self.instance_id
)
try:
os.remove(accum_data_path)
log.debug('Deleted accumulator data file {0}'.format(
accum_data_path)
)
except OSError:
log.debug('File {0} does not exist, no need to cleanup.'.format(
accum_data_path)
)
_cleanup_accumulator_data()
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
'Template {0} does not render to a dictionary'.format(template)
)
return high, errors
invalid_items = ('include', 'exclude', 'extends')
for item in invalid_items:
if item in high:
errors.append(
'The \'{0}\' declaration found on \'{1}\' is invalid when '
'rendering single templates'.format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
                    # If this is a short state, it needs to be padded
if '.' in high[name]:
comps = high[name].split('.')
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
'ID {0} in template {1} is not a dictionary'.format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if high[name][key] is None:
errors.append(
'ID {0!r} in template {1} contains a short '
'declaration ({2}) with a trailing colon. When not '
'passing any arguments to a state, the colon must be '
'omitted.'.format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID {0!r} in template {1!r} contains multiple '
'state declarations of the same type'
.format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
'''
Enforce the states in a template, pass the template as a string
'''
high = compile_template_str(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, '<template-str>')
if errors:
return errors
return self.call_high(high)
class BaseHighState(object):
'''
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
'''
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = {}
def __gather_avail(self):
'''
Gather the lists of available sls data from the master
'''
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
'''
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
'''
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
        if opts.get('local_state'):
            return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts['renderer'] = 'yaml_jinja'
opts['failhard'] = False
opts['state_top'] = salt.utils.url.create('top.sls')
opts['nodegroups'] = {}
opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts['renderer'] = mopts['renderer']
opts['failhard'] = mopts.get('failhard', False)
if mopts['state_top'].startswith('salt://'):
opts['state_top'] = mopts['state_top']
elif mopts['state_top'].startswith('/'):
opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:])
else:
opts['state_top'] = salt.utils.url.create(mopts['state_top'])
opts['nodegroups'] = mopts.get('nodegroups', {})
opts['state_auto_order'] = mopts.get(
'state_auto_order',
opts['state_auto_order'])
opts['file_roots'] = mopts['file_roots']
opts['state_events'] = mopts.get('state_events')
opts['state_aggregate'] = mopts.get('state_aggregate', opts.get('state_aggregate', False))
opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False)
opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False)
return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = set(['base'])
if 'file_roots' in self.opts:
envs.update(list(self.opts['file_roots']))
return envs.union(set(self.client.envs()))
def get_tops(self):
'''
Gather the top files
'''
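        # The result maps each saltenv to a list of rendered top file data
        # structures, e.g. (illustrative):
        #   {'base': [{'base': {'*': ['core'], 'web*': ['apache']}}]}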
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
if self.opts['environment']:
contents = self.client.cache_file(
self.opts['state_top'],
self.opts['environment']
)
if contents:
found = 1
tops[self.opts['environment']] = [
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=self.opts['environment']
)
]
else:
found = 0
for saltenv in self._get_envs():
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
else:
log.debug('No contents loaded for env: {0}'.format(saltenv))
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
if found == 0:
log.error('No contents found in top file')
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
'''
Cleanly merge the top files
'''
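        # Illustrative merge (assumed top files): two tops targeting the
        # same 'web*' minions in 'base' have their match options combined
        # and their sls lists deduplicated into a single target entry.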
top = DefaultOrderedDict(OrderedDict)
for ctops in six.itervalues(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == 'include':
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
matches = []
states = set()
for comp in top[saltenv][tgt]:
if isinstance(comp, dict):
matches.append(comp)
if isinstance(comp, six.string_types):
states.add(comp)
top[saltenv][tgt] = matches
top[saltenv][tgt].extend(list(states))
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == 'include':
continue
if not isinstance(saltenv, six.string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors
def get_top(self):
'''
Returns the high data derived from the top file
'''
tops = self.get_tops()
return self.merge_tops(tops)
def top_matches(self, top):
'''
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
'''
matches = {}
# pylint: disable=cell-var-from-loop
for saltenv, body in six.iteritems(top):
if self.opts['environment']:
if saltenv != self.opts['environment']:
continue
for match, data in six.iteritems(body):
def _filter_matches(_match, _data, _opts):
if isinstance(_data, six.string_types):
_data = [_data]
if self.matcher.confirm_top(
_match,
_data,
_opts
):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if 'subfilter' in item:
_tmpdata = item.pop('subfilter')
for match, data in six.iteritems(_tmpdata):
_filter_matches(match, data, _opts)
if isinstance(item, six.string_types):
matches[saltenv].append(item)
_filter_matches(match, data, self.opts['nodegroups'])
ext_matches = self.client.ext_nodes()
for saltenv in ext_matches:
if saltenv in matches:
matches[saltenv] = list(
set(ext_matches[saltenv]).union(matches[saltenv]))
else:
matches[saltenv] = ext_matches[saltenv]
# pylint: enable=cell-var-from-loop
return matches
def load_dynamic(self, matches):
'''
If autoload_dynamic_modules is True then automatically load the
dynamic modules
'''
if not self.opts['autoload_dynamic_modules']:
return
        # The local and remote cases currently use the same sync call
        syncd = self.state.functions['saltutil.sync_all'](list(matches),
                                                          refresh=False)
if syncd['grains']:
self.opts['grains'] = salt.loader.grains(self.opts)
self.state.opts['pillar'] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
'''
Render a state file and retrieve all of the include states
'''
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get('dest', False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
'Specified SLS {0} on local filesystem cannot '
'be found.'.format(sls)
)
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master or through a configured '
'fileserver'.format(sls, saltenv)
)
state = None
try:
state = compile_template(
fn_, self.state.rend, self.state.opts['renderer'], saltenv,
sls, rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append(
'SLS {0} does not render to a dictionary'.format(sls)
)
else:
include = []
if 'include' in state:
if not isinstance(state['include'], list):
err = ('Include Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
else:
include = state.pop('include')
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = '_xenv'
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = ('Nonexistent saltenv {0!r} found in include '
'of {1!r} within SLS \'{2}:{3}\''
.format(env_key, inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith('.'):
p_comps = sls.split('.')
if state_data.get('source', '').endswith('/init.sls'):
inc_sls = sls + inc_sls
else:
inc_sls = '.'.join(p_comps[:-1]) + inc_sls
if env_key != xenv_key:
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(
self.avail[saltenv],
inc_sls
) or [inc_sls]
for sls_target in sls_targets:
r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
mod_tgt = '{0}:{1}'.format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target,
r_env,
mods,
matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ''
if not resolved_envs:
msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt '
'master in saltenv(s): {2} '
).format(env_key,
inc_sls,
', '.join(matches) if env_key == xenv_key else env_key)
elif len(resolved_envs) > 1:
msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master '
'in multiple available saltenvs: {2}'
).format(env_key,
inc_sls,
', '.join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical('Could not render SLS {0}. Syntax error detected.'.format(sls))
else:
state = {}
return state, errors
def _handle_iorder(self, state):
'''
Take a state and apply the iorder system
'''
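        # With state_auto_order enabled, each declaration lacking an
        # explicit order gets one appended, e.g. (illustrative):
        #   state['vim']['pkg'].append({'order': 10000})
        # and self.iorder increments so later declarations sort afterwards.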
if self.opts['state_auto_order']:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, six.string_types):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
# Include's or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith('_'):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(six.iterkeys(arg)) == 'order':
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append(
{'order': self.iorder}
)
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
'''
Add sls and saltenv components to the state
'''
for name in state:
if not isinstance(state[name], dict):
if name == '__extend__':
continue
if name == '__exclude__':
continue
if isinstance(state[name], six.string_types):
                    # If this is a short state, it needs to be padded
if '.' in state[name]:
comps = state[name].split('.')
state[name] = {'__sls__': sls,
'__env__': saltenv,
comps[0]: [comps[1]]}
continue
errors.append(
'ID {0} in SLS {1} is not a dictionary'.format(name, sls)
)
continue
skeys = set()
for key in state[name]:
if key.startswith('_'):
continue
if not isinstance(state[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID {0!r} in SLS {1!r} contains multiple state '
'declarations of the same type'.format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if '__sls__' not in state[name]:
state[name]['__sls__'] = sls
if '__env__' not in state[name]:
state[name]['__env__'] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
'''
Take the extend dec out of state and apply to the highstate global
dec
'''
if 'extend' in state:
ext = state.pop('extend')
if not isinstance(ext, dict):
errors.append(('Extension value in SLS {0!r} is not a '
'dictionary').format(sls))
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(('Extension name {0!r} in SLS {1!r} is '
'not a dictionary'
.format(name, sls)))
continue
if '__sls__' not in ext[name]:
ext[name]['__sls__'] = sls
if '__env__' not in ext[name]:
ext[name]['__env__'] = saltenv
for key in ext[name]:
if key.startswith('_'):
continue
if not isinstance(ext[name][key], list):
continue
if '.' in key:
comps = key.split('.')
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault('__extend__', []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
def render_highstate(self, matches):
'''
Gather the state files and render them into a single unified salt
high data structure.
'''
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in six.iteritems(matches):
for sls_match in states:
try:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
except KeyError:
all_errors.extend(['No matching salt environment for environment {0!r} found'.format(saltenv)])
                # If we did not find any sls in the fileserver listing, it
                # may be because the sls was generated or added later; try
                # to execute it directly, and if that fails it will return
                # the former error anyway
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = '{0}:{1}'.format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if 'is not available' in error:
# match SLS foobar in environment
this_sls = 'SLS {0} in saltenv'.format(
sls_match)
if this_sls in error:
errors[i] = (
'No matching sls found for {0!r} '
'in env {1!r}'.format(sls_match, saltenv))
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
if '__extend__' in highstate:
highext = []
for items in (six.iteritems(ext) for ext in highstate['__extend__']):
for item in items:
if item not in highext:
highext.append(item)
highstate['__extend__'] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if '__extend__' in state:
highstate.setdefault('__extend__',
[]).extend(state.pop('__extend__'))
if '__exclude__' in state:
highstate.setdefault('__exclude__',
[]).extend(state.pop('__exclude__'))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append((
'Detected conflicting IDs, SLS'
' IDs need to be globally unique.\n The'
' conflicting ID is {0!r} and is found in SLS'
' \'{1}:{2}\' and SLS \'{3}:{4}\'').format(
id_,
highstate[id_]['__env__'],
highstate[id_]['__sls__'],
state[id_]['__env__'],
state[id_]['__sls__'])
)
try:
highstate.update(state)
except ValueError:
errors.append(
'Error when rendering state with contents: {0}'.format(state)
)
def _check_pillar(self, force=False):
'''
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
'''
if force:
return True
if '_errors' in self.state.opts['pillar']:
return False
return True
def matches_whitelist(self, matches, whitelist):
'''
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
'''
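        # Illustrative call (assumed data): with matches
        # {'base': ['core', 'edit.vim']} and whitelist 'edit.vim', only
        # {'base': ['edit.vim']} survives.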
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(',')
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(self, exclude=None, cache=None, cache_name='highstate',
force=False, whitelist=None):
'''
Run the sequence to execute the salt highstate for this minion
'''
# Check that top file exists
tag_name = 'no_|-states_|-states_|-None'
ret = {tag_name: {
'result': False,
'comment': 'No states found for this minion',
'name': 'No States',
'changes': {},
'__run_num__': 0,
}}
cfn = os.path.join(
self.opts['cachedir'],
'{0}.cache.p'.format(cache_name)
)
if cache:
if os.path.isfile(cfn):
with salt.utils.fopen(cfn, 'rb') as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high)
        # No usable cache was found, so build the highstate from the top file
err = []
try:
top = self.get_top()
        except SaltRenderError as exc:
            ret[tag_name]['comment'] = exc.error
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = ('No Top file or external nodes data matches found')
ret[tag_name]['comment'] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ['Pillar failed to render with the following messages:']
err += self.state.opts['pillar']['_errors']
else:
high, errors = self.render_highstate(matches)
if exclude:
                if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
if err:
return err
if not high:
return ret
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to "state.highstate" cache file {0}'
log.error(msg.format(cfn))
os.umask(cumask)
return self.state.call_high(high)
def compile_highstate(self):
'''
Return just the highstate or the errors
'''
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
'''
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
'''
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
class HighState(BaseHighState):
'''
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
'''
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(self, opts, pillar=None, jid=None):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(self.opts, pillar, jid)
self.matcher = salt.minion.Matcher(self.opts)
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
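    # Illustrative usage (not part of the original module): the active stack
    # is meant to bracket a highstate run, e.g. from the pydsl renderer, so
    # nested renders can locate the current HighState:
    #
    #   hs = HighState(opts)
    #   hs.push_active()
    #   try:
    #       hs.call_highstate()
    #   finally:
    #       HighState.pop_active()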
class MasterState(State):
'''
Create a State object for master side compiling
'''
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(
self.opts,
self.opts['id']
)
# Load the states, but they should not be used in this class apart
# from inspection
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
class MasterHighState(HighState):
'''
Execute highstate compilation from the master
'''
def __init__(self, master_opts, minion_opts, grains, id_,
saltenv=None,
env=None):
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts['file_client'] = 'local'
opts['file_roots'] = master_opts['master_roots']
opts['renderer'] = master_opts['renderer']
opts['state_top'] = master_opts['state_top']
opts['id'] = id_
opts['grains'] = grains
HighState.__init__(self, opts)
class RemoteHighState(object):
'''
Manage gathering the data from the master
'''
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.Channel.factory(self.opts['master_uri'])
def compile_master(self):
'''
Return the state data from the master
'''
load = {'grains': self.grains,
'opts': self.opts,
'cmd': '_master_state'}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
| 42.352298 | 132 | 0.429516 |
c9cdaea928d556d5bb4a188c3f7a08ca162b5e63 | 882 | py | Python | data.py | hossainsadman/lstm-stock-predictor | 6d91acac3054d5b870142cae8db0ae4f8e3810d3 | [
"MIT"
] | null | null | null | data.py | hossainsadman/lstm-stock-predictor | 6d91acac3054d5b870142cae8db0ae4f8e3810d3 | [
"MIT"
] | null | null | null | data.py | hossainsadman/lstm-stock-predictor | 6d91acac3054d5b870142cae8db0ae4f8e3810d3 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
import urllib.request
import json
import pandas as pd
import datetime
load_dotenv()
ALPHA_API_KEY = os.getenv('ALPHA_API_KEY')
TICKER = 'QCOM'
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=full&apikey=%s" %(TICKER, ALPHA_API_KEY)
data_file = '%s.csv' %TICKER
if not os.path.exists(data_file):
data = json.loads(urllib.request.urlopen(url).read().decode())
data = data['Time Series (Daily)']
df = pd.DataFrame(columns=['Date','Low','High','Close','Open'])
for i, j in data.items():
date = datetime.datetime.strptime(i, '%Y-%m-%d')
data_row = [date.date(), float(j['3. low']), float(j['2. high']), float(j['4. close']), float(j['1. open'])]
df.loc[-1,:] = data_row
df.index += 1
df.to_csv(data_file, index=False)
data = pd.read_csv(data_file)
| 31.5 | 129 | 0.663265 |
6a9f3d628d7b9f1d9b737302c3dbe2ccc6bb3760 | 476 | py | Python | tests/models/test_projects.py | onaio/tasking | 5faff50a2f3575f0df91a6b20afe37d43a592381 | [
"Apache-2.0"
] | 6 | 2018-05-07T14:40:26.000Z | 2020-02-26T11:41:58.000Z | tests/models/test_projects.py | onaio/tasking | 5faff50a2f3575f0df91a6b20afe37d43a592381 | [
"Apache-2.0"
] | 95 | 2018-05-08T08:34:23.000Z | 2020-01-24T08:36:13.000Z | tests/models/test_projects.py | onaio/tasking | 5faff50a2f3575f0df91a6b20afe37d43a592381 | [
"Apache-2.0"
] | 2 | 2018-06-15T02:17:53.000Z | 2018-09-28T06:04:37.000Z | """
Test for Project model
"""
from django.test import TestCase
from model_mommy import mommy
class TestProject(TestCase):
"""
Test class for TaskProject models
"""
def test_project_model_str(self):
"""
Test __str__ method of Project model
"""
livestock_task_list = mommy.make("tasking.Project", name="Livestock tasks")
expected = "Livestock tasks"
self.assertEqual(expected, livestock_task_list.__str__())
| 21.636364 | 83 | 0.665966 |
1145f97734923b9ab12e913cb72982c2e5a536fb | 1,612 | py | Python | API/src/main/resources/Lib/robot/reporting/stringcache.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/reporting/stringcache.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/reporting/stringcache.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from robot.utils import compress_text
class StringIndex(int):
pass
class StringCache(object):
_compress_threshold = 80
_use_compressed_threshold = 1.1
_zero_index = StringIndex(0)
def __init__(self):
self._cache = OrderedDict({'*': self._zero_index})
def add(self, text):
if not text:
return self._zero_index
text = self._encode(text)
if text not in self._cache:
self._cache[text] = StringIndex(len(self._cache))
return self._cache[text]
def _encode(self, text):
raw = self._raw(text)
if raw in self._cache or len(raw) < self._compress_threshold:
return raw
compressed = compress_text(text)
if len(compressed) * self._use_compressed_threshold < len(raw):
return compressed
return raw
def _raw(self, text):
return '*'+text
def dump(self):
return tuple(self._cache)
| 29.851852 | 75 | 0.673697 |
5ac3be6178eb2fed901a3dc5818e30b8e4364dd7 | 676 | py | Python | sigtrac/devices/models.py | nyaruka/sigtrac | edee5851047f6159d61c541e41848ff2b4bb58b1 | [
"MIT"
] | null | null | null | sigtrac/devices/models.py | nyaruka/sigtrac | edee5851047f6159d61c541e41848ff2b4bb58b1 | [
"MIT"
] | null | null | null | sigtrac/devices/models.py | nyaruka/sigtrac | edee5851047f6159d61c541e41848ff2b4bb58b1 | [
"MIT"
] | null | null | null | from django.db import models
from smartmin.models import SmartModel
DEVICE_CHOICES = (('WEB', "Web Browser"),
('AND', "Android Phone"),
('IOS', "iPhone"))
class Device(models.Model):
device_type = models.CharField(max_length=16, choices=DEVICE_CHOICES,
help_text="What kind of device this is")
uuid = models.CharField(max_length=40,
help_text="The unique id for this device")
created_on = models.DateTimeField(auto_now_add=True,
help_text="When this report was created")
def __unicode__(self):
return self.uuid | 35.578947 | 79 | 0.58432 |
26a19ed7a6df66264afab1f5f526bb740c36e6f6 | 2,285 | py | Python | GoogleCodeJam/SpeakingInTongues_2012_Qualification/Decoder.py | otkaverappa/AdventureOfCode | a2c3f08f5deadca70a998b43b341eb31def7fa8f | [
"Apache-2.0"
] | null | null | null | GoogleCodeJam/SpeakingInTongues_2012_Qualification/Decoder.py | otkaverappa/AdventureOfCode | a2c3f08f5deadca70a998b43b341eb31def7fa8f | [
"Apache-2.0"
] | null | null | null | GoogleCodeJam/SpeakingInTongues_2012_Qualification/Decoder.py | otkaverappa/AdventureOfCode | a2c3f08f5deadca70a998b43b341eb31def7fa8f | [
"Apache-2.0"
] | null | null | null | import unittest
import string
class Decoder:
def __init__( self, ioSampleList ):
self.decoderMap = dict()
for sourceList, targetList in ioSampleList:
assert len( sourceList ) == len( targetList )
for i in range( len( sourceList ) ):
S, T = sourceList[ i ], targetList[ i ]
if S.isspace():
assert S == T
continue
if S in self.decoderMap:
assert self.decoderMap[ S ] == T
continue
self.decoderMap[ S ] = T
expectedCount = len( string.ascii_lowercase )
assert len( self.decoderMap ) in (expectedCount, expectedCount - 1)
if len( self.decoderMap ) == expectedCount - 1:
S = set.difference( set( string.ascii_lowercase ), set( self.decoderMap.keys() ) ).pop()
T = set.difference( set( string.ascii_lowercase ), set( self.decoderMap.values() ) ).pop()
self.decoderMap[ S ] = T
def decode( self, inputString ):
decodedCharList = list( inputString )
for i in range( len( inputString ) ):
if inputString[ i ].isspace():
continue
decodedCharList[ i ] = self.decoderMap[ inputString[ i ] ]
return ''.join( decodedCharList )
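# Illustrative usage (not part of the original solution). Feeding the four
# sample pairs from the problem statement pins down 25 of the 26 letters, and
# the constructor deduces the last source letter ('z' -> 'q') by elimination:
#
#   decoder = Decoder( [('y qee', 'a zoo'), ...] )  # plus the other samples
#   decoder.decode( 'ejp' )  # -> 'our'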
class DecoderTest( unittest.TestCase ):
def test_decode( self ):
inputStringList = list()
with open( 'tests/small.in' ) as inputFile:
T = int( inputFile.readline().strip() )
for _ in range( T ):
inputStringList.append( inputFile.readline().strip() )
solutionList = list()
with open( 'tests/small.ans' ) as solutionFile:
for line in solutionFile.readlines():
solutionList.append( line.strip() )
assert len( solutionList ) == T
ioSampleList = list()
ioSampleList.append( ('y qee', 'a zoo') )
ioSampleList.append( ('ejp mysljylc kd kxveddknmc re jsicpdrysi', 'our language is impossible to understand') )
ioSampleList.append( ('rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd', 'there are twenty six factorial possibilities') )
ioSampleList.append( ('de kr kd eoya kw aej tysr re ujdr lkgc jv', 'so it is okay if you want to just give up') )
decoder = Decoder( ioSampleList )
for i in range( T ):
decodedText = decoder.decode( inputStringList[ i ] )
print( 'Testcase {} Text: {}'.format( i + 1, decodedText ) )
self.assertEqual( 'Case #{}: {}'.format( i + 1, decodedText ), solutionList[ i ] )
if __name__ == '__main__':
unittest.main() | 35.703125 | 121 | 0.674836 |
53d620b41031ca6c845551d0f00c2902702d669a | 23,376 | py | Python | sklearn/tests/test_calibration.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 3 | 2019-11-18T13:47:42.000Z | 2021-08-22T23:37:47.000Z | sklearn/tests/test_calibration.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 12 | 2021-03-06T23:42:46.000Z | 2021-04-04T00:10:42.000Z | sklearn/tests/test_calibration.py | emarkou/scikit-learn | d73822f84f2832dcc25f0ff58769f60871a78025 | [
"BSD-3-Clause"
] | 2 | 2017-06-27T12:40:35.000Z | 2021-08-22T23:37:35.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
with pytest.raises(ValueError):
cal_clf.fit(X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
# `base-estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
# Test an edge case where _CalibratedClassifier avoids numerical errors
# in the multiclass normalization step if all the calibrators output
# are zero all at once for a given sample and instead fallback to uniform
# probabilities.
class ZeroCalibrator():
# This function is called from _CalibratedClassifier.predict_proba.
def predict(self, X):
return np.zeros(X.shape[0])
X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
centers=10, cluster_std=15.0)
clf = DummyClassifier().fit(X, y)
calibrator = ZeroCalibrator()
cal_clf = _CalibratedClassifier(
base_estimator=clf, calibrators=[calibrator], classes=clf.classes_)
probas = cal_clf.predict_proba(X)
# Check that all probabilities are uniformly 1. / clf.n_classes_
assert_allclose(probas, 1. / clf.n_classes_)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
with pytest.raises(ValueError):
_SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
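    # Worked out: with n_bins=2 the uniform bin edges are [0, 0.5, 1], so
    # predictions {0., 0.1, 0.2} fall in the first bin (labels 0, 0, 0 ->
    # mean 0, mean prediction 0.1) and {0.8, 0.9, 1.} in the second (labels
    # 1, 1, 1 -> mean 1, mean prediction 0.9), giving [0, 1] and [0.1, 0.9].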
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
with pytest.raises(ValueError):
calibration_curve([1.1], [-0.1], normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
with pytest.raises(ValueError):
calibration_curve(y_true2, y_pred2, strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def dict_data():
dict_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return dict_data, text_labels
@pytest.fixture
def dict_data_pipeline(dict_data):
X, y = dict_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
"""Test that calibration works in prefit pipeline with transformer
`X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
"""
X, y = dict_data
clf = dict_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
# Neither the pipeline nor the calibration meta-estimator
# expose the n_features_in_ check on this kind of data.
assert not hasattr(clf, 'n_features_in_')
assert not hasattr(calib_clf, 'n_features_in_')
# Ensure that no error is thrown with predict and predict_proba
calib_clf.predict(X)
calib_clf.predict_proba(X)
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
def test_calibration_inconsistent_prefit_n_features_in():
# Check that `n_features_in_` from prefit base estimator
# is consistent with training set
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
clf = LinearSVC(C=1).fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
msg = "X has 3 features, but LinearSVC is expecting 5 features as input."
with pytest.raises(ValueError, match=msg):
calib_clf.fit(X[:, :3], y)
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
| 38.321311 | 79 | 0.678046 |
798947b492027e7e927b2f35dad5f62b5a8b5411 | 3,586 | py | Python | google/cloud/secretmanager/v1/secretmanager-v1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/secretmanager/v1/secretmanager-v1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/secretmanager/v1/secretmanager-v1-py/noxfile.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
nox.sessions = [
    "unit",
    "cover",
    "mypy",
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
]
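# Sessions can also be run individually from the command line, e.g.
# `nox -s docs` or, for a session parametrized over pythons, `nox -s unit-3.8`.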
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
"""Run the unit test suite."""
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/secretmanager_v1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python='3.7')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
"""Run the type checker."""
session.install('mypy', 'types-pkg_resources')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session
def update_lower_bounds(session):
"""Update lower bounds in constraints.txt to match setup.py"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'update',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session
def check_lower_bounds(session):
"""Check lower bounds in setup.py are reflected in constraints file"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'check',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session(python='3.6')
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| 26.962406 | 96 | 0.62744 |
17bd310ab2fb9541b92d2e10dc784049f6c950b4 | 4,735 | py | Python | jax_dft/jax_dft/losses_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | jax_dft/jax_dft/losses_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | jax_dft/jax_dft/losses_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for jax_dft.losses."""
from absl.testing import absltest
from jax.config import config
import jax.numpy as jnp
import numpy as np
from jax_dft import losses
# Set the default dtype as float64
config.update('jax_enable_x64', True)
class LossesTest(absltest.TestCase):
def test_trajectory_mse_wrong_predict_ndim(self):
with self.assertRaisesRegex(
ValueError,
'The size of the shape of predict should be '
'greater or equal to 2, got 1'):
losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([0.6, 0.6, 0.6, 0.6]),
discount=1.)
def test_trajectory_mse_wrong_predict_target_ndim_difference(self):
with self.assertRaisesRegex(
ValueError,
'The size of the shape of predict should be greater than '
'the size of the shape of target by 1, '
r'but got predict \(2\) and target \(2\)'):
losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
discount=1.)
def test_density_mse(self):
self.assertAlmostEqual(
float(losses.mean_square_error(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([[0.4, 0.5, 0.2, 0.3], [0.6, 0.6, 0.6, 0.6]]))),
# ((
# (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# ) / 4 + 0) / 2 = 0.0175
0.0175)
def test_energy_mse(self):
self.assertAlmostEqual(
float(losses.mean_square_error(
target=jnp.array([[0.2, 0.6]]),
predict=jnp.array([[0.4, 0.7]]))),
# ((0.4 - 0.2) ** 2 + (0.7 - 0.6) ** 2) / 2 = 0.025
0.025)
def test_get_discount_coefficients(self):
np.testing.assert_allclose(
losses._get_discount_coefficients(num_steps=4, discount=0.8),
[0.512, 0.64, 0.8, 1.])
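    # i.e. coefficient i equals discount ** (num_steps - 1 - i): the final
    # step is weighted 1 and earlier steps decay geometrically, here
    # [0.8 ** 3, 0.8 ** 2, 0.8 ** 1, 0.8 ** 0] == [0.512, 0.64, 0.8, 1.].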
def test_trajectory_mse_on_density(self):
self.assertAlmostEqual(
float(losses.trajectory_mse(
target=jnp.array([[0.2, 0.2, 0.2, 0.2], [0.6, 0.6, 0.6, 0.6]]),
predict=jnp.array([
[[0.4, 0.5, 0.2, 0.3],
[0.3, 0.3, 0.2, 0.2],
[0.3, 0.3, 0.3, 0.2]],
[[0.6, 0.6, 0.6, 0.6],
[0.6, 0.6, 0.6, 0.5],
[0.6, 0.6, 0.6, 0.6]]]),
discount=0.6)),
# First sample in the batch:
# (
# (0.4 - 0.2) ** 2 + (0.5 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# ) / 4 * 0.6 * 0.6
# + (
# (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# + (0.2 - 0.2) ** 2 + (0.2 - 0.2) ** 2
# ) / 4 * 0.6
# + (
# (0.3 - 0.2) ** 2 + (0.3 - 0.2) ** 2
# + (0.3 - 0.2) ** 2 + (0.2 - 0.2) ** 2
# ) / 4 = 0.0231
# Second sample in the batch:
# (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# ) / 4 * 0.6 * 0.6
# + (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.5 - 0.6) ** 2
# ) / 4 * 0.6
# + (
# (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# + (0.6 - 0.6) ** 2 + (0.6 - 0.6) ** 2
# ) / 4 = 0.0015
# Loss:
# (0.0231 + 0.0015) / 2 = 0.0123
0.0123)
def test_trajectory_mse_on_energy(self):
self.assertAlmostEqual(
float(losses.trajectory_mse(
target=jnp.array([0.2, 0.6]),
predict=jnp.array([[0.4, 0.3, 0.2], [0.7, 0.7, 0.7]]),
discount=0.6)),
# First sample in the batch:
# ((0.4 - 0.2) ** 2 * 0.6 * 0.6
# + (0.3 - 0.2) ** 2 * 0.6 + (0.2 - 0.2) ** 2) = 0.0204
# Second sample in the batch:
# ((0.7 - 0.6) ** 2 * 0.6 * 0.6
# + (0.7 - 0.6) ** 2 * 0.6 + (0.7 - 0.6) ** 2) = 0.0196
# Loss:
# (0.0204 + 0.0196) / 2 = 0.02
0.02)
if __name__ == '__main__':
absltest.main()
| 34.064748 | 78 | 0.485744 |
47f63d016686121879c41568357ea79c17f4566a | 5,509 | py | Python | example/example/settings.py | stenius/django-hunger | 712684dd6ff8b776db6c3245ec0a134e9cd7a66b | [
"BSD-3-Clause"
] | 37 | 2015-01-05T16:07:20.000Z | 2021-08-09T18:01:12.000Z | example/example/settings.py | stenius/django-hunger | 712684dd6ff8b776db6c3245ec0a134e9cd7a66b | [
"BSD-3-Clause"
] | 16 | 2015-01-02T14:58:29.000Z | 2021-06-10T17:30:49.000Z | example/example/settings.py | stenius/django-hunger | 712684dd6ff8b776db6c3245ec0a134e9cd7a66b | [
"BSD-3-Clause"
] | 12 | 2015-01-02T15:04:16.000Z | 2020-06-07T13:02:36.000Z | # Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'example.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'y#rmfqpl68yg!=!ue7^(y^^sdbfrph-p*$oc0398$m@ayff@c6'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'hunger.middleware.BetaMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# One-week activation window; you may, of course, use a different value.
ACCOUNT_ACTIVATION_DAYS = 7
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
HUNGER_ALWAYS_ALLOW_VIEWS = (
'registration_activation_complete',
'registration_activate',
'registration_complete',
'registration_disallowed',
'registration_register',
'home',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'registration',
'hunger',
'example',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 32.791667 | 108 | 0.691051 |
6a616fa066cd931ee6fa8fd30af51848c2e98180 | 4,191 | py | Python | alien/collection.py | palexandremello/GPyM | 10daa25e4d68d527c799f6bf99225b4992e43319 | [
"MIT"
] | 1 | 2021-03-08T15:47:52.000Z | 2021-03-08T15:47:52.000Z | build/lib/GPyM/alien/collection.py | palexandremello/GPyM | 10daa25e4d68d527c799f6bf99225b4992e43319 | [
"MIT"
] | 1 | 2019-01-08T17:56:27.000Z | 2019-01-08T17:56:27.000Z | build/lib/GPyM/alien/collection.py | palexandremello/GPyM | 10daa25e4d68d527c799f6bf99225b4992e43319 | [
"MIT"
] | null | null | null | #! /usr/bin/python
import os,sys
from io import StringIO
from numpy import load, save, array
from numpy.lib.format import open_memmap
#def cached(mode='normal',cacheName=None,cacheDir='./cached',compress='lz4'):
def cached(name=None, cdir='./cached', compress=False, mode='cached', verbose=True, purge_empty_file=True):
'''
mode : in ['cached', # read from cached file if exists
'skip' , # skip caching process
'update' # force to update cached file
] or False
compress : in ['lz4', False]
'''
def wrapper(func):
def inner(*args, **kwargs):
mode = wrapper.mode
name = wrapper.name
cdir = wrapper.cdir
compress = wrapper.compress
verbose = wrapper.verbose
if mode in [False, 'skip'] : return func( *args, **kwargs )
if name == None : name = func.__name__
if not os.path.exists(cdir) : os.makedirs(cdir)
cachePath = os.path.join(cdir, name)
            if compress : import lz4.frame
            if os.path.exists( cachePath ) and mode != 'update':
                if compress == 'lz4':
                    cached = BytesIO( lz4.frame.decompress( open(cachePath,'rb').read() ) )
else:
cached = cachePath
#cached = open(cachePath,'r')
if verbose: print ('\t!! Cached from %s'%cachePath)
aOut = load( cached )
if aOut.shape != () or purge_empty_file == False:
return aOut
else:
os.remove( cachePath )
raise ValueError ('empty cache file (erased): %s'%(cachePath))
if os.path.exists( cachePath ) == False or mode == 'update':
aOut = func( *args, **kwargs )
                if compress == 'lz4':
                    cached = BytesIO()
                    save( cached, aOut )
                    open(cachePath,'wb').write( lz4.frame.compress( cached.getvalue() ) )
else:
fCache = open(cachePath,'wb')
save( fCache, aOut )
fCache.close()
if verbose: print ('\t!! Cached to %s'%cachePath)
return aOut
raise KeyError('failed exception handling for %s and %s'%( cachePath, mode ))
return inner
wrapper.name = name
wrapper.mode = mode
wrapper.cdir = cdir
wrapper.compress = compress
wrapper.verbose = verbose
return wrapper
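# Usage sketch (illustrative, not from the original sources): decorate any
# function that returns a numpy array and its result is memoised on disk.
#
#   @cached(name='heavy.npy', cdir='./cached')
#   def heavy():
#       return array([1.0, 2.0, 3.0])
#
#   heavy()   # computes and writes ./cached/heavy.npy
#   heavy()   # loads the cached array instead of recomputing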
# push_cache, pop_cache
def push_cache(aOut,varName,itrmCode,timeCode,cacheDir=None,ow=False):
if cacheDir == None:
baseDir = './cached/%s.%s'%(varName,itrmCode)
else:
baseDir = cacheDir
if not os.path.exists(baseDir):
os.makedirs(baseDir)
outPath = os.path.join(baseDir,'%s.%s.%s.npy'%(varName,itrmCode,timeCode))
if os.path.exists(outPath) and ow == False: # file size and array size compare [ToDo]
return False
else:
save(outPath, aOut.astype('float32')) # better dtype treatment [ToDo]
return True
def pop_cache(varName,itrmCode,timeCode,func,args,cacheDir=None,cache=True,mmap=None,returnTF=False):
if cacheDir == None:
baseDir = './%s.%s'%(varName,itrmCode)
else:
baseDir = cacheDir
srcPath = os.path.join(baseDir,'%s.%s.%s.npy'%(varName,itrmCode,timeCode))
if os.path.exists(srcPath) and cache != 'ow':
aSrc = load(srcPath, mmap_mode=mmap)
else:
# replace None with srcPath to cache
if func == open_memmap:
if not os.path.exists(baseDir):
os.makedirs(baseDir)
aSrc= func(srcPath, *args)
else:
aSrc = func(*args)
ow = True if cache == 'ow' else False
if cache == True:
push_cache(aSrc,varName,itrmCode,timeCode,cacheDir=cacheDir,ow=ow)
if returnTF : return aSrc,False
else : return aSrc
| 29.723404 | 107 | 0.524457 |
0289bad5728f65310e0e375e28fdc85258bae863 | 1,309 | py | Python | SayuBot/helper/mongo_connect.py | TaprisSugarbell/SayUbot | 8c8beea35af3229d24cdbdd7b03063e9c52a4346 | [
"MIT"
] | null | null | null | SayuBot/helper/mongo_connect.py | TaprisSugarbell/SayUbot | 8c8beea35af3229d24cdbdd7b03063e9c52a4346 | [
"MIT"
] | null | null | null | SayuBot/helper/mongo_connect.py | TaprisSugarbell/SayUbot | 8c8beea35af3229d24cdbdd7b03063e9c52a4346 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from .mongo_db import *
# Variables
load_dotenv()
URI = os.getenv("URI")
async def confirm(user_db, data=None):
if data is None:
data = {}
return user_db.find(data)
async def add_(user_db, data=None):
if data is None:
data = {}
return user_db.insert_one(data)
async def update_(user_db, old_data=None, new_data=None):
if old_data is None:
old_data = {}
if new_data is None:
new_data = {}
return user_db.update_one(old_data, new_data)
async def remove_(user_db, data=None):
if data is None:
data = {}
return user_db.delete_one(data)
def confirm_ofdb(user_db, data=None):
if data is None:
data = {}
return user_db.find(data)
def add_ofdb(user_db, data=None):
if data is None:
data = {}
return user_db.insert_one(data)
def update_ofdb(user_db, old_data=None, new_data=None):
if old_data is None:
old_data = {}
if new_data is None:
new_data = {}
return user_db.update_one(old_data, new_data)
def remove_ofdb(user_db, data=None):
if data is None:
data = {}
return user_db.delete_one(data)
def remove_many(user_db, data=None):
if data is None:
data = {}
return user_db.delete_many(data)
| 19.833333 | 57 | 0.644003 |
d95609461e5c7bec13a9db646f0fec586a4e130f | 1,646 | py | Python | src/data_utils/utils.py | NoOneUST/COMP5212 | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
] | 3 | 2020-04-05T06:50:46.000Z | 2020-04-05T08:20:33.000Z | src/data_utils/utils.py | NoOneUST/COMP5212Project | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
] | 2 | 2021-05-21T16:24:54.000Z | 2022-02-10T01:21:54.000Z | src/data_utils/utils.py | NoOneUST/COMP5212Project | 171b564f08841e426545f58e3b52870c0e090586 | [
"MIT"
] | 1 | 2020-06-15T16:22:20.000Z | 2020-06-15T16:22:20.000Z | # This file is for all utility functions
import os
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def save(toBeSaved, filename, mode='wb'):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
file = open(filename, mode)
pickle.dump(toBeSaved, file, protocol=4) # protocol 4 allows large size object, it's the default since python 3.8
file.close()
def load(filename, mode='rb'):
file = open(filename, mode)
loaded = pickle.load(file)
file.close()
return loaded
def pad_sents(sents, pad_token=0, max_len=512):
    lens = get_lens(sents)
    max_len = min(max(lens), max_len)
    sents_padded = []
new_len = []
for i, l in enumerate(lens):
if l > max_len:
l = max_len
new_len.append(l)
sents_padded.append(sents[i][:l] + [pad_token] * (max_len - l))
return sents_padded, new_len
def sort_sents(sents, reverse=True):
sents.sort(key=(lambda s: len(s)), reverse=reverse)
return sents
def get_mask(sents, unmask_idx=1, mask_idx=0, max_len=512):
lens = get_lens(sents)
max_len = min(max(lens), max_len)
mask = []
for l in lens:
if l > max_len:
l = max_len
mask.append([unmask_idx] * l + [mask_idx] * (max_len - l))
return mask
def get_lens(sents):
return [len(sent) for sent in sents]
def get_max_len(sents):
max_len = max([len(sent) for sent in sents])
return max_len
def truncate_sents(sents, length):
sents = [sent[:length] for sent in sents]
return sents | 26.983607 | 117 | 0.648846 |
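if __name__ == '__main__':
    # Quick illustration (not part of the original module) of how padding and
    # masking line up for a toy batch.
    batch = [[5, 6, 7], [8]]
    padded, lens = pad_sents(batch, pad_token=0)
    print(padded)  # [[5, 6, 7], [8, 0, 0]]
    print(lens)    # [3, 1]
    print(get_mask(batch))  # [[1, 1, 1], [1, 0, 0]]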
ad1c2678690b6d50f0643b1acbca097ffe30ae5d | 1,581 | py | Python | estimated_demo/plot_spec.py | IPCV/VocaLiST | 57d8df6252e1badeb00874591aa2cb9111557468 | [
"MIT"
] | null | null | null | estimated_demo/plot_spec.py | IPCV/VocaLiST | 57d8df6252e1badeb00874591aa2cb9111557468 | [
"MIT"
] | null | null | null | estimated_demo/plot_spec.py | IPCV/VocaLiST | 57d8df6252e1badeb00874591aa2cb9111557468 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import librosa
import librosa.display
import os
import pydub
from pathlib import Path
NFFT = 1022
HOP_LENGTH = 256
TARGET_SAMPLING_RATE = 16384
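# With n_fft=1022 the STFT below yields n_fft // 2 + 1 = 512 frequency bins,
# so each spectrogram column is a 512-dim vector taken every 256 samples
# (~15.6 ms at 16384 Hz).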
wav_files = Path('.').rglob('*.wav')
for file in wav_files:
file_path = file.as_posix()
"""
if ('mod' in file_path) or (('6Ws1WKA4z2k_0_35_to_0_48' in file_path) \
or ('cttFanV0o7c_0_07_to_2_44' in file_path) \
or ('pRh9rKd2j64_0_15_to_0_55' in file_path) \
or ('sEnTMgzw8ow_1_5_to_2_07' in file_path) \
or ('sEnTMgzw8ow_1_29_to_1_47' in file_path) \
or ('sEnTMgzw8ow_2_38_to_2_53' in file_path) \
or ('tcol' in file_path) \
or ('vyu3HU3XWi4_2_04_to_2_14' in file_path)):
"""
if ('mod' in file_path):
file_name = os.path.basename(file_path)
folder_path = os.path.dirname(file_path)
output_path = os.path.join(folder_path, file_name[:-4] + '.png')
        y, sr = librosa.load(file_path, sr=TARGET_SAMPLING_RATE)
sound = pydub.AudioSegment.from_wav(file_path)
sound.export(file_path[:-4]+'.mp3', format="mp3")
stft = librosa.stft(y, n_fft=NFFT, hop_length=HOP_LENGTH)
D = librosa.amplitude_to_db(np.absolute(stft),
ref=np.max)
librosa.display.specshow(D, sr=sr, hop_length=HOP_LENGTH, x_axis='time', cmap='Reds', y_axis='linear')
plt.ylabel(None)
plt.xlabel(None)
plt.savefig(output_path, bbox_inches='tight')
#plt.show()
| 36.767442 | 110 | 0.635674 |
cad4dc2f6dadc96da099c0329e57d4c624316ba0 | 10,255 | py | Python | src/twisted/plugins/cowrie_plugin.py | GreyNoise-Intelligence/cowrie | d2a9b30f5fd23428baf32e2de1d24e944cf8cde7 | [
"BSD-3-Clause"
] | null | null | null | src/twisted/plugins/cowrie_plugin.py | GreyNoise-Intelligence/cowrie | d2a9b30f5fd23428baf32e2de1d24e944cf8cde7 | [
"BSD-3-Clause"
] | null | null | null | src/twisted/plugins/cowrie_plugin.py | GreyNoise-Intelligence/cowrie | d2a9b30f5fd23428baf32e2de1d24e944cf8cde7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015 Michel Oosterhof <michel@oosterhof.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The names of the author(s) may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import sys
from backend_pool.pool_server import PoolServerFactory
from twisted._version import __version__ as __twisted_version__
from twisted.application import service
from twisted.application.service import IServiceMaker
from twisted.cred import portal
from twisted.internet import reactor
from twisted.logger import ILogObserver, globalLogPublisher
from twisted.plugin import IPlugin
from twisted.python import log, usage
from zope.interface import implementer, provider
import cowrie.core.checkers
import cowrie.core.realm
import cowrie.ssh.factory
import cowrie.telnet.factory
from cowrie import core
from cowrie._version import __version__ as __cowrie_version__
from cowrie.core.config import CowrieConfig
from cowrie.core.utils import create_endpoint_services, get_endpoints_from_section
from cowrie.pool_interface.handler import PoolHandler
if __twisted_version__.major < 17:
raise ImportError(
"Your version of Twisted is too old. Please ensure your virtual environment is set up correctly."
)
class Options(usage.Options):
"""
This defines commandline options and flags
"""
# The '-c' parameters is currently ignored
optParameters = []
optFlags = [["help", "h", "Display this help and exit."]]
@provider(ILogObserver)
def importFailureObserver(event):
if "failure" in event and event["failure"].type is ImportError:
log.err(
"ERROR: %s. Please run `pip install -U -r requirements.txt` "
"from Cowrie's install directory and virtualenv to install "
"the new dependency" % event["failure"].value.message
)
globalLogPublisher.addObserver(importFailureObserver)
@implementer(IServiceMaker, IPlugin)
class CowrieServiceMaker:
tapname = "cowrie"
description = "She sells sea shells by the sea shore."
options = Options
output_plugins = None
def __init__(self):
self.topService = None
self.pool_handler = None
# ssh is enabled by default
self.enableSSH = CowrieConfig().getboolean("ssh", "enabled", fallback=True)
# telnet is disabled by default
self.enableTelnet = CowrieConfig().getboolean(
"telnet", "enabled", fallback=False
)
# pool is disabled by default, but need to check this setting in case user only wants to run the pool
self.pool_only = CowrieConfig().getboolean(
"backend_pool", "pool_only", fallback=False
)
def makeService(self, options):
"""
Construct a TCPServer from a factory defined in Cowrie.
"""
if options["help"] is True:
print(
"""Usage: twistd [options] cowrie [-h]
Options:
-h, --help print this help message.
Makes a Cowrie SSH/Telnet honeypot.
"""
)
sys.exit(1)
if os.name == "posix" and os.getuid() == 0:
print("ERROR: You must not run cowrie as root!")
sys.exit(1)
tz = CowrieConfig().get("honeypot", "timezone", fallback="UTC")
# `system` means use the system time zone
if tz != "system":
os.environ["TZ"] = tz
log.msg("Python Version {}".format(str(sys.version).replace("\n", "")))
log.msg(
"Twisted Version {}.{}.{}".format(
__twisted_version__.major,
__twisted_version__.minor,
__twisted_version__.micro,
)
)
log.msg(
"Cowrie Version {}.{}.{}".format(
__cowrie_version__.major,
__cowrie_version__.minor,
__cowrie_version__.micro,
)
)
# check configurations
if not self.enableTelnet and not self.enableSSH and not self.pool_only:
print(
"ERROR: You must at least enable SSH or Telnet, or run the backend pool"
)
sys.exit(1)
# Load output modules
self.output_plugins = []
for x in CowrieConfig().sections():
if not x.startswith("output_"):
continue
if CowrieConfig().getboolean(x, "enabled") is False:
continue
engine = x.split("_")[1]
try:
output = __import__(
f"cowrie.output.{engine}", globals(), locals(), ["output"]
).Output()
log.addObserver(output.emit)
self.output_plugins.append(output)
log.msg(f"Loaded output engine: {engine}")
except ImportError as e:
log.err(
f"Failed to load output engine: {engine} due to ImportError: {e}"
)
log.msg(
f"Please install the dependencies for {engine} listed in requirements-output.txt"
)
except Exception:
log.err()
log.msg(f"Failed to load output engine: {engine}")
self.topService = service.MultiService()
application = service.Application("cowrie")
self.topService.setServiceParent(application)
# initialise VM pool handling - only if proxy AND pool set to enabled, and pool is to be deployed here
# or also enabled if pool_only is true
backend_type = CowrieConfig().get("honeypot", "backend", fallback="shell")
proxy_backend = CowrieConfig().get("proxy", "backend", fallback="simple")
if (backend_type == "proxy" and proxy_backend == "pool") or self.pool_only:
# in this case we need to set some kind of pool connection
local_pool = (
CowrieConfig().get("proxy", "pool", fallback="local") == "local"
)
pool_host = CowrieConfig().get("proxy", "pool_host", fallback="127.0.0.1")
pool_port = CowrieConfig().getint("proxy", "pool_port", fallback=6415)
if local_pool or self.pool_only:
# start a pool locally
f = PoolServerFactory()
f.tac = self
listen_endpoints = get_endpoints_from_section(
CowrieConfig(), "backend_pool", 6415
)
create_endpoint_services(reactor, self.topService, listen_endpoints, f)
pool_host = "127.0.0.1" # force use of local interface
# either way (local or remote) we set up a client to the pool
# unless this instance has no SSH and Telnet (pool only)
if (self.enableTelnet or self.enableSSH) and not self.pool_only:
self.pool_handler = PoolHandler(pool_host, pool_port, self)
else:
# we initialise the services directly
self.pool_ready()
return self.topService
def pool_ready(self):
backend = CowrieConfig().get("honeypot", "backend", fallback="shell")
        # this method is never called if self.pool_only is True,
        # since we do not start the pool handler that would call it
if self.enableSSH:
factory = cowrie.ssh.factory.CowrieSSHFactory(backend, self.pool_handler)
factory.tac = self
factory.portal = portal.Portal(core.realm.HoneyPotRealm())
factory.portal.registerChecker(core.checkers.HoneypotPublicKeyChecker())
factory.portal.registerChecker(core.checkers.HoneypotPasswordChecker())
if CowrieConfig().getboolean("ssh", "auth_none_enabled", fallback=False):
factory.portal.registerChecker(core.checkers.HoneypotNoneChecker())
if CowrieConfig().has_section("ssh"):
listen_endpoints = get_endpoints_from_section(
CowrieConfig(), "ssh", 2222
)
else:
listen_endpoints = get_endpoints_from_section(
CowrieConfig(), "honeypot", 2222
)
create_endpoint_services(
reactor, self.topService, listen_endpoints, factory
)
if self.enableTelnet:
f = cowrie.telnet.factory.HoneyPotTelnetFactory(backend, self.pool_handler)
f.tac = self
f.portal = portal.Portal(core.realm.HoneyPotRealm())
f.portal.registerChecker(core.checkers.HoneypotPasswordChecker())
listen_endpoints = get_endpoints_from_section(
CowrieConfig(), "telnet", 2223
)
create_endpoint_services(reactor, self.topService, listen_endpoints, f)
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.
serviceMaker = CowrieServiceMaker()
| 37.981481 | 110 | 0.637933 |
9ece9ebf1ab707d374708beae2a7622c7536124a | 571 | py | Python | example.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | 1 | 2021-03-02T15:48:57.000Z | 2021-03-02T15:48:57.000Z | example.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | null | null | null | example.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from carp import context
from carp import util
from carp import render
## sync 数据
context.sync_history_bar()
## 获取stocks list
symbols = context.get_stock_list()
print(symbols)
basic = context.get_basic()
print(context.get_date_bars('000002', '2015-11-11', '2018-05-11', freq = util.FREQ_DAY))
print(context.get_count_bars('000002', None, limit = 20, freq = util.FREQ_DAY))
r = render.StockBarRender.create('000002', '2017-10-10', '2018-03-20', util.FREQ_DAY)
page = render.WebPage('test')
page.add(r)
page.show()
| 19.033333 | 88 | 0.711033 |
4352a944808cc79870e942f94f656d843fa960bc | 806 | py | Python | plotly/validators/scatter/_unselected.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scatter/_unselected.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scatter/_unselected.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class UnselectedValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='unselected', parent_name='scatter', **kwargs
):
super(UnselectedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Unselected'),
data_docs=kwargs.pop(
'data_docs', """
marker
plotly.graph_objs.scatter.unselected.Marker
instance or dict with compatible properties
textfont
plotly.graph_objs.scatter.unselected.Textfont
instance or dict with compatible properties
"""
),
**kwargs
)
| 32.24 | 74 | 0.609181 |
7b7b18a8ae64666710395926c57ec74abb88bd2f | 386 | py | Python | src/DocClustering/heuristics/pubmed.py | jd-s/DocClustering | a7d4acff8464f960558cf9cc6d03de78d07d56bf | [
"Apache-2.0"
] | null | null | null | src/DocClustering/heuristics/pubmed.py | jd-s/DocClustering | a7d4acff8464f960558cf9cc6d03de78d07d56bf | [
"Apache-2.0"
] | null | null | null | src/DocClustering/heuristics/pubmed.py | jd-s/DocClustering | a7d4acff8464f960558cf9cc6d03de78d07d56bf | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
import requests
def get_year(id):
r = requests.get('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&id='+str(id))
html = r.text
# Setup the BeautifulSoup Parser
soup = BeautifulSoup(html, 'html.parser')
year = soup.findAll(attrs={"name" : "PubDate"})
value = "0000"
for val in year:
value = val.text
return int(value[0:4])
| 27.571429 | 102 | 0.712435 |
3f27c92ef40a5bd0e612a1feeebd853238c49340 | 745 | py | Python | notifier/grabbers/crwle.py | thejeshpr/notifier | c9c8df0bea1e772a7f5f72cd8d67ac67bd3803ca | [
"MIT"
] | null | null | null | notifier/grabbers/crwle.py | thejeshpr/notifier | c9c8df0bea1e772a7f5f72cd8d67ac67bd3803ca | [
"MIT"
] | null | null | null | notifier/grabbers/crwle.py | thejeshpr/notifier | c9c8df0bea1e772a7f5f72cd8d67ac67bd3803ca | [
"MIT"
] | null | null | null | import urllib.parse
from notifier.grabbers.base import Base, Internet
class Crwle(object):
@staticmethod
def sync(obj: Base, *args, **kwargs):
soup = Internet.get_soup_phjs(obj.sync_type.base_url)
links = soup.find_all('a', {'class':'o-eZTujG o-fyWCgU'})
for a in links[::-1]:
link = a.get('href')
url = urllib.parse.urljoin(obj.sync_type.base_url, link)
name = a.text.strip()
obj.add_text_task(
unique_key=url,
name=name,
url=url,
data=dict(text=url)
)
| 29.8 | 80 | 0.448322 |
aefff09fae62f37955948fc01028a260336e4d33 | 8,069 | py | Python | python/ray/tests/test_cancel.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | [
"Apache-2.0"
] | 39 | 2021-02-02T23:09:31.000Z | 2022-03-28T16:39:12.000Z | python/ray/tests/test_cancel.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | [
"Apache-2.0"
] | 84 | 2021-03-06T08:02:56.000Z | 2022-03-05T08:07:19.000Z | python/ray/tests/test_cancel.py | 77loopin/ray | 9322f6aab53f4ca5baf5a3573e1ffde12feae519 | [
"Apache-2.0"
] | 20 | 2021-02-05T05:51:39.000Z | 2022-03-04T21:13:24.000Z | import random
import sys
import time
import pytest
import ray
from ray.exceptions import TaskCancelledError, RayTaskError, \
GetTimeoutError, WorkerCrashedError, \
ObjectLostError
from ray.test_utils import SignalActor
def valid_exceptions(use_force):
if use_force:
return (RayTaskError, TaskCancelledError, WorkerCrashedError,
ObjectLostError)
else:
return (RayTaskError, TaskCancelledError)
@pytest.mark.parametrize("use_force", [True, False])
def test_cancel_chain(ray_start_regular, use_force):
signaler = SignalActor.remote()
@ray.remote
def wait_for(t):
return ray.get(t[0])
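    # Chain four tasks behind a blocked signal; cancelling the head task should
    # surface a cancellation error on every downstream task in the chain.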
obj1 = wait_for.remote([signaler.wait.remote()])
obj2 = wait_for.remote([obj1])
obj3 = wait_for.remote([obj2])
obj4 = wait_for.remote([obj3])
assert len(ray.wait([obj1], timeout=.1)[0]) == 0
ray.cancel(obj1, force=use_force)
for ob in [obj1, obj2, obj3, obj4]:
with pytest.raises(valid_exceptions(use_force)):
ray.get(ob)
signaler2 = SignalActor.remote()
obj1 = wait_for.remote([signaler2.wait.remote()])
obj2 = wait_for.remote([obj1])
obj3 = wait_for.remote([obj2])
obj4 = wait_for.remote([obj3])
assert len(ray.wait([obj3], timeout=.1)[0]) == 0
ray.cancel(obj3, force=use_force)
for ob in [obj3, obj4]:
with pytest.raises(valid_exceptions(use_force)):
ray.get(ob)
with pytest.raises(GetTimeoutError):
ray.get(obj1, timeout=.1)
with pytest.raises(GetTimeoutError):
ray.get(obj2, timeout=.1)
signaler2.send.remote()
ray.get(obj1)
@pytest.mark.parametrize("use_force", [True, False])
def test_cancel_multiple_dependents(ray_start_regular, use_force):
signaler = SignalActor.remote()
@ray.remote
def wait_for(t):
return ray.get(t[0])
head = wait_for.remote([signaler.wait.remote()])
deps = []
for _ in range(3):
deps.append(wait_for.remote([head]))
assert len(ray.wait([head], timeout=.1)[0]) == 0
ray.cancel(head, force=use_force)
for d in deps:
with pytest.raises(valid_exceptions(use_force)):
ray.get(d)
head2 = wait_for.remote([signaler.wait.remote()])
deps2 = []
for _ in range(3):
deps2.append(wait_for.remote([head]))
for d in deps2:
ray.cancel(d, force=use_force)
for d in deps2:
with pytest.raises(valid_exceptions(use_force)):
ray.get(d)
signaler.send.remote()
ray.get(head2)
@pytest.mark.parametrize("use_force", [True, False])
def test_single_cpu_cancel(shutdown_only, use_force):
ray.init(num_cpus=1)
signaler = SignalActor.remote()
@ray.remote
def wait_for(t):
return ray.get(t[0])
obj1 = wait_for.remote([signaler.wait.remote()])
obj2 = wait_for.remote([obj1])
obj3 = wait_for.remote([obj2])
indep = wait_for.remote([signaler.wait.remote()])
assert len(ray.wait([obj3], timeout=.1)[0]) == 0
ray.cancel(obj3, force=use_force)
with pytest.raises(valid_exceptions(use_force)):
ray.get(obj3)
ray.cancel(obj1, force=use_force)
for d in [obj1, obj2]:
with pytest.raises(valid_exceptions(use_force)):
ray.get(d)
signaler.send.remote()
ray.get(indep)
@pytest.mark.parametrize("use_force", [True, False])
def test_comprehensive(ray_start_regular, use_force):
signaler = SignalActor.remote()
@ray.remote
def wait_for(t):
ray.get(t[0])
return "Result"
@ray.remote
def combine(a, b):
return str(a) + str(b)
a = wait_for.remote([signaler.wait.remote()])
b = wait_for.remote([signaler.wait.remote()])
combo = combine.remote(a, b)
a2 = wait_for.remote([a])
assert len(ray.wait([a, b, a2, combo], timeout=1)[0]) == 0
ray.cancel(a, force=use_force)
with pytest.raises(valid_exceptions(use_force)):
ray.get(a, timeout=10)
with pytest.raises(valid_exceptions(use_force)):
ray.get(a2, timeout=10)
signaler.send.remote()
with pytest.raises(valid_exceptions(use_force)):
ray.get(combo)
# Running this test with use_force==False is flaky.
# TODO(ilr): Look into the root of this flakiness.
@pytest.mark.parametrize("use_force", [True])
def test_stress(shutdown_only, use_force):
ray.init(num_cpus=1)
@ray.remote
def infinite_sleep(y):
if y:
while True:
time.sleep(1 / 10)
first = infinite_sleep.remote(True)
sleep_or_no = [random.randint(0, 1) for _ in range(100)]
tasks = [infinite_sleep.remote(i) for i in sleep_or_no]
cancelled = set()
# Randomly kill queued tasks (infinitely sleeping or not).
for t in tasks:
if random.random() > 0.5:
ray.cancel(t, force=use_force)
cancelled.add(t)
ray.cancel(first, force=use_force)
cancelled.add(first)
for done in cancelled:
with pytest.raises(valid_exceptions(use_force)):
ray.get(done, timeout=120)
# Kill all infinitely sleeping tasks (queued or not).
for indx, t in enumerate(tasks):
if sleep_or_no[indx]:
ray.cancel(t, force=use_force)
cancelled.add(t)
for indx, t in enumerate(tasks):
if t in cancelled:
with pytest.raises(valid_exceptions(use_force)):
ray.get(t, timeout=120)
else:
ray.get(t, timeout=120)
@pytest.mark.parametrize("use_force", [True, False])
def test_fast(shutdown_only, use_force):
ray.init(num_cpus=2)
@ray.remote
def fast(y):
return y
signaler = SignalActor.remote()
ids = list()
for _ in range(100):
x = fast.remote("a")
# NOTE If a non-force Cancellation is attempted in the time
# between a worker receiving a task and the worker executing
# that task (specifically the python execution), Cancellation
# can fail.
time.sleep(0.1)
ray.cancel(x, force=use_force)
ids.append(x)
@ray.remote
def wait_for(y):
return y
sig = signaler.wait.remote()
for _ in range(5000):
x = wait_for.remote(sig)
ids.append(x)
for idx in range(100, 5100):
if random.random() > 0.95:
ray.cancel(ids[idx], force=use_force)
signaler.send.remote()
for i, obj_ref in enumerate(ids):
try:
ray.get(obj_ref, timeout=120)
except Exception as e:
assert isinstance(
e, valid_exceptions(use_force)), f"Failure on iteration: {i}"
@pytest.mark.parametrize("use_force", [True, False])
def test_remote_cancel(ray_start_regular, use_force):
signaler = SignalActor.remote()
@ray.remote
def wait_for(y):
return ray.get(y[0])
@ray.remote
def remote_wait(sg):
return [wait_for.remote([sg[0]])]
sig = signaler.wait.remote()
outer = remote_wait.remote([sig])
inner = ray.get(outer)[0]
with pytest.raises(GetTimeoutError):
ray.get(inner, timeout=1)
ray.cancel(inner, force=use_force)
with pytest.raises(valid_exceptions(use_force)):
ray.get(inner, timeout=10)
@pytest.mark.parametrize("use_force", [True, False])
def test_recursive_cancel(shutdown_only, use_force):
ray.init(num_cpus=4)
@ray.remote(num_cpus=1)
def inner():
while True:
time.sleep(0.1)
@ray.remote(num_cpus=1)
def outer():
x = [inner.remote()]
print(x)
while True:
time.sleep(0.1)
@ray.remote(num_cpus=4)
def many_resources():
return 300
outer_fut = outer.remote()
many_fut = many_resources.remote()
with pytest.raises(GetTimeoutError):
ray.get(many_fut, timeout=1)
ray.cancel(outer_fut)
with pytest.raises(valid_exceptions(use_force)):
ray.get(outer_fut, timeout=10)
assert ray.get(many_fut, timeout=30)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| 26.283388 | 77 | 0.629446 |
5b349a0a5c079357a78fed800a07cc4451658207 | 55,135 | py | Python | src/azure-cli/azure/cli/command_modules/storage/_help.py | dyna-dot/azure-cli | 47d67e6e47a574a82b53c181084b29479aa92d51 | [
"MIT"
] | 1 | 2019-10-01T10:29:15.000Z | 2019-10-01T10:29:15.000Z | src/azure-cli/azure/cli/command_modules/storage/_help.py | dyna-dot/azure-cli | 47d67e6e47a574a82b53c181084b29479aa92d51 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_help.py | dyna-dot/azure-cli | 47d67e6e47a574a82b53c181084b29479aa92d51 | [
"MIT"
] | 1 | 2019-11-25T19:33:05.000Z | 2019-11-25T19:33:05.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['storage'] = """
type: group
short-summary: Manage Azure Cloud Storage resources.
"""
helps['storage account'] = """
type: group
short-summary: Manage storage accounts.
"""
helps['storage account create'] = """
type: command
short-summary: Create a storage account.
long-summary: >
The SKU of the storage account defaults to 'Standard_RAGRS'.
examples:
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --sku Standard_LRS
unsupported-profiles: 2017-03-09-profile
- name: Create a storage account 'MyStorageAccount' in resource group 'MyResourceGroup' in the West US region with locally redundant storage.
text: az storage account create -n MyStorageAccount -g MyResourceGroup -l westus --account-type Standard_LRS
supported-profiles: 2017-03-09-profile
"""
helps['storage account delete'] = """
type: command
short-summary: Delete a storage account.
examples:
- name: Delete a storage account using a resource ID.
text: az storage account delete --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
- name: Delete a storage account using an account name and resource group.
text: az storage account delete -n MyStorageAccount -g MyResourceGroup
"""
helps['storage account generate-sas'] = """
type: command
parameters:
- name: --services
short-summary: 'The storage services the SAS is applicable for. Allowed values: (b)lob (f)ile (q)ueue (t)able. Can be combined.'
- name: --resource-types
short-summary: 'The resource types the SAS is applicable for. Allowed values: (s)ervice (c)ontainer (o)bject. Can be combined.'
- name: --expiry
short-summary: Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes invalid.
- name: --start
short-summary: Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes valid. Defaults to the time of the request.
- name: --account-name
short-summary: 'Storage account name. Must be used in conjunction with either storage account key or a SAS token. Environment Variable: AZURE_STORAGE_ACCOUNT'
examples:
- name: Generate a sas token for the account that is valid for queue and table services on Linux.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -o tsv
- name: Generate a sas token for the account that is valid for queue and table services on MacOS.
text: |
end=`date -v+30M '+%Y-%m-%dT%H:%MZ'`
az storage account generate-sas --permissions cdlruwap --account-name MyStorageAccount --services qt --resource-types sco --expiry $end -o tsv
- name: Generates a shared access signature for the account (autogenerated)
text: az storage account generate-sas --account-key 00000000 --account-name MyStorageAccount --expiry 2020-01-01 --https-only --permissions acuw --resource-types co --services bfqt
crafted: true
"""
helps['storage account keys'] = """
type: group
short-summary: Manage storage account keys.
"""
helps['storage account keys list'] = """
type: command
short-summary: List the primary and secondary keys for a storage account.
examples:
- name: List the primary and secondary keys for a storage account.
text: az storage account keys list -g MyResourceGroup -n MyStorageAccount
"""
helps['storage account list'] = """
type: command
short-summary: List storage accounts.
examples:
- name: List all storage accounts in a subscription.
text: az storage account list
- name: List all storage accounts in a resource group.
text: az storage account list -g MyResourceGroup
"""
helps['storage account management-policy'] = """
type: group
short-summary: Manage storage account management policies.
"""
helps['storage account management-policy create'] = """
type: command
short-summary: Creates the data policy rules associated with the specified storage account.
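examples:
  - name: Create the data policy rules on a storage account (illustrative; assumes policy.json holds the JSON rule definition).
    text: az storage account management-policy create --account-name mystorageaccount -g MyResourceGroup --policy @policy.json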
"""
helps['storage account management-policy update'] = """
type: command
short-summary: Updates the data policy rules associated with the specified storage account.
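examples:
  - name: Disable the first rule of an existing policy (illustrative; assumes the generic --set syntax and at least one existing rule).
    text: az storage account management-policy update --account-name mystorageaccount -g MyResourceGroup --set policy.rules[0].enabled=false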
"""
helps['storage account network-rule'] = """
type: group
short-summary: Manage network rules.
"""
helps['storage account network-rule add'] = """
type: command
short-summary: Add a network rule.
long-summary: >
Rules can be created for an IPv4 address, address range (CIDR format), or a virtual network subnet.
examples:
- name: Create a rule to allow a specific address-range.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --ip-address 23.45.1.0/24
- name: Create a rule to allow access for a subnet.
text: az storage account network-rule add -g myRg --account-name mystorageaccount --vnet myvnet --subnet mysubnet
"""
helps['storage account network-rule list'] = """
type: command
short-summary: List network rules.
examples:
- name: List network rules. (autogenerated)
text: az storage account network-rule list --account-name MyAccount --resource-group MyResourceGroup
crafted: true
"""
helps['storage account network-rule remove'] = """
type: command
short-summary: Remove a network rule.
examples:
- name: Remove a network rule. (autogenerated)
text: az storage account network-rule remove --account-name MyAccount --resource-group MyResourceGroup --subnet mysubnet
crafted: true
- name: Remove a network rule. (autogenerated)
text: az storage account network-rule remove --account-name MyAccount --ip-address 23.45.1.0/24 --resource-group MyResourceGroup
crafted: true
"""
helps['storage account revoke-delegation-keys'] = """
type: command
short-summary: Revoke all user delegation keys for a storage account.
examples:
- name: Revoke all user delegation keys for a storage account by resource ID.
text: az storage account revoke-delegation-keys --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
  - name: Revoke all user delegation keys for a storage account 'MyStorageAccount' in resource group 'MyResourceGroup'.
text: az storage account revoke-delegation-keys -n MyStorageAccount -g MyResourceGroup
"""
helps['storage account show'] = """
type: command
short-summary: Show storage account properties.
examples:
- name: Show properties for a storage account by resource ID.
text: az storage account show --ids /subscriptions/{SubID}/resourceGroups/{ResourceGroup}/providers/Microsoft.Storage/storageAccounts/{StorageAccount}
- name: Show properties for a storage account using an account name and resource group.
text: az storage account show -g MyResourceGroup -n MyStorageAccount
"""
helps['storage account show-connection-string'] = """
type: command
short-summary: Get the connection string for a storage account.
examples:
- name: Get a connection string for a storage account.
text: az storage account show-connection-string -g MyResourceGroup -n MyStorageAccount
- name: Get the connection string for a storage account. (autogenerated)
text: az storage account show-connection-string --name MyStorageAccount --resource-group MyResourceGroup --subscription MySubscription
crafted: true
"""
helps['storage account show-usage'] = """
type: command
short-summary: Show the current count and limit of the storage accounts under the subscription.
examples:
- name: Show the current count and limit of the storage accounts under the subscription. (autogenerated)
text: az storage account show-usage --location westus2
crafted: true
"""
helps['storage account update'] = """
type: command
short-summary: Update the properties of a storage account.
examples:
- name: Update the properties of a storage account. (autogenerated)
text: az storage account update --default-action Allow --name MyStorageAccount --resource-group MyResourceGroup
crafted: true
"""
helps['storage blob'] = """
type: group
short-summary: Manage object storage for unstructured data (blobs).
"""
helps['storage blob copy'] = """
type: group
short-summary: Manage blob copy operations. Use `az storage blob show` to check the status of the blobs.
"""
helps['storage blob copy start'] = """
type: command
short-summary: Copies a blob asynchronously. Use `az storage blob show` to check the status of the blobs.
examples:
- name: Copies a blob asynchronously. Use `az storage blob show` to check the status of the blobs. (autogenerated)
text: az storage blob copy start --account-key 00000000 --account-name MyAccount --destination-blob MyDestinationBlob --destination-container MyDestinationContainer --source-uri https://storage.blob.core.windows.net/photos
crafted: true
"""
helps['storage blob copy start-batch'] = """
type: command
short-summary: Copy multiple blobs or files to a blob container. Use `az storage blob show` to check the status of the blobs.
parameters:
- name: --destination-container -c
type: string
short-summary: The blob container where the selected source files or blobs will be copied to.
- name: --pattern
type: string
    short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files or blobs to be uploaded. No actual data transfer will occur.
- name: --source-account-name
type: string
    short-summary: The source storage account from which the files or blobs are copied to the destination. If omitted, the destination account is used.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account.
- name: --source-container
type: string
short-summary: The source container from which blobs are copied.
- name: --source-share
type: string
short-summary: The source share from which files are copied.
- name: --source-uri
type: string
short-summary: A URI specifying a file share or blob container from which the files or blobs are copied.
long-summary: If the source is in another account, the source must either be public or be authenticated by using a shared access signature.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
examples:
- name: Copy multiple blobs or files to a blob container. Use `az storage blob show` to check the status of the blobs. (autogenerated)
text: az storage blob copy start-batch --account-key 00000000 --account-name MyAccount --destination-container MyDestinationContainer --source-account-key MySourceKey --source-account-name MySourceAccount --source-container MySourceContainer
crafted: true
"""
helps['storage blob delete'] = """
type: command
short-summary: Mark a blob or snapshot for deletion.
long-summary: >
The blob is marked for later deletion during garbage collection. In order to delete a blob, all of its snapshots must also be deleted.
Both can be removed at the same time.
examples:
- name: Delete a blob.
text: az storage blob delete -c MyContainer -n MyBlob
"""
helps['storage blob delete-batch'] = """
type: command
short-summary: Delete blobs from a blob container recursively.
parameters:
- name: --source -s
type: string
short-summary: The blob container from where the files will be deleted.
long-summary: The source can be the container URL or the container name. When the source is the container URL, the storage account name will be parsed from the URL.
- name: --pattern
type: string
short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: Show the summary of the operations to be taken instead of actually deleting the file(s).
    long-summary: If this is specified, all precondition arguments, including --if-modified-since and --if-unmodified-since, are ignored, so the set of files deleted by the command without --dryrun may differ from the list produced with the --dryrun flag on.
- name: --if-match
type: string
short-summary: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified.
- name: --if-none-match
type: string
short-summary: An ETag value, or the wildcard character (*).
long-summary: Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist.
examples:
- name: Delete all blobs ending with ".py" in a container that have not been modified for 10 days.
text: |
date=`date -d "10 days ago" '+%Y-%m-%dT%H:%MZ'`
az storage blob delete-batch -s MyContainer --account-name MyStorageAccount --pattern *.py --if-unmodified-since $date
"""
helps['storage blob download-batch'] = """
type: command
short-summary: Download blobs from a blob container recursively.
parameters:
- name: --source -s
type: string
short-summary: The blob container from where the files will be downloaded.
long-summary: The source can be the container URL or the container name. When the source is the container URL, the storage account name will be parsed from the URL.
- name: --destination -d
type: string
short-summary: The existing destination folder for this download operation.
- name: --pattern
type: string
short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: Show the summary of the operations to be taken instead of actually downloading the file(s).
examples:
- name: Download all blobs that end with .py
text: az storage blob download-batch -d . --pattern *.py -s MyContainer --account-name MyStorageAccount
"""
helps['storage blob exists'] = """
type: command
short-summary: Check for the existence of a blob in a container.
parameters:
- name: --name -n
short-summary: The blob name.
examples:
- name: Check for the existence of a blob in a container. (autogenerated)
text: az storage blob exists --account-key 00000000 --account-name MyAccount --container-name MyContainer --name MyBlob
crafted: true
"""
helps['storage blob generate-sas'] = """
type: command
short-summary: Generates a shared access signature for the blob.
examples:
- name: Generate a sas token for a blob with read-only permissions.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage blob generate-sas --account-name MyStorageAccount -c MyContainer -n MyBlob --permissions r --expiry $end --https-only
- name: Generates a shared access signature for the blob. (autogenerated)
text: az storage blob generate-sas --account-key 00000000 --account-name MyStorageAccount --container-name MyContainer --expiry 2018-01-01T00:00:00Z --name MyBlob --permissions r
crafted: true
"""
helps['storage blob incremental-copy'] = """
type: group
short-summary: Manage blob incremental copy operations.
"""
helps['storage blob incremental-copy start'] = """
type: command
short-summary: Copies an incremental copy of a blob asynchronously.
long-summary: This operation returns a copy operation properties object, including a copy ID you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis. The source blob for an incremental copy operation must be a page blob. Call get_blob_properties on the destination blob to check the status of the copy operation. The final blob will be committed when the copy completes.
examples:
- name: Upload all files that end with .py unless blob exists and has been modified since given date.
text: az storage blob incremental-copy start --source-container MySourceContainer --source-blob MyBlob --source-account-name MySourceAccount --source-account-key MySourceKey --source-snapshot MySnapshot --destination-container MyDestinationContainer --destination-blob MyDestinationBlob
- name: Copies an incremental copy of a blob asynchronously. (autogenerated)
text: az storage blob incremental-copy start --account-key 00000000 --account-name MyAccount --destination-blob MyDestinationBlob --destination-container MyDestinationContainer --source-account-key MySourceKey --source-account-name MySourceAccount --source-blob MyBlob --source-container MySourceContainer --source-snapshot MySnapshot
crafted: true
"""
helps['storage blob lease'] = """
type: group
short-summary: Manage storage blob leases.
"""
helps['storage blob list'] = """
type: command
short-summary: List blobs in a given container.
parameters:
- name: --include
short-summary: 'Specifies additional datasets to include: (c)opy-info, (m)etadata, (s)napshots, (d)eleted-soft. Can be combined.'
examples:
- name: List all storage blobs in a container whose names start with 'foo'; will match names such as 'foo', 'foobar', and 'foo/bar'
text: az storage blob list -c MyContainer --prefix foo
"""
helps['storage blob metadata'] = """
type: group
short-summary: Manage blob metadata.
"""
helps['storage blob service-properties'] = """
type: group
short-summary: Manage storage blob service properties.
"""
helps['storage blob service-properties delete-policy'] = """
type: group
short-summary: Manage storage blob delete-policy service properties.
"""
helps['storage blob service-properties delete-policy show'] = """
type: command
short-summary: Show the storage blob delete-policy.
examples:
- name: Show the storage blob delete-policy. (autogenerated)
text: az storage blob service-properties delete-policy show --account-name MyAccount
crafted: true
"""
helps['storage blob service-properties delete-policy update'] = """
type: command
short-summary: Update the storage blob delete-policy.
examples:
- name: Update the storage blob delete-policy. (autogenerated)
text: az storage blob service-properties delete-policy update --account-name MyAccount --days-retained 7 --enable true
crafted: true
"""
helps['storage blob service-properties update'] = """
type: command
short-summary: Update storage blob service properties.
examples:
- name: Update storage blob service properties. (autogenerated)
text: az storage blob service-properties update --404-document error.html --account-name MyAccount --index-document index.html --static-website true
crafted: true
"""
helps['storage blob set-tier'] = """
type: command
short-summary: Set the block or page tiers on the blob.
parameters:
- name: --type -t
short-summary: The blob type
- name: --tier
short-summary: The tier value to set the blob to.
- name: --timeout
short-summary: The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually.
long-summary: >
        For block blobs, this command only supports block blobs on standard storage accounts.
        For page blobs, this command only supports page blobs on premium storage accounts.
examples:
- name: Set the block or page tiers on the blob. (autogenerated)
text: az storage blob set-tier --account-key 00000000 --account-name MyAccount --container-name MyContainer --name MyBlob --tier P10
crafted: true
"""
helps['storage blob show'] = """
type: command
short-summary: Get the details of a blob.
examples:
- name: Show all properties of a blob.
text: az storage blob show -c MyContainer -n MyBlob
- name: Get the details of a blob (autogenerated)
text: az storage blob show --account-name MyAccount --container-name MyContainer --name MyBlob
crafted: true
"""
helps['storage blob sync'] = """
type: command
short-summary: Sync blobs recursively to a storage blob container.
long-summary: The sync command depends on AzCopy, which currently works only on 64-bit operating systems. Support for 32-bit operating systems is coming soon.
examples:
- name: Sync a single blob to a container.
text: az storage blob sync -c MyContainer --account-name MyStorageAccount -s "path/to/file" -d NewBlob
- name: Sync a directory to a container.
text: az storage blob sync -c MyContainer --account-name MyStorageAccount -s "path/to/directory"
"""
helps['storage blob upload'] = """
type: command
short-summary: Upload a file to a storage blob.
long-summary: Creates a new blob from a file path, or updates the content of an existing blob with automatic chunking and progress notifications.
parameters:
- name: --type -t
short-summary: Defaults to 'page' for *.vhd files, or 'block' otherwise.
- name: --maxsize-condition
short-summary: The max length in bytes permitted for an append blob.
- name: --validate-content
short-summary: Specifies that an MD5 hash shall be calculated for each chunk of the blob and verified by the service when the chunk has arrived.
- name: --tier
short-summary: A page blob tier value to set the blob to. The tier correlates to the size of the blob and number of allowed IOPS. This is only applicable to page blobs on premium storage accounts.
examples:
- name: Upload to a blob.
text: az storage blob upload -f /path/to/file -c MyContainer -n MyBlob
"""
helps['storage blob upload-batch'] = """
type: command
short-summary: Upload files from a local directory to a blob container.
parameters:
- name: --source -s
type: string
short-summary: The directory where the files to be uploaded are located.
- name: --destination -d
type: string
short-summary: The blob container where the files will be uploaded.
long-summary: The destination can be the container URL or the container name. When the destination is the container URL, the storage account name will be parsed from the URL.
- name: --pattern
type: string
short-summary: The pattern used for globbing files or blobs in the source. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: Show the summary of the operations to be taken instead of actually uploading the file(s).
- name: --if-match
type: string
short-summary: An ETag value, or the wildcard character (*). Specify this header to perform the operation only if the resource's ETag matches the value specified.
- name: --if-none-match
type: string
short-summary: An ETag value, or the wildcard character (*).
long-summary: Specify this header to perform the operation only if the resource's ETag does not match the value specified. Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist.
- name: --validate-content
short-summary: Specifies that an MD5 hash shall be calculated for each chunk of the blob and verified by the service when the chunk has arrived.
- name: --type -t
short-summary: Defaults to 'page' for *.vhd files, or 'block' otherwise. The setting will override blob types for every file.
- name: --maxsize-condition
short-summary: The max length in bytes permitted for an append blob.
- name: --lease-id
short-summary: The active lease id for the blob
examples:
- name: Upload all files that end with .py unless blob exists and has been modified since given date.
text: az storage blob upload-batch -d MyContainer --account-name MyStorageAccount -s directory_path --pattern *.py --if-unmodified-since 2018-08-27T20:51Z
"""
helps['storage blob url'] = """
type: command
short-summary: Create the url to access a blob.
examples:
- name: Create the url to access a blob (autogenerated)
text: az storage blob url --connection-string $connectionString --container-name container1 --name blob1
crafted: true
- name: Create the url to access a blob (autogenerated)
text: az storage blob url --account-name storageacct --container-name container1 --name blob1
crafted: true
"""
helps['storage container'] = """
type: group
short-summary: Manage blob storage containers.
"""
helps['storage container create'] = """
type: command
short-summary: Create a container in a storage account.
long-summary: >
By default, container data is private ("off") to the account owner. Use "blob" to allow public read access for blobs.
Use "container" to allow public read and list access to the entire container.
You can configure the --public-access using `az storage container set-permission -n CONTAINER_NAME --public-access blob/container/off`.
examples:
- name: Create a storage container in a storage account.
text: az storage container create -n MyStorageContainer
- name: Create a storage container in a storage account and return an error if the container already exists.
text: az storage container create -n MyStorageContainer --fail-on-exist
- name: Create a storage container in a storage account and allow public read access for blobs.
text: az storage container create -n MyStorageContainer --public-access blob
"""
helps['storage container delete'] = """
type: command
short-summary: Marks the specified container for deletion.
long-summary: >
The container and any blobs contained within it are later deleted during garbage collection.
examples:
- name: Marks the specified container for deletion. (autogenerated)
text: az storage container delete --account-key 00000000 --account-name MyAccount --name MyContainer
crafted: true
"""
helps['storage container exists'] = """
type: command
short-summary: Check for the existence of a storage container.
examples:
- name: Check for the existence of a storage container. (autogenerated)
text: az storage container exists --account-name MyAccount --name MyContainer
crafted: true
"""
helps['storage container generate-sas'] = """
type: command
short-summary: Generate a SAS token for a storage container.
examples:
- name: Generate a sas token for blob container and use it to upload a blob.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
sas=`az storage container generate-sas -n MyContainer --account-name MyStorageAccount --https-only --permissions dlrw --expiry $end -o tsv`
az storage blob upload -n MyBlob -c MyContainer --account-name MyStorageAccount -f file.txt --sas-token $sas
- name: Generates a shared access signature for the container (autogenerated)
text: az storage container generate-sas --account-key 00000000 --account-name MyStorageAccount --expiry 2020-01-01 --name MyContainer --permissions dlrw
crafted: true
"""
helps['storage container immutability-policy'] = """
type: group
short-summary: Manage container immutability policies.
"""
helps['storage container lease'] = """
type: group
short-summary: Manage blob storage container leases.
"""
helps['storage container legal-hold'] = """
type: group
short-summary: Manage container legal holds.
"""
helps['storage container legal-hold show'] = """
type: command
short-summary: Get the legal hold properties of a container.
examples:
- name: Get the legal hold properties of a container. (autogenerated)
text: az storage container legal-hold show --account-name MyAccount --container-name MyContainer
crafted: true
"""
helps['storage container list'] = """
type: command
short-summary: List containers in a storage account.
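examples:
  - name: List all containers in a storage account.
    text: az storage container list --account-name mystorageaccount
  - name: List only containers whose names start with 'public'.
    text: az storage container list --account-name mystorageaccount --prefix public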
"""
helps['storage container metadata'] = """
type: group
short-summary: Manage container metadata.
"""
helps['storage container policy'] = """
type: group
short-summary: Manage container stored access policies.
"""
helps['storage copy'] = """
type: command
short-summary: Copy files or directories to or from Azure storage.
long-summary: The copy command depends on AzCopy, which currently works only on 64-bit operating systems. Support for 32-bit operating systems is coming soon.
examples:
- name: Upload a single file to Azure Blob using url.
text: az storage copy -s /path/to/file.txt -d https://[account].blob.core.windows.net/[container]/[path/to/blob]
- name: Upload a single file to Azure Blob using account name and container name.
text: az storage copy --source-local-path /path/to/file.txt --destination-account-name mystorageaccount --destination-container mycontainer
- name: Upload a single file to Azure Blob with MD5 hash of the file content and save it as the blob's Content-MD5 property.
text: az storage copy -s /path/to/file.txt -d https://[account].blob.core.windows.net/[container]/[path/to/blob] --put-md5
- name: Upload an entire directory to Azure Blob using url.
text: az storage copy -s /path/to/dir -d https://[account].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Upload an entire directory to Azure Blob using account name and container name.
text: az storage copy --source-local-path /path/to/dir --destination-account-name mystorageaccount --destination-container mycontainer --recursive
- name: Upload a set of files to Azure Blob using wildcards with url.
text: az storage copy -s /path/*foo/*bar/*.pdf -d https://[account].blob.core.windows.net/[container]/[path/to/directory]
- name: Upload a set of files to Azure Blob using wildcards with account name and container name.
text: az storage copy --source-local-path /path/*foo/*bar/*.pdf --destination-account-name mystorageaccount --destination-container mycontainer
- name: Upload files and directories to Azure Blob using wildcards with url.
text: az storage copy -s /path/*foo/*bar* -d https://[account].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Upload files and directories to Azure Blob using wildcards with account name and container name.
text: az storage copy --source-local-path /path/*foo/*bar* --destination-account-name mystorageaccount --destination-container mycontainer --recursive
- name: Download a single file from Azure Blob using url, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container]/[path/to/blob] -d /path/to/file.txt
- name: Download an entire directory from Azure Blob, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container]/[path/to/directory] -d /path/to/dir --recursive
- name: Download a set of files from Azure Blob using wildcards, and you can also specify your storage account and container information as above.
text: az storage copy -s https://[account].blob.core.windows.net/[container]/foo* -d /path/to/dir --recursive
- name: Copy a single blob to another blob, and you can also specify the storage account and container information of source and destination as above.
text: az storage copy -s https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]
  - name: Copy all data from one blob account to another blob account, and you can also specify the storage account and container information of source and destination as above.
text: az storage copy -s https://[srcaccount].blob.core.windows.net -d https://[destaccount].blob.core.windows.net --recursive
- name: Copy a single object from S3 with access key to blob, and you can also specify your storage account and container information as above.
text: az storage copy -s https://s3.amazonaws.com/[bucket]/[object] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]
- name: Copy an entire directory from S3 with access key to blob virtual directory, and you can also specify your storage account and container information as above.
text: az storage copy -s https://s3.amazonaws.com/[bucket]/[folder] -d https://[destaccount].blob.core.windows.net/[container]/[path/to/directory] --recursive
- name: Copy all buckets in S3 service with access key to blob account, and you can also specify your storage account information as above.
text: az storage copy -s https://s3.amazonaws.com/ -d https://[destaccount].blob.core.windows.net --recursive
- name: Copy all buckets in a S3 region with access key to blob account, and you can also specify your storage account information as above.
text: az storage copy -s https://s3-[region].amazonaws.com/ -d https://[destaccount].blob.core.windows.net --recursive
- name: Upload a single file to Azure File Share using url.
text: az storage copy -s /path/to/file.txt -d https://[account].file.core.windows.net/[share]/[path/to/file]
- name: Upload a single file to Azure File Share using account name and share name.
text: az storage copy --source-local-path /path/to/file.txt --destination-account-name mystorageaccount --destination-share myshare
- name: Upload an entire directory to Azure File Share using url.
text: az storage copy -s /path/to/dir -d https://[account].file.core.windows.net/[share]/[path/to/directory] --recursive
- name: Upload an entire directory to Azure File Share using account name and container name.
text: az storage copy --source-local-path /path/to/dir --destination-account-name mystorageaccount --destination-share myshare --recursive
- name: Upload a set of files to Azure File Share using wildcards with account name and share name.
text: az storage copy --source-local-path /path/*foo/*bar/*.pdf --destination-account-name mystorageaccount --destination-share myshare
- name: Upload files and directories to Azure File Share using wildcards with url.
text: az storage copy -s /path/*foo/*bar* -d https://[account].file.core.windows.net/[share]/[path/to/directory] --recursive
- name: Upload files and directories to Azure File Share using wildcards with account name and share name.
text: az storage copy --source-local-path /path/*foo/*bar* --destination-account-name mystorageaccount --destination-share myshare --recursive
- name: Download a single file from Azure File Share using url, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/[path/to/file] -d /path/to/file.txt
- name: Download an entire directory from Azure File Share, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/[path/to/directory] -d /path/to/dir --recursive
- name: Download a set of files from Azure File Share using wildcards, and you can also specify your storage account and share information as above.
text: az storage copy -s https://[account].file.core.windows.net/[share]/foo* -d /path/to/dir --recursive
"""
helps['storage cors'] = """
type: group
short-summary: Manage storage service Cross-Origin Resource Sharing (CORS).
"""
helps['storage cors add'] = """
type: command
short-summary: Add a CORS rule to a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to add rules to. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
- name: --max-age
short-summary: The maximum number of seconds the client/browser should cache a preflight response.
- name: --origins
short-summary: Space-separated list of origin domains that will be allowed via CORS, or '*' to allow all domains.
- name: --methods
short-summary: Space-separated list of HTTP methods allowed to be executed by the origin.
- name: --allowed-headers
short-summary: Space-separated list of response headers allowed to be part of the cross-origin request.
- name: --exposed-headers
short-summary: Space-separated list of response headers to expose to CORS clients.
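examples:
  - name: Allow GET and PUT requests from contoso.com against the blob service, caching preflight responses for one hour (illustrative values).
    text: az storage cors add --services b --methods GET PUT --origins https://www.contoso.com --allowed-headers '*' --max-age 3600 --account-name mystorageaccount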
"""
helps['storage cors clear'] = """
type: command
short-summary: Remove all CORS rules from a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to remove rules from. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
examples:
- name: Remove all CORS rules from a storage account. (autogenerated)
text: az storage cors clear --account-name MyAccount --services bfqt
crafted: true
"""
helps['storage cors list'] = """
type: command
short-summary: List all CORS rules for a storage account.
parameters:
- name: --services
short-summary: >
The storage service(s) to list rules for. Allowed options are: (b)lob, (f)ile,
(q)ueue, (t)able. Can be combined.
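examples:
  - name: List all CORS rules configured for the blob service.
    text: az storage cors list --services b --account-name mystorageaccount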
"""
helps['storage directory'] = """
type: group
short-summary: Manage file storage directories.
"""
helps['storage directory exists'] = """
type: command
short-summary: Check for the existence of a storage directory.
examples:
- name: Check for the existence of a storage directory. (autogenerated)
text: az storage directory exists --account-key 00000000 --account-name MyAccount --name MyDirectory --share-name MyShare
crafted: true
"""
helps['storage directory list'] = """
type: command
short-summary: List directories in a share.
examples:
- name: List directories in a share. (autogenerated)
text: az storage directory list --share-name MyShare
crafted: true
"""
helps['storage directory metadata'] = """
type: group
short-summary: Manage file storage directory metadata.
"""
helps['storage entity'] = """
type: group
short-summary: Manage table storage entities.
"""
helps['storage entity insert'] = """
type: command
short-summary: Insert an entity into a table.
parameters:
- name: --table-name -t
type: string
short-summary: The name of the table to insert the entity into.
- name: --entity -e
type: list
short-summary: Space-separated list of key=value pairs. Must contain a PartitionKey and a RowKey.
long-summary: The PartitionKey and RowKey must be unique within the table, and may be up to 64Kb in size. If using an integer value as a key, convert it to a fixed-width string which can be canonically sorted. For example, convert the integer value 1 to the string value "0000001" to ensure proper sorting.
- name: --if-exists
type: string
short-summary: Behavior when an entity already exists for the specified PartitionKey and RowKey.
- name: --timeout
short-summary: The server timeout, expressed in seconds.
examples:
- name: Insert an entity into a table. (autogenerated)
text: az storage entity insert --connection-string $connectionString --entity PartitionKey=AAA RowKey=BBB Content=ASDF2 --table-name MyTable
crafted: true
"""
helps['storage entity query'] = """
type: command
short-summary: List entities which satisfy a query.
parameters:
- name: --marker
type: list
short-summary: Space-separated list of key=value pairs. Must contain a nextpartitionkey and a nextrowkey.
long-summary: This value can be retrieved from the next_marker field of a previous generator object if max_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped.
examples:
- name: List entities which satisfy a query. (autogenerated)
text: az storage entity query --table-name MyTable
crafted: true
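    - name: Resume listing from a previous page using its continuation token. (illustrative; the token values are hypothetical)
      text: az storage entity query -t MyTable --marker nextpartitionkey=AAA nextrowkey=BBB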
"""
helps['storage file'] = """
type: group
short-summary: Manage file shares that use the SMB 3.0 protocol.
"""
helps['storage file copy'] = """
type: group
short-summary: Manage file copy operations.
"""
helps['storage file copy start-batch'] = """
type: command
short-summary: Copy multiple files or blobs to a file share.
parameters:
- name: --destination-share
type: string
short-summary: The file share where the source data is copied to.
- name: --destination-path
type: string
short-summary: The directory where the source data is copied to. If omitted, data is copied to the root directory.
- name: --pattern
type: string
      short-summary: The pattern used for globbing files and blobs. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be copied. No actual data transfer will occur.
- name: --source-account-name
type: string
short-summary: The source storage account to copy the data from. If omitted, the destination account is used.
- name: --source-account-key
type: string
short-summary: The account key for the source storage account. If omitted, the active login is used to determine the account key.
- name: --source-container
type: string
short-summary: The source container blobs are copied from.
- name: --source-share
type: string
short-summary: The source share files are copied from.
- name: --source-uri
type: string
      short-summary: A URI that specifies the source file share or blob container.
long-summary: If the source is in another account, the source must either be public or authenticated via a shared access signature.
- name: --source-sas
type: string
short-summary: The shared access signature for the source storage account.
"""
helps['storage file delete-batch'] = """
type: command
short-summary: Delete files from an Azure Storage File Share.
parameters:
- name: --source -s
type: string
short-summary: The source of the file delete operation. The source can be the file share URL or the share name.
- name: --pattern
type: string
short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be deleted. No actual data deletion will occur.
examples:
- name: Delete files from an Azure Storage File Share. (autogenerated)
text: az storage file delete-batch --account-key 00000000 --account-name MyAccount --source /path/to/file
crafted: true
"""
helps['storage file download-batch'] = """
type: command
short-summary: Download files from an Azure Storage File Share to a local directory in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The source of the file download operation. The source can be the file share URL or the share name.
- name: --destination -d
type: string
short-summary: The local directory where the files are downloaded to. This directory must already exist.
- name: --pattern
type: string
short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be downloaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --snapshot
type: string
short-summary: A string that represents the snapshot version, if applicable.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
examples:
- name: Download files from an Azure Storage File Share to a local directory in a batch operation. (autogenerated)
text: az storage file download-batch --account-key 00000000 --account-name MyAccount --destination . --no-progress --source /path/to/file
crafted: true
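    - name: Download files with per-range MD5 validation enabled. (illustrative; account and share names are hypothetical)
      text: az storage file download-batch --account-name MyAccount --destination . --source MyShare --validate-content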
"""
helps['storage file exists'] = """
type: command
short-summary: Check for the existence of a file.
examples:
- name: Check for the existence of a file. (autogenerated)
text: az storage file exists --account-key 00000000 --account-name MyAccount --path path/file.txt --share-name MyShare
crafted: true
"""
helps['storage file generate-sas'] = """
type: command
examples:
- name: Generate a sas token for a file.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
az storage file generate-sas -p path/file.txt -s MyShare --account-name MyStorageAccount --permissions rcdw --https-only --expiry $end
"""
helps['storage file list'] = """
type: command
short-summary: List files and directories in a share.
parameters:
- name: --exclude-dir
type: bool
short-summary: List only files in the given share.
examples:
- name: List files and directories in a share. (autogenerated)
text: az storage file list --share-name MyShare
crafted: true
"""
helps['storage file metadata'] = """
type: group
short-summary: Manage file metadata.
"""
helps['storage file upload'] = """
type: command
short-summary: Upload a file to a share that uses the SMB 3.0 protocol.
long-summary: Creates or updates an Azure file from a source path with automatic chunking and progress notifications.
examples:
- name: Upload to a local file to a share.
text: az storage file upload -s MyShare --source /path/to/file
- name: Upload a file to a share that uses the SMB 3.0 protocol. (autogenerated)
text: az storage file upload --account-key 00000000 --account-name MyStorageAccount --path path/file.txt --share-name MyShare --source /path/to/file
crafted: true
"""
helps['storage file upload-batch'] = """
type: command
short-summary: Upload files from a local directory to an Azure Storage File Share in a batch operation.
parameters:
- name: --source -s
type: string
short-summary: The directory to upload files from.
- name: --destination -d
type: string
short-summary: The destination of the upload operation.
long-summary: The destination can be the file share URL or the share name. When the destination is the share URL, the storage account name is parsed from the URL.
- name: --destination-path
type: string
short-summary: The directory where the source data is copied to. If omitted, data is copied to the root directory.
- name: --pattern
type: string
      short-summary: The pattern used for file globbing. The supported patterns are '*', '?', '[seq]', and '[!seq]'.
- name: --dryrun
type: bool
short-summary: List the files and blobs to be uploaded. No actual data transfer will occur.
- name: --max-connections
type: integer
short-summary: The maximum number of parallel connections to use. Default value is 1.
- name: --validate-content
type: bool
short-summary: If set, calculates an MD5 hash for each range of the file for validation.
long-summary: >
The storage service checks the hash of the content that has arrived is identical to the hash that was sent.
This is mostly valuable for detecting bitflips during transfer if using HTTP instead of HTTPS. This hash is not stored.
examples:
- name: Upload files from a local directory to an Azure Storage File Share in a batch operation. (autogenerated)
text: az storage file upload-batch --account-key 00000000 --account-name MyAccount --destination . --source /path/to/file
crafted: true
"""
helps['storage file url'] = """
type: command
short-summary: Create the url to access a file.
examples:
- name: Create the url to access a file. (autogenerated)
text: az storage file url --account-name MyAccount --path path/file.txt --share-name MyShare
crafted: true
"""
helps['storage logging'] = """
type: group
short-summary: Manage storage service logging information.
"""
helps['storage logging show'] = """
type: command
short-summary: Show logging settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve logging info: (b)lob (q)ueue (t)able. Can be combined.'
"""
helps['storage logging update'] = """
type: command
short-summary: Update logging settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage service(s) for which to update logging info: (b)lob (q)ueue (t)able. Can be combined.'
- name: --log
short-summary: 'The operations for which to enable logging: (r)ead (w)rite (d)elete. Can be combined.'
- name: --retention
short-summary: Number of days for which to retain logs. 0 to disable.
- name: --version
short-summary: Version of the logging schema.
"""
helps['storage message'] = """
type: group
short-summary: Manage queue storage messages.
"""
helps['storage metrics'] = """
type: group
short-summary: Manage storage service metrics.
"""
helps['storage metrics show'] = """
type: command
short-summary: Show metrics settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve metrics info: (b)lob (q)ueue (t)able. Can be combined.'
- name: --interval
      short-summary: Filter the set of metrics to retrieve by time interval.
examples:
- name: Show metrics settings for a storage account. (autogenerated)
text: az storage metrics show --account-key 00000000 --account-name MyAccount
crafted: true
"""
helps['storage metrics update'] = """
type: command
short-summary: Update metrics settings for a storage account.
parameters:
- name: --services
short-summary: 'The storage services from which to retrieve metrics info: (b)lob (q)ueue (t)able. Can be combined.'
    - name: --hour
      short-summary: Update the hourly metrics.
    - name: --minute
      short-summary: Update the by-minute metrics.
- name: --api
short-summary: Specify whether to include API in metrics. Applies to both hour and minute metrics if both are specified. Must be specified if hour or minute metrics are enabled and being updated.
- name: --retention
short-summary: Number of days for which to retain metrics. 0 to disable. Applies to both hour and minute metrics if both are specified.
examples:
- name: Update metrics settings for a storage account. (autogenerated)
text: az storage metrics update --account-name MyAccount --api true --hour true --minute true --retention 10 --services bfqt
crafted: true
"""
helps['storage queue'] = """
type: group
short-summary: Manage storage queues.
"""
helps['storage queue list'] = """
type: command
short-summary: List queues in a storage account.
"""
helps['storage queue metadata'] = """
type: group
short-summary: Manage the metadata for a storage queue.
"""
helps['storage queue policy'] = """
type: group
short-summary: Manage shared access policies for a storage queue.
"""
helps['storage remove'] = """
type: command
short-summary: Delete blobs or files from Azure Storage.
    long-summary: To delete blobs, the source must either be public or be authenticated by using a shared access signature. The remove command depends on AzCopy, which currently works only on 64-bit operating systems; 32-bit support is planned.
examples:
- name: Remove a single blob.
text: az storage remove -c MyContainer -n MyBlob
- name: Remove an entire virtual directory.
text: az storage remove -c MyContainer -n path/to/directory --recursive
- name: Remove only the top blobs inside a virtual directory but not its sub-directories.
text: az storage remove -c MyContainer -n path/to/directory
- name: Remove a subset of blobs in a virtual directory (For example, only jpg and pdf files, or if the blob name is "exactName").
text: az storage remove -c MyContainer -n path/to/directory --recursive --include "*.jpg;*.pdf;exactName"
- name: Remove an entire virtual directory but exclude certain blobs from the scope (For example, every blob that starts with foo or ends with bar).
      text: az storage remove -c MyContainer -n path/to/directory --recursive --exclude "foo*;*bar"
- name: Remove a single file.
text: az storage remove -s MyShare -p MyFile
- name: Remove an entire directory.
text: az storage remove -s MyShare -p path/to/directory --recursive
"""
helps['storage share'] = """
type: group
short-summary: Manage file shares.
"""
helps['storage share create'] = """
type: command
short-summary: Creates a new share under the specified account.
examples:
- name: Creates a new share under the specified account. (autogenerated)
text: az storage share create --name MyFileShare
crafted: true
"""
helps['storage share exists'] = """
type: command
short-summary: Check for the existence of a file share.
examples:
- name: Check for the existence of a file share. (autogenerated)
text: az storage share exists --account-key 00000000 --account-name MyAccount --name MyFileShare
crafted: true
"""
helps['storage share generate-sas'] = """
type: command
examples:
- name: Generate a sas token for a fileshare and use it to upload a file.
text: |
end=`date -u -d "30 minutes" '+%Y-%m-%dT%H:%MZ'`
sas=`az storage share generate-sas -n MyShare --account-name MyStorageAccount --https-only --permissions dlrw --expiry $end -o tsv`
az storage file upload -s MyShare --account-name MyStorageAccount --source file.txt --sas-token $sas
- name: Generates a shared access signature for the share. (autogenerated)
text: az storage share generate-sas --account-key 00000000 --account-name MyStorageAccount --expiry 2037-12-31T23:59:00Z --name MyShare --permissions dlrw
crafted: true
"""
helps['storage share list'] = """
type: command
short-summary: List the file shares in a storage account.
"""
helps['storage share metadata'] = """
type: group
short-summary: Manage the metadata of a file share.
"""
helps['storage share policy'] = """
type: group
short-summary: Manage shared access policies of a storage file share.
"""
helps['storage share url'] = """
type: command
short-summary: Create a URI to access a file share.
examples:
- name: Create a URI to access a file share. (autogenerated)
text: az storage share url --account-key 00000000 --account-name MyAccount --name MyFileShare
crafted: true
"""
helps['storage table'] = """
type: group
short-summary: Manage NoSQL key-value storage.
"""
helps['storage table list'] = """
type: command
short-summary: List tables in a storage account.
"""
helps['storage table policy'] = """
type: group
short-summary: Manage shared access policies of a storage table.
"""
| 46.883503 | 417 | 0.730716 |
edf4c2fd0ecfec983c06c0451053401dec89d98f | 7,002 | py | Python | asteroid/data/whamr_dataset.py | groadabike/asteroid | 276d98346ab791d904fbfe79b9b8e374392dd128 | [
"MIT"
] | 1 | 2020-12-18T02:42:23.000Z | 2020-12-18T02:42:23.000Z | asteroid/data/whamr_dataset.py | groadabike/asteroid | 276d98346ab791d904fbfe79b9b8e374392dd128 | [
"MIT"
] | null | null | null | asteroid/data/whamr_dataset.py | groadabike/asteroid | 276d98346ab791d904fbfe79b9b8e374392dd128 | [
"MIT"
] | null | null | null | import torch
from torch.utils import data
import json
import os
import numpy as np
import soundfile as sf
from .wsj0_mix import wsj0_license
from .wham_dataset import wham_noise_license
DATASET = "WHAMR"
# WHAMR tasks
# Many tasks can be considered with this dataset, we only consider the 4 core
# separation tasks presented in the paper for now.
sep_clean = {
"mixture": "mix_clean_anechoic",
"sources": ["s1_anechoic", "s2_anechoic"],
"infos": [],
"default_nsrc": 2,
}
sep_noisy = {
"mixture": "mix_both_anechoic",
"sources": ["s1_anechoic", "s2_anechoic"],
"infos": ["noise"],
"default_nsrc": 2,
}
sep_reverb = {
"mixture": "mix_clean_reverb",
"sources": ["s1_anechoic", "s2_anechoic"],
"infos": [],
"default_nsrc": 2,
}
sep_reverb_noisy = {
"mixture": "mix_both_reverb",
"sources": ["s1_anechoic", "s2_anechoic"],
"infos": ["noise"],
"default_nsrc": 2,
}
WHAMR_TASKS = {
"sep_clean": sep_clean,
"sep_noisy": sep_noisy,
"sep_reverb": sep_reverb,
"sep_reverb_noisy": sep_reverb_noisy,
}
# Support both order, confusion is easy
WHAMR_TASKS["sep_noisy_reverb"] = WHAMR_TASKS["sep_reverb_noisy"]
class WhamRDataset(data.Dataset):
"""Dataset class for WHAMR source separation and speech enhancement tasks.
Args:
json_dir (str): The path to the directory containing the json files.
task (str): One of ``'sep_clean'``, ``'sep_noisy'``, ``'sep_reverb'``
or ``'sep_reverb_noisy'``.
* ``'sep_clean'`` for two-speaker clean (anechoic) source
separation.
* ``'sep_noisy'`` for two-speaker noisy (anechoic) source
separation.
* ``'sep_reverb'`` for two-speaker clean reverberant
source separation.
* ``'sep_reverb_noisy'`` for two-speaker noisy reverberant source
separation.
sample_rate (int, optional): The sampling rate of the wav files.
segment (float, optional): Length of the segments used for training,
in seconds. If None, use full utterances (e.g. for test).
nondefault_nsrc (int, optional): Number of sources in the training
targets.
If None, defaults to one for enhancement tasks and two for
separation tasks.
References
- "WHAMR!: Noisy and Reverberant Single-Channel Speech Separation",
Maciejewski et al. 2020
"""
dataset_name = "WHAMR"
def __init__(self, json_dir, task, sample_rate=8000, segment=4.0, nondefault_nsrc=None):
super(WhamRDataset, self).__init__()
if task not in WHAMR_TASKS.keys():
raise ValueError(
"Unexpected task {}, expected one of " "{}".format(task, WHAMR_TASKS.keys())
)
# Task setting
self.json_dir = json_dir
self.task = task
self.task_dict = WHAMR_TASKS[task]
self.sample_rate = sample_rate
self.seg_len = None if segment is None else int(segment * sample_rate)
if not nondefault_nsrc:
self.n_src = self.task_dict["default_nsrc"]
else:
assert nondefault_nsrc >= self.task_dict["default_nsrc"]
self.n_src = nondefault_nsrc
self.like_test = self.seg_len is None
# Load json files
mix_json = os.path.join(json_dir, self.task_dict["mixture"] + ".json")
sources_json = [
os.path.join(json_dir, source + ".json") for source in self.task_dict["sources"]
]
with open(mix_json, "r") as f:
mix_infos = json.load(f)
sources_infos = []
for src_json in sources_json:
with open(src_json, "r") as f:
sources_infos.append(json.load(f))
# Filter out short utterances only when segment is specified
orig_len = len(mix_infos)
drop_utt, drop_len = 0, 0
if not self.like_test:
for i in range(len(mix_infos) - 1, -1, -1): # Go backward
if mix_infos[i][1] < self.seg_len:
drop_utt += 1
drop_len += mix_infos[i][1]
del mix_infos[i]
for src_inf in sources_infos:
del src_inf[i]
print(
"Drop {} utts({:.2f} h) from {} (shorter than {} samples)".format(
                    drop_utt, drop_len / sample_rate / 3600, orig_len, self.seg_len  # 3600 s per hour
)
)
self.mix = mix_infos
# Handle the case n_src > default_nsrc
while len(sources_infos) < self.n_src:
sources_infos.append([None for _ in range(len(self.mix))])
self.sources = sources_infos
def __add__(self, wham):
if self.n_src != wham.n_src:
raise ValueError(
"Only datasets having the same number of sources"
"can be added together. Received "
"{} and {}".format(self.n_src, wham.n_src)
)
if self.seg_len != wham.seg_len:
self.seg_len = min(self.seg_len, wham.seg_len)
print(
"Segment length mismatched between the two Dataset"
"passed one the smallest to the sum."
)
self.mix = self.mix + wham.mix
        self.sources = [a + b for a, b in zip(self.sources, wham.sources)]
        # Without a return, ``d1 + d2`` would evaluate to None; the merge
        # above is in-place, so return the merged instance.
        return self
def __len__(self):
return len(self.mix)
def __getitem__(self, idx):
"""Gets a mixture/sources pair.
Returns:
mixture, vstack([source_arrays])
"""
# Random start
if self.mix[idx][1] == self.seg_len or self.like_test:
rand_start = 0
else:
rand_start = np.random.randint(0, self.mix[idx][1] - self.seg_len)
if self.like_test:
stop = None
else:
stop = rand_start + self.seg_len
# Load mixture
x, _ = sf.read(self.mix[idx][0], start=rand_start, stop=stop, dtype="float32")
seg_len = torch.as_tensor([len(x)])
# Load sources
source_arrays = []
for src in self.sources:
if src[idx] is None:
# Target is filled with zeros if n_src > default_nsrc
s = np.zeros((seg_len,))
else:
s, _ = sf.read(src[idx][0], start=rand_start, stop=stop, dtype="float32")
source_arrays.append(s)
sources = torch.from_numpy(np.vstack(source_arrays))
return torch.from_numpy(x), sources
def get_infos(self):
"""Get dataset infos (for publishing models).
Returns:
dict, dataset infos with keys `dataset`, `task` and `licences`.
"""
infos = dict()
infos["dataset"] = self.dataset_name
infos["task"] = self.task
if self.task == "sep_clean":
data_license = [wsj0_license]
else:
data_license = [wsj0_license, wham_noise_license]
infos["licenses"] = data_license
return infos
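

# Minimal usage sketch: shows the intended read path, assuming the WHAMR
# metadata json files already exist under the (hypothetical) directory below.
if __name__ == "__main__":
    dataset = WhamRDataset(
        "metadata/whamr/tr", task="sep_clean", sample_rate=8000, segment=4.0
    )
    mixture, sources = dataset[0]  # mixture: (time,), sources: (n_src, time)
    print(mixture.shape, sources.shape, dataset.get_infos())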
| 35.543147 | 92 | 0.583262 |
f639caba89cbeed027f38e18461415ec704e813a | 216 | py | Python | htmlBuilds/htmlSupport.py | skylarkgit/sql2phpclass | a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6 | [
"MIT"
] | null | null | null | htmlBuilds/htmlSupport.py | skylarkgit/sql2phpclass | a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6 | [
"MIT"
] | null | null | null | htmlBuilds/htmlSupport.py | skylarkgit/sql2phpclass | a79e7f3cfda8cb41ba00e8cbba0de33e9be759d6 | [
"MIT"
] | null | null | null | import sys
sys.path.append("..")
from htmlBuilds.htmlTemplates import *
from htmlBuilds.mytemplate import *
from lib.fileOps import *
def wrapForm(code):
return TAG('div',code,ATTR('class','archonFormWrapper'))
| 24 | 60 | 0.75 |
49165010a7b6f79eb946882978a1277348ab638a | 909 | py | Python | setup.py | llamm-de/miraculix | 61fb1e533ea83a746c86f6c91720886e856a84cc | [
"MIT"
] | null | null | null | setup.py | llamm-de/miraculix | 61fb1e533ea83a746c86f6c91720886e856a84cc | [
"MIT"
] | null | null | null | setup.py | llamm-de/miraculix | 61fb1e533ea83a746c86f6c91720886e856a84cc | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as file:
long_description = file.read()
setuptools.setup(
name='Miraculix',
version='0.0.1',
author='Lukas Lamm',
author_email='lukas.lamm@ifam.rwth-aachen.de',
url='https://www.ifam.rwth-aachen.de',
packages=setuptools.find_packages(),
scripts=[],
description='A handy tool for examination processes at RWTH Aachen University',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"randomuser",
"docxtpl",
],
python_requires='>=3.5',
    license='MIT',  # setuptools expects a license identifier here, not a file name
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"License :: MIT License",
"Operating System :: OS Independent",
],
include_package_data=True,
) | 29.322581 | 83 | 0.641364 |
4646be50a9300508c569d1195e588979715dc78e | 445 | py | Python | tests/test_storage_redis.py | alvistack/ionrock-cachecontrol | 09992a140edca2602cf8230370fc8b28566a069b | [
"Apache-2.0"
] | null | null | null | tests/test_storage_redis.py | alvistack/ionrock-cachecontrol | 09992a140edca2602cf8230370fc8b28566a069b | [
"Apache-2.0"
] | null | null | null | tests/test_storage_redis.py | alvistack/ionrock-cachecontrol | 09992a140edca2602cf8230370fc8b28566a069b | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0
from datetime import datetime
from mock import Mock
from cachecontrol.caches import RedisCache
class TestRedisCache(object):
def setup(self):
self.conn = Mock()
self.cache = RedisCache(self.conn)
def test_set_expiration(self):
self.cache.set("foo", "bar", expires=datetime(2014, 2, 2))
assert self.conn.setex.called
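
    def test_set_without_expiration(self):
        # Hedged sketch: cachecontrol's RedisCache is expected to fall back to
        # a plain SET when no expiration is given; adjust the assertion if the
        # backend API differs.
        self.cache.set("foo", "bar")
        assert self.conn.set.called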
| 22.25 | 66 | 0.701124 |
a443057dd78efef1b4abad99a0c0a45af42596e1 | 11,801 | py | Python | src/genome.py | snowyukischnee/simple-neat-implementation | 335bbd1e47af792e5b3fd773d44b141db244d5e3 | [
"MIT"
] | null | null | null | src/genome.py | snowyukischnee/simple-neat-implementation | 335bbd1e47af792e5b3fd773d44b141db244d5e3 | [
"MIT"
] | null | null | null | src/genome.py | snowyukischnee/simple-neat-implementation | 335bbd1e47af792e5b3fd773d44b141db244d5e3 | [
"MIT"
] | null | null | null | from typing import Any, Dict, Tuple
from genes import DefaultNodeGene, DefaultConnectionGene, NeuralNodeGene, NeuralConnectionGene, BaseGene
import activation_functions
import aggregation_functions
import random
class DefaultGenome(object):
def __init__(self, key: Any):
self.key: Any = key
self.nodes: Dict[int, BaseGene] = {}
self.connections: Dict[Tuple[int, int], BaseGene] = {}
self.fitness: float = None
def configure_new(self, config: object) -> None:
for nk in getattr(config, 'output_keys'):
self.nodes[nk] = self.create_node(config, nk)
def configure_crossover(self, parent1: Any, parent2: Any, config: object) -> None:
assert isinstance(parent1.fitness, float) and isinstance(parent2.fitness, float)
if parent1.fitness < parent2.fitness:
parent1, parent2 = parent2, parent1
for nk1, ng1 in parent1.nodes.items():
ng2 = parent2.nodes.get(nk1)
if ng2 is None:
self.nodes[nk1] = ng1.copy()
else:
self.nodes[nk1] = ng1.crossover(ng2)
for ck1, cg1 in parent1.connections.items():
cg2 = parent2.connections.get(ck1)
if cg2 is None:
self.connections[ck1] = cg1.copy()
else:
self.connections[ck1] = cg1.crossover(cg2)
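        # Note: after the fitness-based swap at the top of this method,
        # parent1 is the fitter genome, so disjoint/excess genes are inherited
        # only from it, which matches the standard NEAT crossover rule.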
def distance(self, other: Any, config: object) -> Any:
node_distance = 0.
if (len(self.nodes) > 0) or (len(other.nodes) > 0):
disjoint_nodes = 0
for k2 in other.nodes:
if k2 not in self.nodes:
disjoint_nodes += 1 # node in other genome but not this genome
for k1, n1 in self.nodes.items():
n2 = other.nodes.get(k1)
if n2 is None:
disjoint_nodes += 1 # node in this genome but not other genome
else:
node_distance += n1.distance(n2)
node_distance = (node_distance + disjoint_nodes) / max(len(self.nodes), len(other.nodes)) # mean distance
connection_distance = 0.
if (len(self.connections) > 0) or (len(other.connections) > 0):
disjoint_connections = 0
for k2 in other.connections:
if k2 not in self.connections:
disjoint_connections += 1 # connection in other genome but not this genome
for k1, n1 in self.connections.items():
n2 = other.connections.get(k1)
if n2 is None:
disjoint_connections += 1 # connection in this genome but not other genome
else:
connection_distance += n1.distance(n2)
connection_distance = (connection_distance + disjoint_connections) / max(len(self.connections), len(other.connections)) # mean distance
distance = node_distance + connection_distance
return distance
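
    # Worked example for distance() (illustrative): if this genome has nodes
    # {0, 1, 2} and the other has {0, 1, 3}, there are 2 disjoint nodes (2 and
    # 3); with matching-gene distances d(0)=0.1 and d(1)=0.3 the node term is
    # (0.4 + 2) / 3 ~= 0.8. The connection term is computed the same way over
    # the connection keys.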
def mutate(self, config: object) -> None:
add_node_mutation_prob = getattr(config, 'add_node_mutation_prob', 0.0)
del_node_mutation_prob = getattr(config, 'del_node_mutation_prob', 0.0)
add_connection_mutation_prob = getattr(config, 'add_connection_mutation_prob', 0.0)
del_connection_mutation_prob = getattr(config, 'del_connection_mutation_prob', 0.0)
if random.random() < add_node_mutation_prob:
self.mutate_add_node(config)
if random.random() < del_node_mutation_prob:
self.mutate_del_node(config)
if random.random() < add_connection_mutation_prob:
self.mutate_add_connection(config)
if random.random() < del_connection_mutation_prob:
self.mutate_del_connection(config)
for ng in self.nodes.values():
ng.mutate(config)
for cg in self.connections.values():
cg.mutate(config)
def mutate_add_node(self, config: object) -> None:
if len(self.connections) == 0:
return
conn_to_split = random.choice(list(self.connections.values()))
new_node_key = len(self.nodes)
nng = self.create_node(config, new_node_key)
self.nodes[new_node_key] = nng
conn_to_split.enabled = False
inode_key, onode_key = conn_to_split.key
new_connection_1 = self.create_connection(config, inode_key, new_node_key)
new_connection_1.weight = 1.0
new_connection_1.enabled = True
self.connections[new_connection_1.key] = new_connection_1
new_connection_2 = self.create_connection(config, new_node_key, onode_key)
new_connection_2.weight = conn_to_split.weight
        new_connection_2.enabled = True  # fix: enable the second new connection, not the first again
self.connections[new_connection_2.key] = new_connection_2
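        # Net effect (illustration): splitting connection (A -> B, weight w)
        # disables it and adds A -(1.0)-> NEW -(w)-> B, so the network
        # initially behaves approximately as it did before the mutation.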
def mutate_del_node(self, config: object) -> None:
available_node_keys = [nk for nk in self.nodes.keys() if nk not in getattr(config, 'output_keys')]
if len(available_node_keys) == 0:
return
del_node_key = random.choice(available_node_keys)
        # Delete every connection attached to the node being removed. Iterate
        # over a snapshot of the keys, since deleting from a dict while
        # iterating over it raises RuntimeError.
        for ck in list(self.connections.keys()):
            if del_node_key in ck:
                del self.connections[ck]
del self.nodes[del_node_key]
def mutate_add_connection(self, config: object) -> None:
available_onode_keys = list(self.nodes.keys())
available_inode_keys = list(set(available_onode_keys + getattr(config, 'input_keys')))
connection_inode = random.choice(available_inode_keys)
connection_onode = random.choice(available_onode_keys)
connection_key = (connection_inode, connection_onode)
if connection_key in self.connections:
return
if (connection_inode in getattr(config, 'output_keys')) and (connection_onode in getattr(config, 'output_keys')):
return
        # Cycle check from the reference implementation; not fully understood
        # yet, so it stays commented out.
# if config.feed_forward and creates_cycle(list(self.connections.keys()), connection_key):
# return
ncg = self.create_connection(config, connection_inode, connection_onode)
self.connections[ncg.key] = ncg
def mutate_del_connection(self, config: object) -> None:
if len(self.connections) > 0:
del_connection_key = random.choice(list(self.connections.keys()))
del self.connections[del_connection_key]
@staticmethod
def create_node(config: object, key: int) -> Any:
new_node = getattr(config, 'node_gene_type')(key)
new_node.init_attributes(config)
return new_node
@staticmethod
def create_connection(config: object, inode_key: int, onode_key: int) -> Any:
new_connection = getattr(config, 'connection_gene_type')((inode_key, onode_key))
new_connection.init_attributes(config)
return new_connection
if __name__ == '__main__':
y = {
'node_gene_type': NeuralNodeGene,
'connection_gene_type': NeuralConnectionGene,
'genome_type': DefaultGenome,
'compatibility_weight_coefficient': 1.0,
'compatibility_disjoint_coefficient': 1.0,
'num_inputs': 2,
'input_keys': [-1, -2],
'output_keys': [0],
'num_outputs': 1,
'add_node_mutation_prob': 0.99,
'del_node_mutation_prob': 0.1,
'add_connection_mutation_prob': 0.99,
'del_connection_mutation_prob': 0.1,
'weight_init_type': 'normal',
'weight_default_value': 0.0,
'weight_mean': 0.0,
'weight_stdev': 1.0,
'weight_max_value': 2.0,
'weight_min_value': -2.0,
'weight_mutation_power': 1.0,
'weight_mutation_rate': 0.6,
'weight_replace_rate': 0.2,
'response_init_type': 'normal',
'response_default_value': 0.0,
'response_mean': 0.0,
'response_stdev': 1.0,
'response_max_value': 2.0,
'response_min_value': -2.0,
'response_mutation_power': 1.0,
'response_mutation_rate': 0.6,
'response_replace_rate': 0.2,
'bias_init_type': 'normal',
'bias_default_value': 0.0,
'bias_mean': 0.0,
'bias_stdev': 1.0,
'bias_max_value': 2.0,
'bias_min_value': -2.0,
'bias_mutation_power': 1.0,
'bias_mutation_rate': 0.6,
'bias_replace_rate': 0.2,
'activation_function_def': {
'sigmoid': activation_functions.SigmoidActivationFunction,
'tanh': activation_functions.TanhActivationFunction,
'relu': activation_functions.ReluActivationFunction,
'gauss': activation_functions.GaussianActivationFunction
},
'aggregation_function_def': {
'sum': aggregation_functions.SumAggregationFunction,
'mean': aggregation_functions.MeanAggregationFunction,
'product': aggregation_functions.ProductAggregationFunction
}
}
from collections import namedtuple
yp = namedtuple('config', y.keys())(*y.values())
# class Config(object):
# node_gene_type = NeuralNodeGene
# connection_gene_type = NeuralConnectionGene
# genome_type = DefaultGenome
# compatibility_weight_coefficient = 1.0
# compatibility_disjoint_coefficient = 1.0
# num_inputs = 2
# input_keys = [-1, -2]
# output_keys = [0]
# num_outputs = 1
# add_node_mutation_prob = 0.99
# del_node_mutation_prob = 0.1
# add_connection_mutation_prob = 0.99
# del_connection_mutation_prob = 0.1
#
# weight_init_type = 'normal'
# weight_default_value = 0.0
# weight_mean = 0.0
# weight_stdev = 1.0
# weight_max_value = 2.0
# weight_min_value = -2.0
# weight_mutation_power = 1.0
# weight_mutation_rate = 0.6
# weight_replace_rate = 0.2
#
# response_init_type = 'normal'
# response_default_value = 0.0
# response_mean = 0.0
# response_stdev = 1.0
# response_max_value = 2.0
# response_min_value = -2.0
# response_mutation_power = 1.0
# response_mutation_rate = 0.6
# response_replace_rate = 0.2
#
# bias_init_type = 'normal'
# bias_default_value = 0.0
# bias_mean = 0.0
# bias_stdev = 1.0
# bias_max_value = 2.0
# bias_min_value = -2.0
# bias_mutation_power = 1.0
# bias_mutation_rate = 0.6
# bias_replace_rate = 0.2
#
# activation_function_def = {
# 'sigmoid': activation_functions.SigmoidActivationFunction,
# 'tanh': activation_functions.TanhActivationFunction,
# 'relu': activation_functions.ReluActivationFunction,
# 'gauss': activation_functions.GaussianActivationFunction
# }
# aggregation_function_def = {
# 'sum': aggregation_functions.SumAggregationFunction,
# 'mean': aggregation_functions.MeanAggregationFunction,
# 'product': aggregation_functions.ProductAggregationFunction
# }
# yp = Config()
x = DefaultGenome('test_genome')
x.configure_new(yp)
x.mutate_add_connection(yp)
x.mutate_add_connection(yp)
x.mutate_add_connection(yp)
x.mutate_add_node(yp)
x.mutate_add_node(yp)
ng = x.create_node(yp, 3)
x.nodes[ng.key] = ng
from utils import required_for_output
print(x.connections.keys(), x.nodes.keys())
print(required_for_output(yp.input_keys, yp.output_keys, list(x.connections.keys())))
xx = x.nodes.get(2)
print(xx.activation, xx.aggregation, xx.response, xx.bias)
print(xx.forward(yp, [1., 2., 4.]))
print(xx._grad_items_history)
print(xx.backward(yp, 3.))
print(xx._grad_items_history) | 41.847518 | 148 | 0.630116 |
87a18129f2bf51406f39ab0b632921c0f51782e1 | 627 | py | Python | app/constants.py | nashahzad/jira-scraper | a42705e3e2055d21728e5fadfc8fd93e378c4c89 | [
"Apache-2.0"
] | 12 | 2021-07-07T17:19:29.000Z | 2021-09-26T17:29:05.000Z | app/constants.py | nashahzad/jira-sprint-analytics | a42705e3e2055d21728e5fadfc8fd93e378c4c89 | [
"Apache-2.0"
] | null | null | null | app/constants.py | nashahzad/jira-sprint-analytics | a42705e3e2055d21728e5fadfc8fd93e378c4c89 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class SprintStates(Enum):
ACTIVE = "ACTIVE"
CLOSED = "CLOSED"
FUTURE = "FUTURE"
class IssueTypeEnum(Enum):
STORY = "Story"
TASK = "Task"
BUG = "Bug"
REPORT_COLUMNS = [
"Sprint",
"Commitment",
"Completed",
"4-Sprint Average",
"Scope Change",
"Planned Capacity",
"Capacity Achieved",
"4-Sprint Capacity Achieved",
"4-Sprint Smoothed Average",
"Unpointed Issues",
"Unpointed Stories",
"Unpointed Tasks",
"Bug Tickets",
"Priority Points",
"Non-Priority Points",
]
DEFAULT_STORY_POINTS_FIELD_NAME = "customfield_10591"
| 17.914286 | 53 | 0.629984 |
13c082c48fb4689c0dff47a8a54be20e79da1fd9 | 1,809 | py | Python | strawberryfields/circuitspecs/fock.py | egbQuantum/strawberryfields | 674e4fe2de5e5dd791a77f1cd219009120dcbbbf | [
"Apache-2.0"
] | null | null | null | strawberryfields/circuitspecs/fock.py | egbQuantum/strawberryfields | 674e4fe2de5e5dd791a77f1cd219009120dcbbbf | [
"Apache-2.0"
] | 5 | 2020-09-26T01:27:24.000Z | 2022-02-10T02:13:49.000Z | strawberryfields/circuitspecs/fock.py | egbQuantum/strawberryfields | 674e4fe2de5e5dd791a77f1cd219009120dcbbbf | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Circuit specifications for the Fock simulator backend."""
from .circuit_specs import CircuitSpecs
class FockSpecs(CircuitSpecs):
"""Circuit specifications for the Fock backend."""
short_name = 'fock'
modes = None
local = True
remote = True
interactive = True
primitives = {
# meta operations
"All",
"_New_modes",
"_Delete",
# state preparations
"Vacuum",
"Coherent",
"Squeezed",
"DisplacedSqueezed",
"Thermal",
"Fock",
"Catstate",
"Ket",
"DensityMatrix",
# measurements
"MeasureFock",
"MeasureHomodyne",
# channels
"LossChannel",
# single mode gates
"Dgate",
"Xgate",
"Zgate",
"Sgate",
"Rgate",
"Vgate",
"Kgate",
"Fouriergate",
"BSgate",
"CKgate",
}
decompositions = {
"Interferometer": {},
"GraphEmbed": {},
"BipartiteGraphEmbed": {},
"GaussianTransform": {},
"Gaussian": {},
"Pgate": {},
"S2gate": {},
"CXgate": {},
"CZgate": {},
"MZgate": {},
}
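
# Hedged illustration: compilation code is expected to check operation names
# against these sets, along the lines of:
#   specs = FockSpecs()
#   "Dgate" in specs.primitives               # True: runs natively
#   "Interferometer" in specs.decompositions  # True: compiled via decomposition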
| 25.125 | 74 | 0.572692 |
7b7538852c508cf21af68c1077f003cbc7b28433 | 2,974 | py | Python | app.py | mehulmakwana97/react-crud | 95d23d05ba0e80fc6724c851a8b9a9eafdd26891 | [
"Apache-2.0"
] | null | null | null | app.py | mehulmakwana97/react-crud | 95d23d05ba0e80fc6724c851a8b9a9eafdd26891 | [
"Apache-2.0"
] | null | null | null | app.py | mehulmakwana97/react-crud | 95d23d05ba0e80fc6724c851a8b9a9eafdd26891 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env /usr/local/bin/python2
from flask import Flask, render_template, jsonify, make_response, request, current_app
from datetime import timedelta
from functools import update_wrapper
app = Flask(__name__, static_folder='build', static_url_path='')
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/peoples', methods=['GET', 'OPTIONS'])
def get_persons():
return jsonify({
"status" : "success",
"output" : [
{
"id": "1",
"name" : "Chintan Kotadia",
"username" :"chintankotadia13@gmail.com",
"company" : "ish",
"company_role" :"html/css coder",
"phone" :"49874646",
"notes" :"My notes",
"mobile" :"9497654654"
},
{
"id": "2",
"name" : "Marcus Hodgson",
"username" :"marcus@ish.com",
"company" : "ish",
"company_role" :"developer",
"phone" :"987897544",
"notes" :"Not available",
"mobile" :"9797464876"
},
{
"id": "3",
"name" : "Stephen McIlwaine",
"username" :"Stephen@ish.com",
"company" : "ish",
"company_role" :"java developer",
"phone" :"5464979646",
"notes" :"Busy",
"mobile" :"9797464797"
},
{
"id": "4",
"name" : "Aristedes Maniatis",
"username" :"ari@ish.com.au",
"company" : "ish",
"company_role" :"developer",
"phone" :"554879645",
"notes" :"employees scrum",
"mobile" :"9849476469"
}
]
})
@app.route('/data/people/view/<int:id>')
def view_person(id):
return jsonify(
{
"status": "success",
"output": {
"people": {
"id": id,
"name": "Chintan Kotadia",
"username":"chintankotadia13@gmail.com",
"company": "ish",
"company_role":"html/css coder",
"phone":"49874646",
"notes":"My notes",
"mobile":"9497654654"
}
}
}
)
@app.route('/data/people/update/<int:id>')
def update_person(id):
return jsonify(
{
"status": "success",
"output": {
"message": "People updated successfully",
"people": {
"id": "1",
"name": "Chintan Kotadia",
"username":"chintankotadia13@gmail.com",
"company": "ish",
"company_role":"html/css coder",
"phone":"49874646",
"notes":"My notes",
"mobile":"9497654654"
}
}
}
)
@app.route('/data/people/delete/<int:id>')
def delete_person(id):
return jsonify( { 'status':"success",
'output':{
'message':"People deleted successfully"
}})
@app.route('/data/company/get/<search>')
def search_company(search):
return jsonify(
{
"status": "success",
"output": {
"message": "Companies fetched successfully",
"output": [
{
"id": "1",
"name": "ish"
},
{
"id": "2",
"name": "weasydney"
}
]
}
}
)
if __name__ == '__main__':
app.run(debug=True)
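
# Quick manual checks (Flask's default dev address is 127.0.0.1:5000; adjust
# if you pass host/port to app.run):
#   curl http://127.0.0.1:5000/peoples
#   curl http://127.0.0.1:5000/data/people/view/1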
| 22.530303 | 86 | 0.528917 |
1c1791f47d749fa394e08d5d6b0755274f9c57c1 | 1,989 | py | Python | test/test_storage_volume_utilization_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | test/test_storage_volume_utilization_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | test/test_storage_volume_utilization_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.storage_volume_utilization_ref import StorageVolumeUtilizationRef # noqa: E501
from intersight.rest import ApiException
class TestStorageVolumeUtilizationRef(unittest.TestCase):
"""StorageVolumeUtilizationRef unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStorageVolumeUtilizationRef(self):
"""Test StorageVolumeUtilizationRef"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.storage_volume_utilization_ref.StorageVolumeUtilizationRef() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 52.342105 | 1,052 | 0.788336 |
cc105445751504aeb7dc13364485212f86e1d24b | 319 | py | Python | src/kuas_api/kuas/__init__.py | JohnSounder/AP-API | 9e1fa4d7de1d844b453cd6b86faefba8bf051c3c | [
"MIT"
] | 1 | 2015-07-19T04:18:37.000Z | 2015-07-19T04:18:37.000Z | src/kuas_api/kuas/__init__.py | JohnSounder/AP-API | 9e1fa4d7de1d844b453cd6b86faefba8bf051c3c | [
"MIT"
] | 18 | 2015-09-05T11:14:17.000Z | 2015-10-18T08:09:33.000Z | src/kuas_api/kuas/__init__.py | JohnSounder/AP-API | 9e1fa4d7de1d844b453cd6b86faefba8bf051c3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module provide three kuas online system api.
AP system, bus system, and leave system.
Module AP
=========
.. automodule:: kuas_api.kuas.ap
:members:
Module Bus
==========
.. automodule:: kuas_api.kuas.bus
:members:
"""
__license__ = "MIT"
__docformat__ = "reStructuredText"
| 15.95 | 52 | 0.642633 |
183d1a37e1b08cfab545f0373942ac143972d479 | 34,860 | py | Python | test/unit/test_parser.py | sethwoodworth/dbt | 68babfb4bbd016e198bb09ac8dfd5dc71760ef7e | [
"Apache-2.0"
] | 1 | 2020-10-25T00:13:50.000Z | 2020-10-25T00:13:50.000Z | test/unit/test_parser.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | [
"Apache-2.0"
] | null | null | null | test/unit/test_parser.py | azhard/dbt | 9cd7cbc9e35e5a7c8c4f17a3d113263f4421ab55 | [
"Apache-2.0"
] | null | null | null | import unittest
from unittest import mock
import os
import yaml
import dbt.flags
import dbt.parser
from dbt.exceptions import CompilationException
from dbt.parser import (
ModelParser, MacroParser, DataTestParser, SchemaParser, ParseResult,
SnapshotParser, AnalysisParser
)
from dbt.parser.schemas import (
TestablePatchParser, SourceParser, AnalysisPatchParser, MacroPatchParser
)
from dbt.parser.search import FileBlock
from dbt.parser.schema_test_builders import YamlBlock
from dbt.parser.manifest import process_docs, process_sources, process_refs
from dbt.node_types import NodeType
from dbt.contracts.graph.manifest import (
Manifest, FilePath, SourceFile, FileHash
)
from dbt.contracts.graph.model_config import (
NodeConfig, TestConfig, TimestampSnapshotConfig, SnapshotStrategy,
)
from dbt.contracts.graph.parsed import (
ParsedModelNode, ParsedMacro, ParsedNodePatch, ParsedSourceDefinition,
DependsOn, ColumnInfo, ParsedDataTestNode, ParsedSnapshotNode,
ParsedAnalysisNode, ParsedDocumentation, UnpatchedSourceDefinition
)
from dbt.contracts.graph.unparsed import (
FreshnessThreshold, ExternalTable, Docs
)
from .utils import config_from_parts_or_dicts, normalize, generate_name_macros
def get_abs_os_path(unix_path):
return normalize(os.path.abspath(unix_path))
class BaseParserTest(unittest.TestCase):
maxDiff = None
def _generate_macros(self):
name_sql = {}
for component in ('database', 'schema', 'alias'):
if component == 'alias':
source = 'node.name'
else:
source = f'target.{component}'
name = f'generate_{component}_name'
sql = f'{{% macro {name}(value, node) %}} {{% if value %}} {{{{ value }}}} {{% else %}} {{{{ {source} }}}} {{% endif %}} {{% endmacro %}}'
name_sql[name] = sql
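            # e.g. for component == 'schema' the rendered macro source is:
            #   {% macro generate_schema_name(value, node) %} {% if value %}
            #   {{ value }} {% else %} {{ target.schema }} {% endif %} {% endmacro %}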
for name, sql in name_sql.items():
pm = ParsedMacro(
name=name,
resource_type=NodeType.Macro,
unique_id=f'macro.root.{name}',
package_name='root',
original_file_path=normalize('macros/macro.sql'),
root_path=get_abs_os_path('./dbt_modules/root'),
path=normalize('macros/macro.sql'),
macro_sql=sql,
)
yield pm
def setUp(self):
dbt.flags.STRICT_MODE = True
dbt.flags.WARN_ERROR = True
self.maxDiff = None
profile_data = {
'target': 'test',
'quoting': {},
'outputs': {
'test': {
'type': 'redshift',
'host': 'localhost',
'schema': 'analytics',
'user': 'test',
'pass': 'test',
'dbname': 'test',
'port': 1,
}
}
}
root_project = {
'name': 'root',
'version': '0.1',
'profile': 'test',
'project-root': normalize('/usr/src/app'),
}
self.root_project_config = config_from_parts_or_dicts(
project=root_project,
profile=profile_data,
cli_vars='{"test_schema_name": "foo"}'
)
snowplow_project = {
'name': 'snowplow',
'version': '0.1',
'profile': 'test',
'project-root': get_abs_os_path('./dbt_modules/snowplow'),
}
self.snowplow_project_config = config_from_parts_or_dicts(
project=snowplow_project, profile=profile_data
)
self.all_projects = {
'root': self.root_project_config,
'snowplow': self.snowplow_project_config
}
self.root_project_config.dependencies = self.all_projects
self.snowplow_project_config.dependencies = self.all_projects
self.patcher = mock.patch('dbt.context.providers.get_adapter')
self.factory = self.patcher.start()
self.parser_patcher = mock.patch('dbt.parser.base.get_adapter')
self.factory_parser = self.parser_patcher.start()
self.macro_manifest = Manifest.from_macros(
macros={m.unique_id: m for m in generate_name_macros('root')}
)
def tearDown(self):
self.parser_patcher.stop()
self.patcher.stop()
def file_block_for(self, data: str, filename: str, searched: str):
root_dir = get_abs_os_path('./dbt_modules/snowplow')
filename = normalize(filename)
path = FilePath(
searched_path=searched,
relative_path=filename,
project_root=root_dir,
)
source_file = SourceFile(
path=path,
checksum=FileHash.from_contents(data),
)
source_file.contents = data
return FileBlock(file=source_file)
def assert_has_results_length(self, results, files=1, macros=0, nodes=0,
sources=0, docs=0, patches=0, disabled=0):
self.assertEqual(len(results.files), files)
self.assertEqual(len(results.macros), macros)
self.assertEqual(len(results.nodes), nodes)
self.assertEqual(len(results.sources), sources)
self.assertEqual(len(results.docs), docs)
self.assertEqual(len(results.patches), patches)
self.assertEqual(sum(len(v) for v in results.disabled.values()), disabled)
SINGLE_TABLE_SOURCE = '''
version: 2
sources:
- name: my_source
tables:
- name: my_table
'''
SINGLE_TABLE_SOURCE_TESTS = '''
version: 2
sources:
- name: my_source
tables:
- name: my_table
description: A description of my table
columns:
- name: color
tests:
- not_null:
severity: WARN
- accepted_values:
values: ['red', 'blue', 'green']
'''
SINGLE_TABLE_MODEL_TESTS = '''
version: 2
models:
- name: my_model
description: A description of my model
columns:
- name: color
description: The color value
tests:
- not_null:
severity: WARN
- accepted_values:
values: ['red', 'blue', 'green']
- foreign_package.test_case:
arg: 100
'''
SINGLE_TABLE_SOURCE_PATCH = '''
version: 2
sources:
- name: my_source
overrides: snowplow
tables:
- name: my_table
columns:
- name: id
tests:
- not_null
- unique
'''
class SchemaParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = SchemaParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
root_project=self.root_project_config,
macro_manifest=self.macro_manifest,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'models')
def yaml_block_for(self, test_yml: str, filename: str):
file_block = self.file_block_for(data=test_yml, filename=filename)
return YamlBlock.from_file_block(
src=file_block,
data=yaml.safe_load(test_yml),
)
class SchemaParserSourceTest(SchemaParserTest):
def test__read_basic_source(self):
block = self.yaml_block_for(SINGLE_TABLE_SOURCE, 'test_one.yml')
analysis_blocks = AnalysisPatchParser(self.parser, block, 'analyses').parse()
model_blocks = TestablePatchParser(self.parser, block, 'models').parse()
source_blocks = SourceParser(self.parser, block, 'sources').parse()
macro_blocks = MacroPatchParser(self.parser, block, 'macros').parse()
self.assertEqual(len(analysis_blocks), 0)
self.assertEqual(len(model_blocks), 0)
self.assertEqual(len(source_blocks), 0)
self.assertEqual(len(macro_blocks), 0)
self.assertEqual(len(list(self.parser.results.patches)), 0)
self.assertEqual(len(list(self.parser.results.nodes)), 0)
results = list(self.parser.results.sources.values())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].source.name, 'my_source')
self.assertEqual(results[0].table.name, 'my_table')
self.assertEqual(results[0].table.description, '')
self.assertEqual(len(results[0].table.columns), 0)
def test__parse_basic_source(self):
block = self.file_block_for(SINGLE_TABLE_SOURCE, 'test_one.yml')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, sources=1)
src = list(self.parser.results.sources.values())[0]
assert isinstance(src, UnpatchedSourceDefinition)
assert src.package_name == 'snowplow'
assert src.source.name == 'my_source'
assert src.table.name == 'my_table'
assert src.resource_type == NodeType.Source
assert src.fqn == ['snowplow', 'my_source', 'my_table']
def test__read_basic_source_tests(self):
block = self.yaml_block_for(SINGLE_TABLE_SOURCE_TESTS, 'test_one.yml')
analysis_tests = AnalysisPatchParser(self.parser, block, 'analyses').parse()
model_tests = TestablePatchParser(self.parser, block, 'models').parse()
source_tests = SourceParser(self.parser, block, 'sources').parse()
macro_tests = MacroPatchParser(self.parser, block, 'macros').parse()
self.assertEqual(len(analysis_tests), 0)
self.assertEqual(len(model_tests), 0)
self.assertEqual(len(source_tests), 0)
self.assertEqual(len(macro_tests), 0)
self.assertEqual(len(list(self.parser.results.nodes)), 0)
self.assertEqual(len(list(self.parser.results.patches)), 0)
self.assertEqual(len(list(self.parser.results.source_patches)), 0)
results = list(self.parser.results.sources.values())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].source.name, 'my_source')
self.assertEqual(results[0].table.name, 'my_table')
self.assertEqual(results[0].table.description, 'A description of my table')
self.assertEqual(len(results[0].table.columns), 1)
def test__parse_basic_source_tests(self):
block = self.file_block_for(SINGLE_TABLE_SOURCE_TESTS, 'test_one.yml')
self.parser.parse_file(block)
self.assertEqual(len(self.parser.results.nodes), 0)
self.assertEqual(len(self.parser.results.sources), 1)
self.assertEqual(len(self.parser.results.patches), 0)
src = list(self.parser.results.sources.values())[0]
self.assertEqual(src.source.name, 'my_source')
self.assertEqual(src.source.schema, None)
self.assertEqual(src.table.name, 'my_table')
self.assertEqual(src.table.description, 'A description of my table')
tests = [
self.parser.parse_source_test(src, test, col)
for test, col in src.get_tests()
]
tests.sort(key=lambda n: n.unique_id)
self.assertEqual(tests[0].config.severity, 'ERROR')
self.assertEqual(tests[0].tags, ['schema'])
self.assertEqual(tests[0].sources, [['my_source', 'my_table']])
self.assertEqual(tests[0].column_name, 'color')
self.assertEqual(tests[0].fqn, ['snowplow', 'schema_test', tests[0].name])
self.assertEqual(tests[1].config.severity, 'WARN')
self.assertEqual(tests[1].tags, ['schema'])
self.assertEqual(tests[1].sources, [['my_source', 'my_table']])
self.assertEqual(tests[1].column_name, 'color')
self.assertEqual(tests[1].fqn, ['snowplow', 'schema_test', tests[1].name])
path = get_abs_os_path('./dbt_modules/snowplow/models/test_one.yml')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].nodes, [])
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].sources,
['source.snowplow.my_source.my_table'])
self.assertEqual(self.parser.results.files[path].source_patches, [])
def test__read_source_patch(self):
block = self.yaml_block_for(SINGLE_TABLE_SOURCE_PATCH, 'test_one.yml')
analysis_tests = AnalysisPatchParser(self.parser, block, 'analyses').parse()
model_tests = TestablePatchParser(self.parser, block, 'models').parse()
source_tests = SourceParser(self.parser, block, 'sources').parse()
macro_tests = MacroPatchParser(self.parser, block, 'macros').parse()
self.assertEqual(len(analysis_tests), 0)
self.assertEqual(len(model_tests), 0)
self.assertEqual(len(source_tests), 0)
self.assertEqual(len(macro_tests), 0)
self.assertEqual(len(list(self.parser.results.nodes)), 0)
self.assertEqual(len(list(self.parser.results.patches)), 0)
self.assertEqual(len(list(self.parser.results.sources)), 0)
results = list(self.parser.results.source_patches.values())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].name, 'my_source')
self.assertEqual(results[0].overrides, 'snowplow')
self.assertIsNone(results[0].description)
self.assertEqual(len(results[0].tables), 1)
table = results[0].tables[0]
self.assertEqual(table.name, 'my_table')
self.assertIsNone(table.description)
self.assertEqual(len(table.columns), 1)
self.assertEqual(len(table.columns[0].tests), 2)
class SchemaParserModelsTest(SchemaParserTest):
def test__read_basic_model_tests(self):
block = self.yaml_block_for(SINGLE_TABLE_MODEL_TESTS, 'test_one.yml')
self.parser.parse_file(block)
self.assertEqual(len(list(self.parser.results.patches)), 1)
self.assertEqual(len(list(self.parser.results.sources)), 0)
self.assertEqual(len(list(self.parser.results.nodes)), 3)
def test__parse_basic_model_tests(self):
block = self.file_block_for(SINGLE_TABLE_MODEL_TESTS, 'test_one.yml')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, patches=1, nodes=3)
patch = list(self.parser.results.patches.values())[0]
self.assertEqual(len(patch.columns), 1)
self.assertEqual(patch.name, 'my_model')
self.assertEqual(patch.description, 'A description of my model')
expected_patch = ParsedNodePatch(
name='my_model',
description='A description of my model',
columns={'color': ColumnInfo(name='color', description='The color value')},
original_file_path=normalize('models/test_one.yml'),
meta={},
yaml_key='models',
package_name='snowplow',
docs=Docs(show=True),
)
self.assertEqual(patch, expected_patch)
tests = sorted(self.parser.results.nodes.values(), key=lambda n: n.unique_id)
self.assertEqual(tests[0].config.severity, 'ERROR')
self.assertEqual(tests[0].tags, ['schema'])
self.assertEqual(tests[0].refs, [['my_model']])
self.assertEqual(tests[0].column_name, 'color')
self.assertEqual(tests[0].package_name, 'snowplow')
self.assertTrue(tests[0].name.startswith('accepted_values_'))
self.assertEqual(tests[0].fqn, ['snowplow', 'schema_test', tests[0].name])
self.assertEqual(tests[0].unique_id.split('.'), ['test', 'snowplow', tests[0].name])
self.assertEqual(tests[0].test_metadata.name, 'accepted_values')
self.assertIsNone(tests[0].test_metadata.namespace)
self.assertEqual(
tests[0].test_metadata.kwargs,
{
'column_name': 'color',
'model': "{{ ref('my_model') }}",
'values': ['red', 'blue', 'green'],
}
)
# foreign packages are a bit weird, they include the macro package
# name in the test name
self.assertEqual(tests[1].config.severity, 'ERROR')
self.assertEqual(tests[1].tags, ['schema'])
self.assertEqual(tests[1].refs, [['my_model']])
self.assertEqual(tests[1].column_name, 'color')
self.assertEqual(tests[1].column_name, 'color')
self.assertEqual(tests[1].fqn, ['snowplow', 'schema_test', tests[1].name])
self.assertTrue(tests[1].name.startswith('foreign_package_test_case_'))
self.assertEqual(tests[1].package_name, 'snowplow')
self.assertEqual(tests[1].unique_id.split('.'), ['test', 'snowplow', tests[1].name])
self.assertEqual(tests[1].test_metadata.name, 'test_case')
self.assertEqual(tests[1].test_metadata.namespace, 'foreign_package')
self.assertEqual(
tests[1].test_metadata.kwargs,
{
'column_name': 'color',
'model': "{{ ref('my_model') }}",
'arg': 100,
},
)
self.assertEqual(tests[2].config.severity, 'WARN')
self.assertEqual(tests[2].tags, ['schema'])
self.assertEqual(tests[2].refs, [['my_model']])
self.assertEqual(tests[2].column_name, 'color')
self.assertEqual(tests[2].package_name, 'snowplow')
self.assertTrue(tests[2].name.startswith('not_null_'))
self.assertEqual(tests[2].fqn, ['snowplow', 'schema_test', tests[2].name])
self.assertEqual(tests[2].unique_id.split('.'), ['test', 'snowplow', tests[2].name])
self.assertEqual(tests[2].test_metadata.name, 'not_null')
self.assertIsNone(tests[2].test_metadata.namespace)
self.assertEqual(
tests[2].test_metadata.kwargs,
{
'column_name': 'color',
'model': "{{ ref('my_model') }}",
},
)
path = get_abs_os_path('./dbt_modules/snowplow/models/test_one.yml')
self.assertIn(path, self.parser.results.files)
self.assertEqual(sorted(self.parser.results.files[path].nodes),
[t.unique_id for t in tests])
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].patches, ['my_model'])
class ModelParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = ModelParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
root_project=self.root_project_config,
macro_manifest=self.macro_manifest,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'models')
def test_basic(self):
raw_sql = '{{ config(materialized="table") }}select 1 as id'
block = self.file_block_for(raw_sql, 'nested/model_1.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, nodes=1)
node = list(self.parser.results.nodes.values())[0]
expected = ParsedModelNode(
alias='model_1',
name='model_1',
database='test',
schema='analytics',
resource_type=NodeType.Model,
unique_id='model.snowplow.model_1',
fqn=['snowplow', 'nested', 'model_1'],
package_name='snowplow',
original_file_path=normalize('models/nested/model_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
config=NodeConfig(materialized='table'),
path=normalize('nested/model_1.sql'),
raw_sql=raw_sql,
)
self.assertEqual(node, expected)
path = get_abs_os_path('./dbt_modules/snowplow/models/nested/model_1.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].nodes, ['model.snowplow.model_1'])
def test_parse_error(self):
block = self.file_block_for('{{ SYNTAX ERROR }}', 'nested/model_1.sql')
with self.assertRaises(CompilationException):
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, files=0)
class SnapshotParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = SnapshotParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
root_project=self.root_project_config,
macro_manifest=self.macro_manifest,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'snapshots')
def test_parse_error(self):
block = self.file_block_for('{% snapshot foo %}select 1 as id{%snapshot bar %}{% endsnapshot %}', 'nested/snap_1.sql')
with self.assertRaises(CompilationException):
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, files=0)
def test_single_block(self):
raw_sql = '''{{
config(unique_key="id", target_schema="analytics",
target_database="dbt", strategy="timestamp",
updated_at="last_update")
}}
select 1 as id, now() as last_update'''
full_file = '''
{{% snapshot foo %}}{}{{% endsnapshot %}}
'''.format(raw_sql)
block = self.file_block_for(full_file, 'nested/snap_1.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, nodes=1)
node = list(self.parser.results.nodes.values())[0]
expected = ParsedSnapshotNode(
alias='foo',
name='foo',
            # the `database` entry is overridden by the target_database config
database='dbt',
schema='analytics',
resource_type=NodeType.Snapshot,
unique_id='snapshot.snowplow.foo',
fqn=['snowplow', 'nested', 'snap_1', 'foo'],
package_name='snowplow',
original_file_path=normalize('snapshots/nested/snap_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
config=TimestampSnapshotConfig(
strategy=SnapshotStrategy.Timestamp,
updated_at='last_update',
target_database='dbt',
target_schema='analytics',
unique_key='id',
materialized='snapshot',
),
path=normalize('nested/snap_1.sql'),
raw_sql=raw_sql,
)
self.assertEqual(node, expected)
path = get_abs_os_path('./dbt_modules/snowplow/snapshots/nested/snap_1.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].nodes, ['snapshot.snowplow.foo'])
def test_multi_block(self):
raw_1 = '''
{{
config(unique_key="id", target_schema="analytics",
target_database="dbt", strategy="timestamp",
updated_at="last_update")
}}
select 1 as id, now() as last_update
'''
raw_2 = '''
{{
config(unique_key="id", target_schema="analytics",
target_database="dbt", strategy="timestamp",
updated_at="last_update")
}}
select 2 as id, now() as last_update
'''
full_file = '''
{{% snapshot foo %}}{}{{% endsnapshot %}}
{{% snapshot bar %}}{}{{% endsnapshot %}}
'''.format(raw_1, raw_2)
block = self.file_block_for(full_file, 'nested/snap_1.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, nodes=2)
nodes = sorted(self.parser.results.nodes.values(), key=lambda n: n.name)
expect_foo = ParsedSnapshotNode(
alias='foo',
name='foo',
database='dbt',
schema='analytics',
resource_type=NodeType.Snapshot,
unique_id='snapshot.snowplow.foo',
fqn=['snowplow', 'nested', 'snap_1', 'foo'],
package_name='snowplow',
original_file_path=normalize('snapshots/nested/snap_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
config=TimestampSnapshotConfig(
strategy=SnapshotStrategy.Timestamp,
updated_at='last_update',
target_database='dbt',
target_schema='analytics',
unique_key='id',
materialized='snapshot',
),
path=normalize('nested/snap_1.sql'),
raw_sql=raw_1,
)
expect_bar = ParsedSnapshotNode(
alias='bar',
name='bar',
database='dbt',
schema='analytics',
resource_type=NodeType.Snapshot,
unique_id='snapshot.snowplow.bar',
fqn=['snowplow', 'nested', 'snap_1', 'bar'],
package_name='snowplow',
original_file_path=normalize('snapshots/nested/snap_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
config=TimestampSnapshotConfig(
strategy=SnapshotStrategy.Timestamp,
updated_at='last_update',
target_database='dbt',
target_schema='analytics',
unique_key='id',
materialized='snapshot',
),
path=normalize('nested/snap_1.sql'),
raw_sql=raw_2,
)
self.assertEqual(nodes[0], expect_bar)
self.assertEqual(nodes[1], expect_foo)
path = get_abs_os_path('./dbt_modules/snowplow/snapshots/nested/snap_1.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(sorted(self.parser.results.files[path].nodes),
['snapshot.snowplow.bar', 'snapshot.snowplow.foo'])
class MacroParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = MacroParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'macros')
def test_single_block(self):
raw_sql = '{% macro foo(a, b) %}a ~ b{% endmacro %}'
block = self.file_block_for(raw_sql, 'macro.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, macros=1)
macro = list(self.parser.results.macros.values())[0]
expected = ParsedMacro(
name='foo',
resource_type=NodeType.Macro,
unique_id='macro.snowplow.foo',
package_name='snowplow',
original_file_path=normalize('macros/macro.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
path=normalize('macros/macro.sql'),
macro_sql=raw_sql,
)
self.assertEqual(macro, expected)
path = get_abs_os_path('./dbt_modules/snowplow/macros/macro.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].macros, ['macro.snowplow.foo'])
def test_multiple_blocks(self):
raw_sql = '{% macro foo(a, b) %}a ~ b{% endmacro %}\n{% macro bar(c, d) %}c + d{% endmacro %}'
block = self.file_block_for(raw_sql, 'macro.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, macros=2)
macros = sorted(self.parser.results.macros.values(), key=lambda m: m.name)
expected_bar = ParsedMacro(
name='bar',
resource_type=NodeType.Macro,
unique_id='macro.snowplow.bar',
package_name='snowplow',
original_file_path=normalize('macros/macro.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
path=normalize('macros/macro.sql'),
macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}',
)
expected_foo = ParsedMacro(
name='foo',
resource_type=NodeType.Macro,
unique_id='macro.snowplow.foo',
package_name='snowplow',
original_file_path=normalize('macros/macro.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
path=normalize('macros/macro.sql'),
macro_sql='{% macro foo(a, b) %}a ~ b{% endmacro %}',
)
self.assertEqual(macros, [expected_bar, expected_foo])
path = get_abs_os_path('./dbt_modules/snowplow/macros/macro.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(
sorted(self.parser.results.files[path].macros),
['macro.snowplow.bar', 'macro.snowplow.foo'],
)
class DataTestParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = DataTestParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
root_project=self.root_project_config,
macro_manifest=self.macro_manifest,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'tests')
def test_basic(self):
raw_sql = 'select * from {{ ref("blah") }} limit 0'
block = self.file_block_for(raw_sql, 'test_1.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, nodes=1)
node = list(self.parser.results.nodes.values())[0]
expected = ParsedDataTestNode(
alias='test_1',
name='test_1',
database='test',
schema='analytics',
resource_type=NodeType.Test,
unique_id='test.snowplow.test_1',
fqn=['snowplow', 'data_test', 'test_1'],
package_name='snowplow',
original_file_path=normalize('tests/test_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
refs=[['blah']],
config=TestConfig(severity='ERROR'),
tags=['data'],
path=normalize('data_test/test_1.sql'),
raw_sql=raw_sql,
)
self.assertEqual(node, expected)
path = get_abs_os_path('./dbt_modules/snowplow/tests/test_1.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].nodes, ['test.snowplow.test_1'])
class AnalysisParserTest(BaseParserTest):
def setUp(self):
super().setUp()
self.parser = AnalysisParser(
results=ParseResult.rpc(),
project=self.snowplow_project_config,
root_project=self.root_project_config,
macro_manifest=self.macro_manifest,
)
def file_block_for(self, data, filename):
return super().file_block_for(data, filename, 'analyses')
def test_basic(self):
raw_sql = 'select 1 as id'
block = self.file_block_for(raw_sql, 'nested/analysis_1.sql')
self.parser.parse_file(block)
self.assert_has_results_length(self.parser.results, nodes=1)
node = list(self.parser.results.nodes.values())[0]
expected = ParsedAnalysisNode(
alias='analysis_1',
name='analysis_1',
database='test',
schema='analytics',
resource_type=NodeType.Analysis,
unique_id='analysis.snowplow.analysis_1',
fqn=['snowplow', 'analysis', 'nested', 'analysis_1'],
package_name='snowplow',
original_file_path=normalize('analyses/nested/analysis_1.sql'),
root_path=get_abs_os_path('./dbt_modules/snowplow'),
depends_on=DependsOn(),
config=NodeConfig(),
path=normalize('analysis/nested/analysis_1.sql'),
raw_sql=raw_sql,
)
self.assertEqual(node, expected)
path = get_abs_os_path('./dbt_modules/snowplow/analyses/nested/analysis_1.sql')
self.assertIn(path, self.parser.results.files)
self.assertEqual(self.parser.results.files[path].nodes, ['analysis.snowplow.analysis_1'])
class ProcessingTest(BaseParserTest):
def setUp(self):
super().setUp()
x_depends_on = mock.MagicMock()
y_depends_on = mock.MagicMock()
x_uid = 'model.project.x'
y_uid = 'model.otherproject.y'
src_uid = 'source.thirdproject.src.tbl'
self.x_node = mock.MagicMock(
__class__=ParsedModelNode,
package_name='project',
search_name='x',
config=mock.MagicMock(enabled=True),
refs=[],
sources=[['src', 'tbl']],
unique_id=x_uid,
resource_type=NodeType.Model,
depends_on=x_depends_on,
description='other_project: {{ doc("otherproject", "my_doc") }}',
)
self.y_node = mock.MagicMock(
__class__=ParsedModelNode,
package_name='otherproject',
search_name='y',
config=mock.MagicMock(enabled=True),
refs=[['x']],
sources=[],
unique_id=y_uid,
resource_type=NodeType.Model,
depends_on=y_depends_on,
description='{{ doc("my_doc") }}',
)
self.src_node = mock.MagicMock(
__class__=ParsedSourceDefinition,
package_name='project',
search_name='src.tbl',
config=mock.MagicMock(enabled=True),
resource_type=NodeType.Source,
unique_id=src_uid,
)
nodes = {
x_uid: self.x_node,
y_uid: self.y_node,
}
sources = {
src_uid: self.src_node,
}
docs = {
'otherproject.my_doc': mock.MagicMock(
__class__=ParsedDocumentation,
resource_type=NodeType.Documentation,
search_name='my_doc',
package_name='otherproject',
block_contents='some docs',
)
}
self.manifest = Manifest(
nodes=nodes, sources=sources, macros={}, docs=docs, disabled=[], files={}, generated_at=mock.MagicMock()
)
def test_process_docs(self):
process_docs(self.manifest, self.root_project_config)
self.assertEqual(self.x_node.description, 'other_project: some docs')
self.assertEqual(self.y_node.description, 'some docs')
def test_process_sources(self):
process_sources(self.manifest, 'project')
self.x_node.depends_on.nodes.append.assert_called_once_with('source.thirdproject.src.tbl')
def test_process_refs(self):
process_refs(self.manifest, 'project')
self.y_node.depends_on.nodes.append.assert_called_once_with('model.project.x')
| 40.487805 | 150 | 0.611044 |
6355544d3660c7c7411af12bc1ea0b5d84b39cbb | 2,700 | py | Python | demo_train.py | shubham0704/MeshCNN | 0085e06ab6b06402344130af4e25f0038918bb73 | [
"MIT"
] | null | null | null | demo_train.py | shubham0704/MeshCNN | 0085e06ab6b06402344130af4e25f0038918bb73 | [
"MIT"
] | null | null | null | demo_train.py | shubham0704/MeshCNN | 0085e06ab6b06402344130af4e25f0038918bb73 | [
"MIT"
] | null | null | null | import time
from options.demo_train_options import TrainOptions
from data import DataLoader
from models import create_model
from util.writer import Writer
from test import run_test
import pdb
from torch.profiler import profile, record_function, ProfilerActivity
if __name__ == '__main__':
opt = TrainOptions().parse()
dataset = DataLoader(opt)
dataset_size = len(dataset)
print('#training meshes = %d' % dataset_size)
# pdb.set_trace()
model = create_model(opt)
# for i, data in enumerate(dataset):
# model.set_input(data)
writer = Writer(opt)
total_steps = 0
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
iter_data_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset):
iter_start_time = time.time()
if total_steps % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_steps += opt.batch_size
epoch_iter += opt.batch_size
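            # Profiler note: this context records op-level CPU/CUDA time and
            # memory for one optimization step; the tables printed after the
            # block sort by self CPU memory usage and self CUDA time.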
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
profile_memory=True, record_shapes=True
) as prof:
model.set_input(data)
model.optimize_parameters()
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
print(prof.key_averages(group_by_stack_n=5).table(sort_by="self_cuda_time_total", row_limit=2))
if total_steps % opt.print_freq == 0:
loss = model.loss
t = (time.time() - iter_start_time) / opt.batch_size
writer.print_current_losses(epoch, epoch_iter, loss, t, t_data)
writer.plot_loss(loss, epoch, epoch_iter, dataset_size)
if i % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save_network('latest')
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save_network('latest')
model.save_network(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
if opt.verbose_plot:
writer.plot_model_wts(model, epoch)
if epoch % opt.run_test_freq == 0:
acc = run_test(epoch)
writer.plot_acc(acc, epoch)
writer.close()
| 36.486486 | 107 | 0.612593 |
8306ee39baa9c41d5b790a15dc36c675b8aa992c | 1,461 | py | Python | linkpath.py | riceissa/ea-forum-reader | c340db63705ee2eb1dc64281fd6d2701451372b5 | [
"CC0-1.0"
] | 8 | 2018-11-10T19:52:55.000Z | 2022-01-19T20:43:15.000Z | linkpath.py | riceissa/ea-forum-reader | c340db63705ee2eb1dc64281fd6d2701451372b5 | [
"CC0-1.0"
] | 40 | 2018-11-23T22:19:05.000Z | 2021-08-03T17:02:33.000Z | linkpath.py | riceissa/ea-forum-reader | c340db63705ee2eb1dc64281fd6d2701451372b5 | [
"CC0-1.0"
] | 3 | 2018-11-24T06:04:28.000Z | 2020-05-23T09:28:40.000Z | #!/usr/bin/env python3
import config
PATH_STYLE = config.PATH_STYLE
def posts(postid, postslug="", display_format="html"):
if postid is None:
postid = ""
if PATH_STYLE == "localhost":
if display_format == "html":
return "./posts.php?id=" + postid
else:
return "./posts.php?id=" + postid + "&format=" + display_format
else:
if display_format == "html":
return "/posts/" + postid + "/" + postslug
else:
return "/posts/" + postid + "/" + postslug + "?format=" + display_format
def users(userslug, display_format="html"):
if PATH_STYLE == "localhost":
return "./users.php?id=" + userslug + ("&format=" + display_format if display_format != "html" else "")
else:
return "/users/" + userslug + ("?format=" + display_format if display_format != "html" else "")
def userlist(sort="karma", display_format="html"):
if PATH_STYLE == "localhost":
if display_format != "html":
return "./userlist.php?sort=" + sort + "&format=" + display_format
else:
return "./userlist.php?sort=" + sort
else:
if display_format != "html":
return "/userlist?sort=" + sort + "&format=" + display_format
else:
return "/userlist?sort=" + sort
def search():
if PATH_STYLE == "localhost":
return "./search.php"
else:
return "/search.php"
| 32.466667 | 115 | 0.569473 |
95a2660524516838445794f92b484267e4d12378 | 2,570 | py | Python | evodcinv/_io/_helpers.py | keurfonluu/evodcinv | d7059ebdbdea00a1819dfdcdd5820387c72d0125 | [
"BSD-3-Clause"
] | 9 | 2021-12-11T09:48:33.000Z | 2022-03-20T10:32:25.000Z | evodcinv/_io/_helpers.py | keurfonluu/evodcinv | d7059ebdbdea00a1819dfdcdd5820387c72d0125 | [
"BSD-3-Clause"
] | 2 | 2021-12-13T00:14:24.000Z | 2021-12-16T09:11:11.000Z | evodcinv/_io/_helpers.py | keurfonluu/evodcinv | d7059ebdbdea00a1819dfdcdd5820387c72d0125 | [
"BSD-3-Clause"
] | 2 | 2021-12-21T07:36:39.000Z | 2022-02-25T13:12:52.000Z | import os
_extension_to_filetype = {}
_reader_map = {}
_writer_map = {}
def register(file_format, extensions, reader, writer=None):
"""
    Register a new file format.
Parameters
----------
file_format : str
File format to register.
extensions : array_like
List of extensions to associate to the new format.
reader : callable
        Read function.
writer : callable or None, optional, default None
Write function.
"""
for ext in extensions:
_extension_to_filetype[ext] = file_format
if reader is not None:
_reader_map[file_format] = reader
if writer is not None:
_writer_map[file_format] = writer
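# Usage sketch -- the "yaml" format name, extensions, and callables below are
# hypothetical, for illustration only:
#   register("yaml", [".yml", ".yaml"], reader=my_reader, writer=my_writer)
#   result = read("run.yml")    # dispatches to my_reader
#   write("run.yaml", result)   # dispatches to my_writer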
def read(filename, file_format=None, **kwargs):
"""
Read inversion results.
Parameters
----------
filename : str
Input file name.
file_format : str ('h5', 'json') or None, optional, default None
Input file format.
Returns
-------
:class:`evodcinv.InversionResult`
Inversion results.
"""
if not isinstance(filename, str):
raise TypeError()
if file_format is None:
file_format = filetype_from_filename(filename, _extension_to_filetype)
else:
if file_format not in _reader_map:
raise ValueError()
return _reader_map[file_format](filename, **kwargs)
def write(filename, result, file_format=None, **kwargs):
"""
    Write inversion results.
Parameters
----------
filename : str
Output file name.
result : :class:`evodcinv.InversionResult`
Inversion results to export.
file_format : str ('h5', 'json') or None, optional, default None
Output file format.
Other Parameters
----------------
compression_opts : int, optional, default 4
Only if ``file_format = "h5"``. Compression level for gzip compression. May be an integer from 0 to 9.
indent : int, str or None, optional, default None
Only if ``file_format = "json"``. Indent level.
"""
if not isinstance(filename, str):
raise TypeError()
if file_format is None:
file_format = filetype_from_filename(filename, _extension_to_filetype)
else:
        if file_format not in _writer_map:
raise ValueError()
_writer_map[file_format](filename, result, **kwargs)
def filetype_from_filename(filename, ext_to_fmt):
"""Determine file type from its extension."""
ext = os.path.splitext(filename)[1].lower()
    return ext_to_fmt.get(ext, "")
| 24.951456 | 110 | 0.636576 |
31e7c706ec9c73c20a63dbaa86e441ee1db8cc8a | 4,170 | py | Python | tests/components/vera/common.py | SmarthomeNinja/core | f4b8a95205ea7d4126fc5e704da532cd8eed937e | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | tests/components/vera/common.py | SmarthomeNinja/core | f4b8a95205ea7d4126fc5e704da532cd8eed937e | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | tests/components/vera/common.py | SmarthomeNinja/core | f4b8a95205ea7d4126fc5e704da532cd8eed937e | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Common code for tests."""
from typing import Callable, Dict, NamedTuple, Tuple
import pyvera as pv
from homeassistant.components.vera.const import CONF_CONTROLLER, DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock
from tests.common import MockConfigEntry
SetupCallback = Callable[[pv.VeraController, dict], None]
class ControllerData(NamedTuple):
"""Test data about a specific vera controller."""
controller: pv.VeraController
update_callback: Callable
class ComponentData(NamedTuple):
"""Test data about the vera component."""
controller_data: ControllerData
class ControllerConfig(NamedTuple):
"""Test config for mocking a vera controller."""
config: Dict
options: Dict
config_from_file: bool
serial_number: str
devices: Tuple[pv.VeraDevice, ...]
scenes: Tuple[pv.VeraScene, ...]
setup_callback: SetupCallback
def new_simple_controller_config(
config: dict = None,
options: dict = None,
config_from_file=False,
serial_number="1111",
devices: Tuple[pv.VeraDevice, ...] = (),
scenes: Tuple[pv.VeraScene, ...] = (),
setup_callback: SetupCallback = None,
) -> ControllerConfig:
"""Create simple contorller config."""
return ControllerConfig(
config=config or {CONF_CONTROLLER: "http://127.0.0.1:123"},
options=options,
config_from_file=config_from_file,
serial_number=serial_number,
devices=devices,
scenes=scenes,
setup_callback=setup_callback,
)
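# Usage sketch (``component_factory`` is assumed to be a pytest fixture that
# wires up ComponentFactory; the serial number is illustrative):
#   config = new_simple_controller_config(serial_number="2222")
#   component_data = await component_factory.configure_component(hass, config)
#   controller = component_data.controller_data.controller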
class ComponentFactory:
"""Factory class."""
def __init__(self, vera_controller_class_mock):
"""Initialize the factory."""
self.vera_controller_class_mock = vera_controller_class_mock
async def configure_component(
self, hass: HomeAssistant, controller_config: ControllerConfig
) -> ComponentData:
"""Configure the component with specific mock data."""
component_config = {
**(controller_config.config or {}),
**(controller_config.options or {}),
}
controller = MagicMock(spec=pv.VeraController) # type: pv.VeraController
controller.base_url = component_config.get(CONF_CONTROLLER)
controller.register = MagicMock()
controller.start = MagicMock()
controller.stop = MagicMock()
controller.refresh_data = MagicMock()
controller.temperature_units = "C"
controller.serial_number = controller_config.serial_number
controller.get_devices = MagicMock(return_value=controller_config.devices)
controller.get_scenes = MagicMock(return_value=controller_config.scenes)
for vera_obj in controller.get_devices() + controller.get_scenes():
vera_obj.vera_controller = controller
controller.get_devices.reset_mock()
controller.get_scenes.reset_mock()
if controller_config.setup_callback:
controller_config.setup_callback(controller)
self.vera_controller_class_mock.return_value = controller
hass_config = {}
# Setup component through config file import.
if controller_config.config_from_file:
hass_config[DOMAIN] = component_config
# Setup Home Assistant.
assert await async_setup_component(hass, DOMAIN, hass_config)
await hass.async_block_till_done()
# Setup component through config flow.
if not controller_config.config_from_file:
entry = MockConfigEntry(
domain=DOMAIN, data=component_config, options={}, unique_id="12345"
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
update_callback = (
controller.register.call_args_list[0][0][1]
if controller.register.call_args_list
else None
)
return ComponentData(
controller_data=ControllerData(
controller=controller, update_callback=update_callback
)
)
| 31.590909 | 83 | 0.682974 |
5af29fabd129e5cb6d8074754e3d59018c81251b | 1,719 | py | Python | nncf/tensorflow/pruning/callbacks.py | vshampor/nncf | 4916668308f6a151794b1953fa759d0154ba16ef | [
"Apache-2.0"
] | null | null | null | nncf/tensorflow/pruning/callbacks.py | vshampor/nncf | 4916668308f6a151794b1953fa759d0154ba16ef | [
"Apache-2.0"
] | null | null | null | nncf/tensorflow/pruning/callbacks.py | vshampor/nncf | 4916668308f6a151794b1953fa759d0154ba16ef | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.statistics import NNCFStatistics
from nncf.tensorflow.callbacks.statistics_callback import StatisticsCallback
class PruningStatisticsCallback(StatisticsCallback):
"""
Callback for logging pruning compression statistics to tensorboard and stdout.
"""
def _prepare_for_tensorboard(self, stats: NNCFStatistics):
base_prefix = '2.compression/statistics'
detailed_prefix = '3.compression_details/statistics'
ms = stats.filter_pruning.model_statistics
tensorboard_stats = {
f'{base_prefix}/algo_current_pruning_level': stats.filter_pruning.current_pruning_level,
f'{base_prefix}/model_FLOPS_pruning_level': ms.flops_pruning_level,
f'{base_prefix}/model_params_pruning_level': ms.params_pruning_level,
f'{base_prefix}/model_filters_pruning_level': ms.filter_pruning_level,
}
for ls in ms.pruned_layers_summary:
layer_name, pruning_level = ls.name, ls.filter_pruning_level
tensorboard_stats[f'{detailed_prefix}/{layer_name}/pruning_level'] = pruning_level
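        # e.g. a pruned layer named "conv2d" contributes the key
        # '3.compression_details/statistics/conv2d/pruning_level'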
return tensorboard_stats
| 42.975 | 100 | 0.749273 |
5e71d3b0054d9368519bff71d526a585a50693b9 | 68 | py | Python | sheetwork/__init__.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 9 | 2020-12-10T12:12:42.000Z | 2021-11-24T20:56:36.000Z | sheetwork/__init__.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 266 | 2020-04-19T10:50:19.000Z | 2022-03-14T22:12:43.000Z | sheetwork/__init__.py | bastienboutonnet/sheetwork | 7aa757ed12375ddd2c56502b721d91146d22b7ea | [
"MIT"
] | 3 | 2020-04-25T18:11:20.000Z | 2020-12-21T09:36:34.000Z | """Version module and init for sheetwork."""
__version__ = "1.0.7"
| 17 | 44 | 0.676471 |
935560a67bf01e97bb10abaa9d69053eb6f960dc | 2,532 | py | Python | app/tests/test_b1_get_github_urls.py | OmarThinks/Gemography-Challenge-1 | 79da3c6f1a79a8d9834d302be4bbafd2b4190a8c | [
"MIT"
] | null | null | null | app/tests/test_b1_get_github_urls.py | OmarThinks/Gemography-Challenge-1 | 79da3c6f1a79a8d9834d302be4bbafd2b4190a8c | [
"MIT"
] | null | null | null | app/tests/test_b1_get_github_urls.py | OmarThinks/Gemography-Challenge-1 | 79da3c6f1a79a8d9834d302be4bbafd2b4190a8c | [
"MIT"
] | null | null | null | import unittest
from rest_api.serializers import GithubSearchRepoSerializer
"""
Here we will test the queries builder
"""
"""
To run the tests
pytest
pytest -rP
pytest -rP --junitxml=test-reports/junit.xml --html=test-reports/pytest_report.html --self-contained-html
"""
class QueriesBuilderTestCase(unittest.TestCase):
def test_000(self):
self.assertEqual(
"{date_format} {order_format} {page}".format(
date_format="123", order_format="456", page=1),
"123 456 1")
self.assertEqual("https://api.github.com/search/repositories?q=created:>{date_format}&sort=stars&order={order_format}&per_page=100&page={page}".format(
date_format = "123", order_format = "456", page = 1),
"https://api.github.com/search/repositories?q=created:>123&sort=stars&order=456&per_page=100&page=1"
)
mysrt = "https://api.github.com/search/repositories?"+"q=created:>{date_format}&sort=stars&order={order_format}"+"&per_page=100&page={page}"
mysrt.format(date_format = "123", order_format = "456", page = 1)
#print(mysrt)
print("test_000:testing string formatting")
def test_001(self):
serializer = GithubSearchRepoSerializer(data ={
"date" : "2019-04-29",
"order" : "asc",
"records":99
})
serializer.is_valid()
queries = serializer.get_github_urls()
self.assertEqual(queries, [
"https://api.github.com/search/repositories?"+
"q=created:>2019-04-29&sort=stars&order=asc"+
"&per_page=100&page=1"])
print("test_001:asc one page")
def test_002(self):
serializer = GithubSearchRepoSerializer(data ={
"date" : "2019-04-29",
"order" : "desc",
"records":100
})
serializer.is_valid()
queries = serializer.get_github_urls()
#print(queries)
self.assertEqual(queries, [
"https://api.github.com/search/repositories?"+
"q=created:>2019-04-29&sort=stars&order=desc"+
"&per_page=100&page=1"])
print("test_002:decs one page on the edge")
def test_003(self):
serializer = GithubSearchRepoSerializer(data ={
"date" : "2019-04-29",
"order" : "desc",
"records":101
})
serializer.is_valid()
queries = serializer.get_github_urls()
self.assertEqual(queries, [
"https://api.github.com/search/repositories?"+
"q=created:>2019-04-29&sort=stars&order=desc"+
"&per_page=100&page=1",
"https://api.github.com/search/repositories?"+
"q=created:>2019-04-29&sort=stars&order=desc"+
"&per_page=100&page=2"
])
print("test_003:decs 2 pages")
# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
| 28.133333 | 153 | 0.691943 |
b5b23767bc452d1d161330f945974af76c7faa29 | 3,337 | py | Python | tronx/modules/group.py | TronUb/Tron | 55b5067a34cf2849913647533d7d035cab64568e | [
"MIT"
] | 4 | 2022-03-07T07:27:04.000Z | 2022-03-29T05:59:57.000Z | tronx/modules/group.py | TronUb/Tron | 55b5067a34cf2849913647533d7d035cab64568e | [
"MIT"
] | null | null | null | tronx/modules/group.py | TronUb/Tron | 55b5067a34cf2849913647533d7d035cab64568e | [
"MIT"
] | 3 | 2022-03-05T15:24:51.000Z | 2022-03-14T08:48:05.000Z | import asyncio
from pyrogram.raw import functions
from pyrogram.types import Message
from tronx import app, gen
app.CMD_HELP.update(
{"group" : (
"group",
{
"bgroup [group name]" : "Creates a basic group.",
"sgroup [group name]" : "Creates a super group.",
"unread" : "Mark a chat as unread in your telegram folders.",
"channel [channel name]" : "Create a channel through this command."
}
)
}
)
@app.on_message(gen(["bgroup", "bgp"], allow =["sudo"]))
async def basicgroup_handler(_, m: Message):
grpname = None
users = None
if app.long() == 1:
return await app.send_edit(f"Usage: `{app.PREFIX}bgroup mygroupname`", delme=4)
    # check the more specific word count first; otherwise this branch is unreachable
    elif app.long() > 2:
        grpname = m.text.split(None, 1)[1]
        users = m.text.split(None, 2)[2].split()
    elif app.long() > 1:
        grpname = m.text.split(None, 1)[1]
        users = "@TheRealPhoenixBot"
else:
grpname = False
users = "@TheRealPhoenixBot" # required
try:
if grpname:
await app.send_edit(f"Creating a new basic group: `{grpname}`")
group = await app.create_group(title=f"{grpname}", users=users)
await app.send_edit(f"**Created a new basic group:** [{grpname}]({(await app.get_chat(group.id)).invite_link})")
else:
await app.send_edit("No group name is provided.", text_type=["mono"], delme=4)
except Exception as e:
await app.error(e)
@app.on_message(gen(["sgroup", "sgp"], allow =["sudo"]))
async def supergroup_handler(_, m: Message):
grpname = None
about = None
if app.long() == 1:
return await app.send_edit(f"`Usage: {app.PREFIX}sgroup mygroupname`", delme=4)
    # check the more specific word count first; otherwise this branch is unreachable
    elif app.long() > 2:
        grpname = m.text.split(None, 1)[1]
        about = m.text.split(None, 2)[2]
    elif app.long() > 1:
        grpname = m.text.split(None, 1)[1]
        about = ""
else:
grpname = False
about = ""
try:
if grpname:
await app.send_edit(f"Creating a new super group: `{grpname}`")
group = await app.create_supergroup(title=f"{grpname}", description=about)
await app.send_edit(f"**Created a new super group:** [{grpname}]({(await app.get_chat(group.id)).invite_link})")
else:
await app.send_edit("No group name is provided.", text_type=["mono"], delme=4)
except Exception as e:
await app.error(e)
@app.on_message(gen(["unread", "un"], allow =["sudo"]))
async def unreadchat_handler(_, m: Message):
try:
await asyncio.gather(
m.delete(),
app.invoke(
functions.messages.MarkDialogUnread(
peer=await app.resolve_peer(m.chat.id),
unread=True
)
),
)
except Exception as e:
await app.error(e)
@app.on_message(gen("channel", allow =["sudo"]))
async def channel_handler(_, m: Message):
chname = None
about = None
if app.long() == 1:
return await app.send_edit(f"Usage: `{app.PREFIX}channel [channel name]`", delme=4)
    # check the more specific word count first; otherwise this branch is unreachable
    elif app.long() > 2:
        chname = m.text.split(None, 1)[1]
        about = m.text.split(None, 2)[2]
    elif app.long() > 1:
        chname = m.text.split(None, 1)[1]
        about = ""
try:
if chname:
await app.send_edit(f"Creating your channel: `{chname}`")
response = await app.create_channel(title=f"{chname}", description=about)
if response:
await app.send_edit(f"**Created a new channel:** [{chname}]({(await app.get_chat(response.id)).invite_link})", disable_web_page_preview=True)
else:
await app.send_edit("Couldn't create a channel.")
except Exception as e:
await app.error(e)
| 26.275591 | 145 | 0.66407 |
cb30e36d51b5be1b53857d2833cb520e7a706a79 | 2,233 | py | Python | data/detection/total_text.py | JinGyeSetBirdsFree/FudanOCR | e6b18b0eefaf832b2eb7198f5df79e00bd4cee36 | [
"MIT"
] | 25 | 2020-02-29T12:14:10.000Z | 2020-04-24T07:56:06.000Z | data/detection/total_text.py | dun933/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b | [
"MIT"
] | 33 | 2020-12-10T19:15:39.000Z | 2022-03-12T00:17:30.000Z | data/detection/total_text.py | dun933/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b | [
"MIT"
] | 4 | 2020-02-29T12:14:18.000Z | 2020-04-12T12:26:50.000Z | import scipy.io as io
import numpy as np
import os
import copy
from model.detection_model.TextSnake_pytorch.dataset.data_util import pil_load_img
from model.detection_model.TextSnake_pytorch.dataset.dataload import TextDataset, TextInstance
from model.detection_model.TextSnake_pytorch.dataset.read_json import read_json, read_dict
class TotalText(TextDataset):
def __init__(self, data_root, ignore_list=None, is_training=True, transform=None):
super().__init__(transform)
self.data_root = data_root
self.is_training = is_training
if ignore_list:
with open(ignore_list) as f:
ignore_list = f.readlines()
ignore_list = [line.strip() for line in ignore_list]
else:
ignore_list = []
# self.image_root = os.path.join(data_root, 'train_images')
self.image_root = os.path.join(data_root, 'train_images' if is_training else 'test_images')
# self.image_root = os.path.join(data_root, 'crop_images_new')
self.image_list = os.listdir(self.image_root)
self.image_list = list(filter(lambda img: img.replace('.jpg', '') not in ignore_list, self.image_list))
self.annotation_path = os.path.join(data_root, 'train_labels.json')
# self.annotation_path = os.path.join(data_root, 'crop_result_js.json')
self.data_dict = read_json(self.annotation_path)
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = pil_load_img(image_path)
image_shape = image.shape
# Read annotation
polygons = read_dict(self.data_dict, image_id)
if self.transform:
image, polygons = self.transform(image, copy.copy(polygons))
        # TODO: possible bug here
for i, polygon in enumerate(polygons):
if not polygon['illegibility']:
polygon.find_bottom_and_sideline(polygon.points)
return self.get_training_data(image, polygons, image_id=image_id, image_path=image_path,
image_shape=image_shape)
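    # Usage sketch (the data root and transform below are illustrative):
    #   dataset = TotalText('/data/total-text', is_training=True, transform=my_transform)
    #   sample = dataset[0]  # the tuple built by TextDataset.get_training_data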
def __len__(self):
        return len(self.image_list)
| 39.875 | 111 | 0.675325 |
45f51594865128c08e49b37d5c555864470bda1c | 555 | py | Python | peeper/config/amk.py | raceup/peeper | 95005b279bf94c2e6435c6a0e0db3c456b971861 | [
"MIT"
] | 1 | 2019-05-13T02:47:14.000Z | 2019-05-13T02:47:14.000Z | peeper/config/amk.py | raceup/peeper | 95005b279bf94c2e6435c6a0e0db3c456b971861 | [
"MIT"
] | null | null | null | peeper/config/amk.py | raceup/peeper | 95005b279bf94c2e6435c6a0e0db3c456b971861 | [
"MIT"
] | null | null | null | from enum import Enum
class Motors(Enum):
FL = 3
FR = 2
RL = 1
RR = 0
AMK_VALUES_1_CAN_IDS = ['283', '284', '287', '288']
AMK_VALUES_2_CAN_IDS = ['285', '286', '289', '28a']
AMK_SETPOINTS_CAN_IDS = ['184', '185', '188', '189']
AMK_VALUES_1 = {
motor: AMK_VALUES_1_CAN_IDS[motor.value]
for motor in Motors
}
AMK_VALUES_2 = {
motor: AMK_VALUES_2_CAN_IDS[motor.value]
for motor in Motors
}
AMK_SETPOINTS = {
motor: AMK_SETPOINTS_CAN_IDS[motor.value]
for motor in Motors
}
MOTOR_LABELS = ["FL", "FR", "RL", "RR"]
| 19.137931 | 52 | 0.636036 |
2a7cb2046853778cd21b61291cf80b384b7cf40a | 518 | py | Python | lexlib/__init__.py | cranndarach/wordutils | f1e2288df1924a12e6d018786e9797dabec2656a | [
"MIT"
] | 4 | 2017-03-03T00:36:13.000Z | 2020-03-10T18:58:18.000Z | lexlib/__init__.py | cranndarach/wordutils | f1e2288df1924a12e6d018786e9797dabec2656a | [
"MIT"
] | 3 | 2016-12-30T02:47:45.000Z | 2018-08-14T22:43:06.000Z | lexlib/__init__.py | cranndarach/wordutils | f1e2288df1924a12e6d018786e9797dabec2656a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Set of utilities for research involving words.
"""
# copyright: 2016-2019 R. Steiner
# license: MIT License
from .io import *
from .neighbors import *
from .structure import *
from .utilities import *
__all__ = ["check_neighbors", "get_words", "clusters", "clusters_word",
"get_cv", "get_neighbor_dict", "get_neighbor_pairs",
"get_neighbor_positions", "get_neighbor_types", "get_neighbors",
"nsyll_list", "nsyll_word", "filter_by_nsyll", "filter_words"]
| 27.263158 | 75 | 0.69305 |
da6bb512fea156a72ded3134302fb2739d27dcde | 1,690 | py | Python | src/rabbitmq/RabbitSpider.py | gendobr/scrapy-boilerplate | 5695e5310ac27be35b76a20593cb987d51eccd28 | [
"MIT"
] | null | null | null | src/rabbitmq/RabbitSpider.py | gendobr/scrapy-boilerplate | 5695e5310ac27be35b76a20593cb987d51eccd28 | [
"MIT"
] | null | null | null | src/rabbitmq/RabbitSpider.py | gendobr/scrapy-boilerplate | 5695e5310ac27be35b76a20593cb987d51eccd28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import logging
import os
import re
import sys
import time
import pika
import scrapy
from scrapy.utils.project import get_project_settings
class RabbitSpider:
def __init__(self, *args, **kwargs):
settings = get_project_settings()
self.rabbitmq_connect(settings)
def rabbitmq_connect(self, settings):
logging.getLogger("pika").setLevel(os.getenv("PIKA_LOG_LEVEL"))
parameters = pika.ConnectionParameters(
host=settings.get("RABBITMQ_HOST"),
port=settings.get("RABBITMQ_PORT"),
virtual_host=settings.get("RABBITMQ_VIRTUAL_HOST"),
credentials=pika.credentials.PlainCredentials(
username=settings.get("RABBITMQ_USER"),
password=settings.get("RABBITMQ_PASS"),
),
heartbeat=0,
)
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.basic_qos(
prefetch_count=int(settings.get("CONCURRENT_REQUESTS", 1))
)
def prepare_request(self):
raise NotImplementedError
def next_request(self):
queue_name = os.getenv("PUSHER_QUEUE", "")
while True:
stats = self.channel.queue_declare(queue=queue_name, durable=True)
if stats.method.message_count > 0:
method, header_frame, body = self.channel.basic_get(queue_name)
if body:
return self.prepare_request(method, header_frame, body)
else:
self.logger.warning("No messages in the queue, waiting...")
time.sleep(30)
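# Sketch of a concrete spider built on this mixin. The names and message
# schema below are illustrative only, not part of this project:
#
#   class MySpider(RabbitSpider, scrapy.Spider):
#       name = "my_spider"
#
#       def prepare_request(self, method, header_frame, body):
#           payload = json.loads(body)
#           return scrapy.Request(
#               payload["url"],
#               callback=self.parse,
#               meta={"delivery_tag": method.delivery_tag},
#           )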
| 30.727273 | 79 | 0.628994 |
59f922033600c7c4d9d83dab9324f431101daa2d | 2,088 | py | Python | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/provider.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
__all__ = ['Provider']
class Provider(pulumi.ProviderResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Create a Crds resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
super(Provider, __self__).__init__(
'pulumi_kubernetes_crds_operators_crossplane',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 37.963636 | 134 | 0.645594 |
25235c8b7ae203face175993654de2903237b787 | 25,651 | py | Python | python/cucim/src/cucim/skimage/filters/edges.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 131 | 2021-04-09T19:02:10.000Z | 2022-03-25T08:49:11.000Z | python/cucim/src/cucim/skimage/filters/edges.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 222 | 2021-04-12T07:15:14.000Z | 2022-03-31T20:01:01.000Z | python/cucim/src/cucim/skimage/filters/edges.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 34 | 2021-04-09T18:54:13.000Z | 2022-03-29T12:59:26.000Z | """
Sobel and Prewitt filters originally part of CellProfiler, code licensed under
both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import math
import cupy as cp
import numpy as np
from cupyx.scipy import ndimage as ndi
from .. import img_as_float
from .._shared.utils import check_nD
from ..restoration.uft import laplacian
# n-dimensional filter weights
SOBEL_EDGE = np.array([1, 0, -1])
SOBEL_SMOOTH = np.array([1, 2, 1]) / 4
HSOBEL_WEIGHTS = SOBEL_EDGE.reshape((3, 1)) * SOBEL_SMOOTH.reshape((1, 3))
VSOBEL_WEIGHTS = HSOBEL_WEIGHTS.T
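# For reference, the outer product above gives the classic 2D horizontal
# Sobel kernel (scaled by the 1/4 smoothing normalization):
#   HSOBEL_WEIGHTS == [[ 0.25,  0.5,  0.25],
#                      [ 0.  ,  0.  ,  0.  ],
#                      [-0.25, -0.5, -0.25]]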
SCHARR_EDGE = np.array([1, 0, -1])
SCHARR_SMOOTH = np.array([3, 10, 3]) / 16
HSCHARR_WEIGHTS = SCHARR_EDGE.reshape((3, 1)) * SCHARR_SMOOTH.reshape((1, 3))
VSCHARR_WEIGHTS = HSCHARR_WEIGHTS.T
PREWITT_EDGE = np.array([1, 0, -1])
PREWITT_SMOOTH = np.full((3,), 1 / 3)
HPREWITT_WEIGHTS = (PREWITT_EDGE.reshape((3, 1))
* PREWITT_SMOOTH.reshape((1, 3)))
VPREWITT_WEIGHTS = HPREWITT_WEIGHTS.T
# 2D-only filter weights
# fmt: off
ROBERTS_PD_WEIGHTS = np.array([[1, 0],
[0, -1]], dtype=np.double)
ROBERTS_ND_WEIGHTS = np.array([[0, 1],
[-1, 0]], dtype=np.double)
# fmt: on
# These filter weights can be found in Farid & Simoncelli (2004),
# Table 1 (3rd and 4th row). Additional decimal places were computed
# using the code found at https://www.cs.dartmouth.edu/farid/
# fmt: off
p = np.array([[0.0376593171958126, 0.249153396177344, 0.426374573253687,
0.249153396177344, 0.0376593171958126]])
d1 = np.array([[0.109603762960254, 0.276690988455557, 0, -0.276690988455557,
-0.109603762960254]])
# fmt: on
HFARID_WEIGHTS = d1.T * p
VFARID_WEIGHTS = np.copy(HFARID_WEIGHTS.T)
def _mask_filter_result(result, mask):
"""Return result after masking.
Input masks are eroded so that mask areas in the original image don't
affect values in the result.
"""
if mask is not None:
erosion_selem = ndi.generate_binary_structure(mask.ndim, mask.ndim)
mask = ndi.binary_erosion(mask, erosion_selem, border_value=0)
result *= mask
return result
def _kernel_shape(ndim, dim):
"""Return list of `ndim` 1s except at position `dim`, where value is -1.
Parameters
----------
ndim : int
The number of dimensions of the kernel shape.
dim : int
The axis of the kernel to expand to shape -1.
Returns
-------
shape : list of int
The requested shape.
Examples
--------
>>> _kernel_shape(2, 0)
[-1, 1]
>>> _kernel_shape(3, 1)
[1, -1, 1]
>>> _kernel_shape(4, -1)
[1, 1, 1, -1]
"""
shape = [1] * ndim
shape[dim] = -1
return shape
def _reshape_nd(arr, ndim, dim):
"""Reshape a 1D array to have n dimensions, all singletons but one.
Parameters
----------
arr : array, shape (N,)
Input array
ndim : int
Number of desired dimensions of reshaped array.
dim : int
Which dimension/axis will not be singleton-sized.
Returns
-------
arr_reshaped : array, shape ([1, ...], N, [1,...])
View of `arr` reshaped to the desired shape.
Examples
--------
>>> arr = cp.random.random(7)
>>> _reshape_nd(arr, 2, 0).shape
(7, 1)
>>> _reshape_nd(arr, 3, 1).shape
(1, 7, 1)
>>> _reshape_nd(arr, 4, -1).shape
(1, 1, 1, 7)
"""
kernel_shape = _kernel_shape(ndim, dim)
return cp.reshape(arr, kernel_shape)
def _generic_edge_filter(image, *, smooth_weights, edge_weights=[1, 0, -1],
axis=None, mode='reflect', cval=0.0, mask=None):
"""Apply a generic, n-dimensional edge filter.
The filter is computed by applying the edge weights along one dimension
and the smoothing weights along all other dimensions. If no axis is given,
or a tuple of axes is given the filter is computed along all axes in turn,
and the magnitude is computed as the square root of the average square
magnitude of all the axes.
Parameters
----------
image : array
The input image.
smooth_weights : array of float
The smoothing weights for the filter. These are applied to dimensions
orthogonal to the edge axis.
edge_weights : 1D array of float, optional
The weights to compute the edge along the chosen axes.
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
edge_mag = np.sqrt(sum([_generic_edge_filter(image, ..., axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
"""
ndim = image.ndim
if axis is None:
axes = list(range(ndim))
elif np.isscalar(axis):
axes = [axis]
else:
axes = axis
return_magnitude = len(axes) > 1
float_dtype = cp.promote_types(image.dtype, np.float16)
# TODO: file an upstream scikit-image PR casting weights in this manner
edge_weights = cp.asarray(edge_weights, dtype=float_dtype)
smooth_weights = cp.asarray(smooth_weights, dtype=float_dtype)
if return_magnitude:
edge_weights /= math.sqrt(ndim)
# CuPy Backend: Apply the smoothing and edge convolutions separably
# rather than forming an n-dimensional kernel. This is
# moderately faster for large 2D images and substantially
# faster in 3D and higher dimensions.
for i, edge_dim in enumerate(axes):
ax_output = ndi.convolve1d(image, edge_weights, axis=edge_dim,
mode=mode, output=float_dtype)
smooth_axes = list(set(range(ndim)) - {edge_dim})
for smooth_dim in smooth_axes:
            # TODO: why did this run slower when output=ax_output was used?
ax_output = ndi.convolve1d(ax_output, smooth_weights,
axis=smooth_dim, mode=mode,
output=float_dtype)
if return_magnitude:
ax_output *= ax_output
if i == 0:
output = ax_output
else:
output += ax_output
if return_magnitude:
cp.sqrt(output, out=output)
return output
def sobel(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
"""Find edges in an image using the Sobel filter.
Parameters
----------
image : array
The input image.
mask : array of bool, optional
Clip the output image to this mask. (Values where mask=0 will be set
to 0.)
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
sobel_mag = np.sqrt(sum([sobel(image, axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
Returns
-------
output : array of float
The Sobel edge map.
See also
--------
scharr, prewitt, canny
References
----------
.. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
Optimization of Kernel Based Image Derivatives.
.. [2] https://en.wikipedia.org/wiki/Sobel_operator
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import filters
>>> camera = cp.array(data.camera())
>>> edges = filters.sobel(camera)
"""
image = img_as_float(image)
output = _generic_edge_filter(image, smooth_weights=SOBEL_SMOOTH,
axis=axis, mode=mode, cval=cval)
output = _mask_filter_result(output, mask)
return output
def sobel_h(image, mask=None):
"""Find the horizontal edges of an image using the Sobel transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Sobel edge map.
Notes
-----
We use the following kernel::
1 2 1
0 0 0
-1 -2 -1
"""
check_nD(image, 2)
return sobel(image, mask=mask, axis=0)
def sobel_v(image, mask=None):
"""Find the vertical edges of an image using the Sobel transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Sobel edge map.
Notes
-----
We use the following kernel::
1 0 -1
2 0 -2
1 0 -1
"""
check_nD(image, 2)
return sobel(image, mask=mask, axis=1)
def scharr(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
"""Find the edge magnitude using the Scharr transform.
Parameters
----------
image : array
The input image.
mask : array of bool, optional
Clip the output image to this mask. (Values where mask=0 will be set
to 0.)
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
sch_mag = np.sqrt(sum([scharr(image, axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
Returns
-------
output : array of float
The Scharr edge map.
See also
--------
sobel, prewitt, canny
Notes
-----
The Scharr operator has a better rotation invariance than
other edge filters such as the Sobel or the Prewitt operators.
References
----------
.. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
Optimization of Kernel Based Image Derivatives.
.. [2] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import filters
>>> camera = cp.array(data.camera())
>>> edges = filters.scharr(camera)
"""
image = img_as_float(image)
output = _generic_edge_filter(image, smooth_weights=SCHARR_SMOOTH,
axis=axis, mode=mode, cval=cval)
output = _mask_filter_result(output, mask)
return output
def scharr_h(image, mask=None):
"""Find the horizontal edges of an image using the Scharr transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Scharr edge map.
Notes
-----
We use the following kernel::
3 10 3
0 0 0
-3 -10 -3
References
----------
.. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
Optimization of Kernel Based Image Derivatives.
"""
check_nD(image, 2)
return scharr(image, mask=mask, axis=0)
def scharr_v(image, mask=None):
"""Find the vertical edges of an image using the Scharr transform.
Parameters
----------
image : 2-D array
Image to process
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Scharr edge map.
Notes
-----
We use the following kernel::
3 0 -3
10 0 -10
3 0 -3
References
----------
.. [1] D. Kroon, 2009, Short Paper University Twente, Numerical
Optimization of Kernel Based Image Derivatives.
"""
check_nD(image, 2)
return scharr(image, mask=mask, axis=1)
def prewitt(image, mask=None, *, axis=None, mode='reflect', cval=0.0):
"""Find the edge magnitude using the Prewitt transform.
Parameters
----------
image : array
The input image.
mask : array of bool, optional
Clip the output image to this mask. (Values where mask=0 will be set
to 0.)
axis : int or sequence of int, optional
Compute the edge filter along this axis. If not provided, the edge
magnitude is computed. This is defined as::
prw_mag = np.sqrt(sum([prewitt(image, axis=i)**2
for i in range(image.ndim)]) / image.ndim)
The magnitude is also computed if axis is a sequence.
mode : str or sequence of str, optional
The boundary mode for the convolution. See `scipy.ndimage.convolve`
for a description of the modes. This can be either a single boundary
mode or one boundary mode per axis.
cval : float, optional
When `mode` is ``'constant'``, this is the constant used in values
outside the boundary of the image data.
Returns
-------
output : array of float
The Prewitt edge map.
See also
--------
sobel, scharr
Notes
-----
The edge magnitude depends slightly on edge directions, since the
approximation of the gradient operator by the Prewitt operator is not
completely rotation invariant. For a better rotation invariance, the Scharr
operator should be used. The Sobel operator has a better rotation
invariance than the Prewitt operator, but a worse rotation invariance than
the Scharr operator.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import filters
>>> camera = cp.array(data.camera())
>>> edges = filters.prewitt(camera)
"""
image = img_as_float(image)
output = _generic_edge_filter(image, smooth_weights=PREWITT_SMOOTH,
axis=axis, mode=mode, cval=cval)
output = _mask_filter_result(output, mask)
return output
def prewitt_h(image, mask=None):
"""Find the horizontal edges of an image using the Prewitt transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Prewitt edge map.
Notes
-----
We use the following kernel::
      1/3   1/3   1/3
       0     0     0
     -1/3  -1/3  -1/3
"""
check_nD(image, 2)
return prewitt(image, mask=mask, axis=0)
def prewitt_v(image, mask=None):
"""Find the vertical edges of an image using the Prewitt transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Prewitt edge map.
Notes
-----
We use the following kernel::
     1/3   0  -1/3
     1/3   0  -1/3
     1/3   0  -1/3
"""
check_nD(image, 2)
return prewitt(image, mask=mask, axis=1)
def roberts(image, mask=None):
"""Find the edge magnitude using Roberts' cross operator.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Roberts' Cross edge map.
See also
--------
sobel, scharr, prewitt, feature.canny
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> camera = cp.array(data.camera())
>>> from cucim.skimage import filters
>>> edges = filters.roberts(camera)
"""
check_nD(image, 2)
# CuPy Backend: refactored this section slightly for efficiency with CuPy
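    # Equivalence sketch (illustrative, not executed): the in-place sequence
    # below computes cp.sqrt(pos**2 + neg**2) / sqrt(2) over the two diagonal
    # responses, avoiding intermediate GPU allocations.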
pos_diag_sq = roberts_pos_diag(image, mask)
pos_diag_sq *= pos_diag_sq
out = roberts_neg_diag(image, mask)
out *= out
out += pos_diag_sq
cp.sqrt(out, out=out)
out /= math.sqrt(2)
return out
def roberts_pos_diag(image, mask=None):
"""Find the cross edges of an image using Roberts' cross operator.
The kernel is applied to the input image to produce separate measurements
    of the gradient component in one orientation.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
        The Roberts' edge map.
Notes
-----
We use the following kernel::
      1   0
      0  -1
"""
check_nD(image, 2)
image = img_as_float(image)
# CuPy Backend: allow float16 & float32 filtering
weights = cp.array(ROBERTS_PD_WEIGHTS, dtype=image.dtype)
result = ndi.convolve(image, weights)
return _mask_filter_result(result, mask)
def roberts_neg_diag(image, mask=None):
"""Find the cross edges of an image using the Roberts' Cross operator.
The kernel is applied to the input image to produce separate measurements
    of the gradient component in one orientation.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
        The Roberts' edge map.
Notes
-----
We use the following kernel::
      0   1
     -1   0
"""
check_nD(image, 2)
image = img_as_float(image)
# CuPy Backend: allow float16 & float32 filtering
weights = cp.array(ROBERTS_ND_WEIGHTS, dtype=image.dtype)
result = ndi.convolve(image, weights)
return _mask_filter_result(result, mask)
def laplace(image, ksize=3, mask=None):
"""Find the edges of an image using the Laplace operator.
Parameters
----------
image : ndarray
Image to process.
ksize : int, optional
Define the size of the discrete Laplacian operator such that it
will have a size of (ksize,) * image.ndim.
mask : ndarray, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : ndarray
The Laplace edge map.
Notes
-----
The Laplacian operator is generated using the function
skimage.restoration.uft.laplacian().
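    For a 2-D image this corresponds, up to the overall sign convention used
    internally, to the familiar 3 x 3 discrete Laplacian::
          0   1   0
          1  -4   1
          0   1   0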
"""
image = img_as_float(image)
# TODO: File an upstream bug for scikit-image. ksize does not appear to
# actually be used and is hard-coded to 3 in `laplacian`.
if ksize != 3:
raise NotImplementedError("only ksize=3 is supported")
# Create the discrete Laplacian operator - We keep only the real part of
# the filter
laplace_op = laplacian(image.ndim, None, dtype=image.dtype)
result = ndi.convolve(image, laplace_op)
return _mask_filter_result(result, mask)
def farid(image, *, mask=None):
"""Find the edge magnitude using the Farid transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Farid edge map.
See also
--------
sobel, prewitt, canny
Notes
-----
Take the square root of the sum of the squares of the horizontal and
vertical derivatives to get a magnitude that is somewhat insensitive to
direction. Similar to the Scharr operator, this operator is designed with
a rotation invariance constraint.
References
----------
.. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
multidimensional signals", IEEE Transactions on Image Processing
13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
.. [2] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
<https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> camera = cp.array(data.camera())
>>> from cucim.skimage import filters
>>> edges = filters.farid(camera)
"""
check_nD(image, 2)
# CuPy Backend: refactored this section slightly for efficiency with CuPy
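    # Equivalently (illustrative, not executed): cp.sqrt(h**2 + v**2) / sqrt(2)
    # over the horizontal and vertical responses, mirroring `roberts` above.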
h_sq = farid_h(image, mask=mask)
h_sq *= h_sq
out = farid_v(image, mask=mask)
out *= out
out += h_sq
cp.sqrt(out, out=out)
out /= math.sqrt(2)
return out
def farid_h(image, *, mask=None):
"""Find the horizontal edges of an image using the Farid transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Farid edge map.
Notes
-----
The kernel was constructed using the 5-tap weights from [1].
References
----------
.. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
multidimensional signals", IEEE Transactions on Image Processing
13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
.. [2] Farid, H. and Simoncelli, E. P. "Optimally rotation-equivariant
directional derivative kernels", In: 7th International Conference on
Computer Analysis of Images and Patterns, Kiel, Germany. Sep, 1997.
"""
check_nD(image, 2)
image = img_as_float(image)
result = ndi.convolve(image, cp.array(HFARID_WEIGHTS, dtype=image.dtype))
return _mask_filter_result(result, mask)
def farid_v(image, *, mask=None):
"""Find the vertical edges of an image using the Farid transform.
Parameters
----------
image : 2-D array
Image to process.
mask : 2-D array, optional
An optional mask to limit the application to a certain area.
Note that pixels surrounding masked regions are also masked to
prevent masked regions from affecting the result.
Returns
-------
output : 2-D array
The Farid edge map.
Notes
-----
The kernel was constructed using the 5-tap weights from [1].
References
----------
.. [1] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
multidimensional signals", IEEE Transactions on Image Processing
13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
"""
check_nD(image, 2)
image = img_as_float(image)
result = ndi.convolve(image, cp.array(VFARID_WEIGHTS, dtype=image.dtype))
return _mask_filter_result(result, mask)
| 29.89627 | 93 | 0.625629 |
7436cf3a18ce2abbc2b2acfc30e00db33568c8f2 | 3,283 | py | Python | team_9/cocos/cocos/audio/pygame/__init__.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | 1 | 2019-09-15T18:59:49.000Z | 2019-09-15T18:59:49.000Z | team_9/cocos/cocos/audio/pygame/__init__.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | team_9/cocos/cocos/audio/pygame/__init__.py | Donnyvdm/dojo19 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | [
"BSD-3-Clause"
] | null | null | null | # pygame - Python Game Library
# Copyright (C) 2000-2001 Pete Shinners
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Pete Shinners
# pete@shinners.org
"""Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
"""
from __future__ import division, print_function, unicode_literals
import six
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 899 2006-08-04 16:52:18Z aholkner $'
import os
import sys
class MissingModule:
def __init__(self, name, info='', urgent=0):
self.name = name
self.info = str(info)
self.urgent = urgent
if urgent:
self.warn()
def __getattr__(self, var):
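        # Lazy failure: the first attribute access on a non-urgent missing
        # module emits a RuntimeWarning, and every access then raises
        # NotImplementedError carrying the module name.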
if not self.urgent:
self.warn()
self.urgent = 1
MissingPygameModule = "%s module not available" % self.name
raise NotImplementedError(MissingPygameModule)
if six.PY2:
def __nonzero__(self):
return 0
else:
def __bool__(self):
return 0
def warn(self):
if self.urgent:
type = 'import'
else:
type = 'use'
message = '%s %s: %s' % (type, self.name, self.info)
try:
import warnings
if self.urgent:
level = 4
else:
level = 3
warnings.warn(message, RuntimeWarning, level)
except ImportError:
print(message)
# we need to import like this, each at a time. the cleanest way to import
# our modules is with the import command (not the __import__ function)
# first, the "required" modules
# from pygame.array import *
from cocos.audio.pygame.base import *
from cocos.audio.pygame.version import *
__version__ = ver
# next, the "standard" modules
# we still allow them to be missing for stripped down pygame distributions
try:
import cocos.audio.pygame.mixer
except (ImportError, IOError) as msg:
mixer = MissingModule("mixer", msg, 0)
# there's also a couple "internal" modules not needed
# by users, but putting them here helps "dependency finder"
# programs get everything they need (like py2exe)
try:
import cocos.audio.pygame.mixer_music
del cocos.audio.pygame.mixer_music
except (ImportError, IOError):
pass
# cleanup namespace
del os, sys, # TODO rwobject, surflock, MissingModule, copy_reg
| 30.682243 | 75 | 0.68413 |
84d4e45fa77dd01254666fbf8e6759229c3f8c9b | 4,411 | py | Python | lib/ayeaye/connectors/json_connector.py | Aye-Aye-Dev/AyeAye | 8fd7c6cdb4313fffce9d0c21dd02391821c512da | [
"Apache-2.0"
] | 5 | 2020-02-04T12:54:15.000Z | 2022-02-15T11:14:35.000Z | lib/ayeaye/connectors/json_connector.py | Aye-Aye-Dev/AyeAye | 8fd7c6cdb4313fffce9d0c21dd02391821c512da | [
"Apache-2.0"
] | null | null | null | lib/ayeaye/connectors/json_connector.py | Aye-Aye-Dev/AyeAye | 8fd7c6cdb4313fffce9d0c21dd02391821c512da | [
"Apache-2.0"
] | 1 | 2020-11-09T08:47:34.000Z | 2020-11-09T08:47:34.000Z | """
Created on 15 Apr 2020
@author: si
"""
import json
import os
from ayeaye.connectors.base import DataConnector, AccessMode
from ayeaye.pinnate import Pinnate
class JsonConnector(DataConnector):
engine_type = "json://"
def __init__(self, *args, **kwargs):
"""
Single JSON file loaded into memory and made available as a :class:`Pinnate` object.
For args: @see :class:`connectors.base.DataConnector`
additional args for JsonConnector
None
Connection information-
engine_url format is
json://<filesystem absolute path>[;encoding=<character encoding>][;indent=<spaces when pretty printing write output>]
e.g. json:///data/my_project/the_data.json;encoding=latin-1
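        Illustrative sketch (the path and the attribute name are made up for
        the example)-
        c = JsonConnector(engine_url="json:///tmp/demo.json;indent=2")
        c.data.some_key  # connects lazily on first access; Pinnate-style read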
"""
super().__init__(*args, **kwargs)
self._doc = None
self._encoding = None
self._engine_params = None
@property
def engine_params(self):
if self._engine_params is None:
self._engine_params = self.ignition._decode_filesystem_engine_url(
self.engine_url, optional_args=["encoding", "indent"]
)
if "encoding" in self._engine_params:
self._encoding = self.engine_params.encoding
for typed_param in ["indent"]:
if typed_param in self.engine_params:
self.engine_params[typed_param] = int(self.engine_params[typed_param])
return self._engine_params
@property
def encoding(self):
"""
        default encoding. The '-sig' variant strips a leading unicode BOM when
        reading (and writes one when encoding).
"""
if self._encoding is None:
ep = self.engine_params
self._encoding = ep.encoding if "encoding" in ep else "utf-8-sig"
return self._encoding
def close_connection(self):
self._doc = None
def connect(self):
"""
When in AccessMode.READ, read the contents of the file into a :class:`Pinnate` instance
that is available as self.data.
In AccessMode.WRITE mode connect() doesn't do anything because file handles aren't kept
open by the JsonConnector. The write operation is in :method:`_data_write`.
"""
if self._doc is None:
file_path = self.engine_params.file_path
if (
self.access in [AccessMode.READ, AccessMode.READWRITE]
and os.path.isfile(file_path)
and os.access(file_path, os.R_OK)
):
with open(file_path, "r", encoding=self.encoding) as f:
as_native = json.load(f)
self._doc = Pinnate(as_native)
else:
raise ValueError(f"Attempt to read '{file_path}' which isn't readable")
def __len__(self):
raise NotImplementedError("TODO")
def __getitem__(self, key):
raise NotImplementedError("TODO")
def __iter__(self):
raise NotImplementedError("Not an iterative dataset. Use .data instead.")
def _data_read(self):
self.connect()
return self._doc
def _data_write(self, new_data):
"""
Set the contents of a JSON file. `new_data` can be an instance of :class:`Pinnate` or any
python datatype that will serialise into JSON.
Will raise TypeError if the data can't be serialised to JSON.
@param new_data: (mixed, see description)
"""
if self.access not in [AccessMode.WRITE, AccessMode.READWRITE]:
raise ValueError("Write attempted on dataset opened in READ mode.")
json_args = {}
if "indent" in self.engine_params:
json_args["indent"] = self.engine_params["indent"]
if isinstance(new_data, Pinnate):
as_json = json.dumps(new_data.as_dict(), **json_args)
else:
as_json = json.dumps(new_data, **json_args)
# Data is written to disk immediately. The file handle isn't left open.
# @see :method:`connect`.
with open(self.engine_params.file_path, "w", encoding=self.encoding) as f:
f.write(as_json)
data = property(fget=_data_read, fset=_data_write)
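    # Round-trip sketch (the `access` keyword and its handling by the base
    # DataConnector are assumptions here, not part of this class):
    #   c = JsonConnector(engine_url="json:///tmp/out.json;indent=2",
    #                     access=AccessMode.WRITE)
    #   c.data = {"k": [1, 2, 3]}  # serialised and flushed to disk immediately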
@property
def datasource_exists(self):
"""
Returns:
(bool) if the datasource referred to in self.engine_url exists.
"""
return os.path.exists(self.engine_params.file_path)
| 31.963768 | 129 | 0.614827 |
1a0da544c4dfc429aede6343c02d17cb5cd52d6a | 19,024 | py | Python | main.py | LiYingwei/CondenseNet | 8a86cdb755a8667e4096698bdc2859ae4487c979 | [
"MIT"
] | null | null | null | main.py | LiYingwei/CondenseNet | 8a86cdb755a8667e4096698bdc2859ae4487c979 | [
"MIT"
] | null | null | null | main.py | LiYingwei/CondenseNet | 8a86cdb755a8667e4096698bdc2859ae4487c979 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import argparse
import os
import shutil
import time
import math
import warnings
import models
from utils import convert_model, measure_model
parser = argparse.ArgumentParser(description='PyTorch Condensed Convolutional Networks')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='condensenet', type=str, metavar='M',
help='model to train the dataset')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=120, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate (default: 0.1)')
parser.add_argument('--lr-type', default='cosine', type=str, metavar='T',
help='learning rate strategy (default: cosine)',
choices=['cosine', 'multistep'])
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model (default: false)')
parser.add_argument('--no-save-model', dest='no_save_model', action='store_true',
help='only save best model (default: false)')
parser.add_argument('--manual-seed', default=0, type=int, metavar='N',
help='manual seed (default: 0)')
parser.add_argument('--gpu',
help='gpu available')
parser.add_argument('--savedir', type=str, metavar='PATH', default='results/savedir',
help='path to save result and checkpoint (default: results/savedir)')
parser.add_argument('--resume', action='store_true',
help='use latest checkpoint if have any (default: none)')
parser.add_argument('--stages', type=str, metavar='STAGE DEPTH',
help='per layer depth')
parser.add_argument('--bottleneck', default=4, type=int, metavar='B',
help='bottleneck (default: 4)')
parser.add_argument('--group-1x1', type=int, metavar='G', default=4,
help='1x1 group convolution (default: 4)')
parser.add_argument('--group-3x3', type=int, metavar='G', default=4,
help='3x3 group convolution (default: 4)')
parser.add_argument('--condense-factor', type=int, metavar='C', default=4,
help='condense factor (default: 4)')
parser.add_argument('--growth', type=str, metavar='GROWTH RATE',
help='per layer growth')
parser.add_argument('--reduction', default=0.5, type=float, metavar='R',
help='transition reduction (default: 0.5)')
parser.add_argument('--dropout-rate', default=0, type=float,
help='drop out (default: 0)')
parser.add_argument('--group-lasso-lambda', default=0., type=float, metavar='LASSO',
help='group lasso loss weight (default: 0)')
parser.add_argument('--evaluate', action='store_true',
help='evaluate model on validation set (default: false)')
parser.add_argument('--convert-from', default=None, type=str, metavar='PATH',
help='path to saved checkpoint (default: none)')
parser.add_argument('--evaluate-from', default=None, type=str, metavar='PATH',
help='path to saved checkpoint (default: none)')
parser.add_argument('--shuffle1', action='store_true')
parser.add_argument('--shuffle2', action='store_true')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
args.stages = list(map(int, args.stages.split('-')))
args.growth = list(map(int, args.growth.split('-')))
if args.condense_factor is None:
args.condense_factor = args.group_1x1
if args.data == 'cifar10':
args.num_classes = 10
elif args.data == 'cifar100':
args.num_classes = 100
else:
args.num_classes = 1000
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
best_prec1 = 100.  # best (i.e. lowest) validation top-1 error; train/validate return error rates
def main():
global args, best_prec1
### Calculate FLOPs & Param
model = getattr(models, args.model)(args)
print(model)
if args.data in ['cifar10', 'cifar100']:
IMAGE_SIZE = 32
else:
IMAGE_SIZE = 224
n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
args.filename = "%s_%s_%s.txt" % \
(args.model, int(n_params), int(n_flops))
del (model)
print(args)
### Create model
model = getattr(models, args.model)(args)
if args.model.startswith('alexnet') or args.model.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
### Define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
### Optionally resume from a checkpoint
if args.resume:
checkpoint = load_checkpoint(args)
if checkpoint is not None:
args.start_epoch = checkpoint['epoch'] + 1
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
### Optionally convert from a model
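    # (convert_model is expected to rewrite the learned group convolutions into
    # standard group convolutions for inference; see utils.convert_model)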
if args.convert_from is not None:
args.evaluate = True
state_dict = torch.load(args.convert_from)['state_dict']
model.load_state_dict(state_dict)
model = model.cpu().module
convert_model(model, args)
model = nn.DataParallel(model).cuda()
head, tail = os.path.split(args.convert_from)
tail = "converted_" + tail
torch.save({'state_dict': model.state_dict()}, os.path.join(head, tail))
### Optionally evaluate from a model
if args.evaluate_from is not None:
args.evaluate = True
state_dict = torch.load(args.evaluate_from)['state_dict']
model.load_state_dict(state_dict)
cudnn.benchmark = True
### Data loading
if args.data == "cifar10":
normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],
std=[0.2471, 0.2435, 0.2616])
train_set = datasets.CIFAR10('../data', train=True, download=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_set = datasets.CIFAR10('../data', train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
elif args.data == "cifar100":
normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
std=[0.2675, 0.2565, 0.2761])
train_set = datasets.CIFAR100('../data', train=True, download=True,
transform=transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_set = datasets.CIFAR100('../data', train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
]))
else:
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_set = datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
val_set = datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_set,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
### Train for one epoch
tr_prec1, tr_prec5, loss, lr = \
train(train_loader, model, criterion, optimizer, epoch)
### Evaluate on validation set
val_prec1, val_prec5 = validate(val_loader, model, criterion)
        ### Remember best (lowest) top-1 error and save checkpoint
        is_best = val_prec1 < best_prec1
        best_prec1 = min(val_prec1, best_prec1)
model_filename = 'checkpoint_%03d.pth.tar' % epoch
save_checkpoint({
'epoch': epoch,
'model': args.model,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, args, is_best, model_filename, "%.4f %.4f %.4f %.4f %.4f %.4f\n" %
(val_prec1, val_prec5, tr_prec1, tr_prec5, loss, lr))
### Convert model and test
model = model.cpu().module
convert_model(model, args)
model = nn.DataParallel(model).cuda()
print(model)
validate(val_loader, model, criterion)
n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
print('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
return
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
learned_module_list = []
### Switch to train mode
model.train()
### Find all learned convs to prepare for group lasso loss
for m in model.modules():
if m.__str__().startswith('LearnedGroupConv'):
learned_module_list.append(m)
running_lr = None
end = time.time()
for i, (input, target) in enumerate(train_loader):
progress = float(epoch * len(train_loader) + i) / \
(args.epochs * len(train_loader))
args.progress = progress
### Adjust learning rate
lr = adjust_learning_rate(optimizer, epoch, args, batch=i,
nBatch=len(train_loader), method=args.lr_type)
if running_lr is None:
running_lr = lr
### Measure data loading time
data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)  # `async` is reserved in Python 3.7+
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
### Compute output
output = model(input_var, progress)
loss = criterion(output, target_var)
### Add group lasso loss
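        # Sums the per-layer lasso terms collected in learned_module_list and
        # folds them into the task loss, weighted by --group-lasso-lambda
        # (this drives the structured sparsity that condensing relies on).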
if args.group_lasso_lambda > 0:
lasso_loss = 0
for m in learned_module_list:
lasso_loss = lasso_loss + m.lasso_loss
loss = loss + args.group_lasso_lambda * lasso_loss
### Measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
### Compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
### Measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f}\t' # ({batch_time.avg:.3f}) '
'Data {data_time.val:.3f}\t' # ({data_time.avg:.3f}) '
'Loss {loss.val:.4f}\t' # ({loss.avg:.4f}) '
'Prec@1 {top1.val:.3f}\t' # ({top1.avg:.3f}) '
'Prec@5 {top5.val:.3f}\t' # ({top5.avg:.3f})'
'lr {lr: .4f}'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5, lr=lr))
return 100. - top1.avg, 100. - top5.avg, losses.avg, running_lr
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
### Switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        ### Compute output (the removed `volatile` flag is replaced by no_grad)
        with torch.no_grad():
            output = model(input_var)
            loss = criterion(output, target_var)
### Measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
### Measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return 100. - top1.avg, 100. - top5.avg
def load_checkpoint(args):
model_dir = os.path.join(args.savedir, 'save_models')
latest_filename = os.path.join(model_dir, 'latest.txt')
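    # latest.txt stores the path of the most recently saved checkpoint so that
    # --resume can locate it without scanning save_models.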
if os.path.exists(latest_filename):
with open(latest_filename, 'r') as fin:
model_filename = fin.readlines()[0]
else:
return None
print("=> loading checkpoint '{}'".format(model_filename))
state = torch.load(model_filename)
print("=> loaded checkpoint '{}'".format(model_filename))
return state
def save_checkpoint(state, args, is_best, filename, result):
print(args)
result_filename = os.path.join(args.savedir, args.filename)
model_dir = os.path.join(args.savedir, 'save_models')
model_filename = os.path.join(model_dir, filename)
latest_filename = os.path.join(model_dir, 'latest.txt')
best_filename = os.path.join(model_dir, 'model_best.pth.tar')
os.makedirs(args.savedir, exist_ok=True)
os.makedirs(model_dir, exist_ok=True)
print("=> saving checkpoint '{}'".format(model_filename))
with open(result_filename, 'a') as fout:
fout.write(result)
torch.save(state, model_filename)
with open(latest_filename, 'w') as fout:
fout.write(model_filename)
if args.no_save_model:
shutil.move(model_filename, best_filename)
elif is_best:
shutil.copyfile(model_filename, best_filename)
print("=> saved checkpoint '{}'".format(model_filename))
return
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args, batch=None,
nBatch=None, method='cosine'):
if method == 'cosine':
T_total = args.epochs * nBatch
T_cur = (epoch % args.epochs) * nBatch + batch
lr = 0.5 * args.lr * (1 + math.cos(math.pi * T_cur / T_total))
elif method == 'multistep':
if args.data in ['cifar10', 'cifar100']:
lr, decay_rate = args.lr, 0.1
if epoch >= args.epochs * 0.75:
lr *= decay_rate ** 2
elif epoch >= args.epochs * 0.5:
lr *= decay_rate
else:
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
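# Worked example of the cosine schedule above (illustrative numbers):
# with args.lr = 0.1 and T_total = args.epochs * nBatch,
#   T_cur = 0           -> lr = 0.5 * 0.1 * (1 + cos(0))      = 0.1
#   T_cur = T_total / 2 -> lr = 0.5 * 0.1 * (1 + cos(pi / 2)) = 0.05
#   T_cur -> T_total    -> lr = 0.5 * 0.1 * (1 + cos(pi))     -> 0.0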
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
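# Worked example for `accuracy` (illustrative tensors, not executed here):
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # 2 samples, 2 classes
#   target = torch.tensor([1, 1])
#   accuracy(output, target, topk=(1,))              # -> [tensor(50.)]
# Sample 0 predicts class 1 (correct); sample 1 predicts class 0 (wrong).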
if __name__ == '__main__':
main()
| 39.305785 | 95 | 0.587731 |
09a3b7aed9428f560b9c0e8a358c4809a490cffc | 9,220 | py | Python | tests/components/energy/test_websocket_api.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | tests/components/energy/test_websocket_api.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | tests/components/energy/test_websocket_api.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """Test the Energy websocket API."""
from unittest.mock import AsyncMock, Mock
import pytest
from homeassistant.components.energy import data, is_configured
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, flush_store, mock_platform
@pytest.fixture(autouse=True)
async def setup_integration(hass):
"""Set up the integration."""
assert await async_setup_component(
hass, "energy", {"recorder": {"db_url": "sqlite://"}}
)
@pytest.fixture
def mock_energy_platform(hass):
"""Mock an energy platform."""
hass.config.components.add("some_domain")
mock_platform(
hass,
"some_domain.energy",
Mock(
async_get_solar_forecast=AsyncMock(
return_value={
"wh_hours": {
"2021-06-27T13:00:00+00:00": 12,
"2021-06-27T14:00:00+00:00": 8,
}
}
)
),
)
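# The fixture above registers a fake ``some_domain.energy`` platform whose
# ``async_get_solar_forecast`` returns a fixed ``wh_hours`` payload; the solar
# forecast test below asserts that this payload is surfaced verbatim through
# the websocket API.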
async def test_get_preferences_no_data(hass, hass_ws_client) -> None:
"""Test we get error if no preferences set."""
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "energy/get_prefs"})
msg = await client.receive_json()
assert msg["id"] == 5
assert not msg["success"]
assert msg["error"] == {"code": "not_found", "message": "No prefs"}
async def test_get_preferences_default(hass, hass_ws_client, hass_storage) -> None:
"""Test we get preferences."""
assert not await is_configured(hass)
manager = await data.async_get_manager(hass)
manager.data = data.EnergyManager.default_preferences()
client = await hass_ws_client(hass)
assert not await is_configured(hass)
await client.send_json({"id": 5, "type": "energy/get_prefs"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["success"]
assert msg["result"] == data.EnergyManager.default_preferences()
async def test_save_preferences(
hass, hass_ws_client, hass_storage, mock_energy_platform
) -> None:
"""Test we can save preferences."""
client = await hass_ws_client(hass)
# Test saving default prefs is also valid.
default_prefs = data.EnergyManager.default_preferences()
await client.send_json({"id": 5, "type": "energy/save_prefs", **default_prefs})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["success"]
assert msg["result"] == default_prefs
new_prefs = {
"energy_sources": [
{
"type": "grid",
"flow_from": [
{
"stat_energy_from": "sensor.heat_pump_meter",
"stat_cost": "heat_pump_kwh_cost",
"entity_energy_from": None,
"entity_energy_price": None,
"number_energy_price": None,
},
{
"stat_energy_from": "sensor.heat_pump_meter_2",
"stat_cost": None,
"entity_energy_from": "sensor.heat_pump_meter_2",
"entity_energy_price": None,
"number_energy_price": 0.20,
},
],
"flow_to": [
{
"stat_energy_to": "sensor.return_to_grid_peak",
"stat_compensation": None,
"entity_energy_to": None,
"entity_energy_price": None,
"number_energy_price": None,
},
{
"stat_energy_to": "sensor.return_to_grid_offpeak",
"stat_compensation": None,
"entity_energy_to": "sensor.return_to_grid_offpeak",
"entity_energy_price": None,
"number_energy_price": 0.20,
},
],
"cost_adjustment_day": 1.2,
},
{
"type": "solar",
"stat_energy_from": "my_solar_production",
"config_entry_solar_forecast": ["predicted_config_entry"],
},
{
"type": "battery",
"stat_energy_from": "my_battery_draining",
"stat_energy_to": "my_battery_charging",
},
],
"device_consumption": [{"stat_consumption": "some_device_usage"}],
}
await client.send_json({"id": 6, "type": "energy/save_prefs", **new_prefs})
msg = await client.receive_json()
assert msg["id"] == 6
assert msg["success"]
assert msg["result"] == new_prefs
assert data.STORAGE_KEY not in hass_storage, "expected not to be written yet"
await flush_store((await data.async_get_manager(hass))._store)
assert hass_storage[data.STORAGE_KEY]["data"] == new_prefs
assert await is_configured(hass)
# Verify info reflects data.
await client.send_json({"id": 7, "type": "energy/info"})
msg = await client.receive_json()
assert msg["id"] == 7
assert msg["success"]
assert msg["result"] == {
"cost_sensors": {
"sensor.heat_pump_meter_2": "sensor.heat_pump_meter_2_cost",
"sensor.return_to_grid_offpeak": "sensor.return_to_grid_offpeak_compensation",
},
"solar_forecast_domains": ["some_domain"],
}
# Prefs with limited options
new_prefs_2 = {
"energy_sources": [
{
"type": "grid",
"flow_from": [
{
"stat_energy_from": "sensor.heat_pump_meter",
"stat_cost": None,
"entity_energy_from": None,
"entity_energy_price": None,
"number_energy_price": None,
}
],
"flow_to": [],
"cost_adjustment_day": 1.2,
},
{
"type": "solar",
"stat_energy_from": "my_solar_production",
"config_entry_solar_forecast": None,
},
],
}
await client.send_json({"id": 8, "type": "energy/save_prefs", **new_prefs_2})
msg = await client.receive_json()
assert msg["id"] == 8
assert msg["success"]
assert msg["result"] == {**new_prefs, **new_prefs_2}
async def test_handle_duplicate_from_stat(hass, hass_ws_client) -> None:
"""Test we handle duplicate from stats."""
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 5,
"type": "energy/save_prefs",
"energy_sources": [
{
"type": "grid",
"flow_from": [
{
"stat_energy_from": "sensor.heat_pump_meter",
"stat_cost": None,
"entity_energy_from": None,
"entity_energy_price": None,
"number_energy_price": None,
},
{
"stat_energy_from": "sensor.heat_pump_meter",
"stat_cost": None,
"entity_energy_from": None,
"entity_energy_price": None,
"number_energy_price": None,
},
],
"flow_to": [],
"cost_adjustment_day": 0,
},
],
}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert not msg["success"]
assert msg["error"]["code"] == "invalid_format"
async def test_validate(hass, hass_ws_client) -> None:
"""Test we can validate the preferences."""
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "energy/validate"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["success"]
assert msg["result"] == {
"energy_sources": [],
"device_consumption": [],
}
async def test_get_solar_forecast(hass, hass_ws_client, mock_energy_platform) -> None:
"""Test we get preferences."""
entry = MockConfigEntry(domain="some_domain")
entry.add_to_hass(hass)
manager = await data.async_get_manager(hass)
manager.data = data.EnergyManager.default_preferences()
manager.data["energy_sources"].append(
{
"type": "solar",
"stat_energy_from": "my_solar_production",
"config_entry_solar_forecast": [entry.entry_id],
}
)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "energy/solar_forecast"})
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["success"]
assert msg["result"] == {
entry.entry_id: {
"wh_hours": {
"2021-06-27T13:00:00+00:00": 12,
"2021-06-27T14:00:00+00:00": 8,
}
}
}
| 31.575342 | 90 | 0.526139 |
3d87fa55617200f65337dab0221e0c5dd72d9f6d | 1,952 | py | Python | var/spack/repos/builtin/packages/folly/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 | 2021-09-29T02:14:40.000Z | 2022-01-27T20:50:36.000Z | var/spack/repos/builtin/packages/folly/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/folly/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Folly(CMakePackage):
"""Folly (acronymed loosely after Facebook Open Source Library) is a
library of C++11 components designed with practicality and efficiency
in mind.
Folly contains a variety of core library components used extensively at
Facebook. In particular, it's often a dependency of Facebook's other open
source C++ efforts and place where those projects can share code.
"""
homepage = "https://github.com/facebook/folly"
url = "https://github.com/facebook/folly/releases/download/v2021.05.24.00/folly-v2021.05.24.00.tar.gz"
version('2021.05.24.00', sha256='9d308adefe4670637f5c7d96309b3b394ac3fa129bc954f5dfbdd8b741c02aad')
# CMakePackage Dependency
depends_on('pkgconfig', type='build')
# folly requires gcc 4.9+ and a version of boost compiled with >= C++14
# TODO: Specify the boost components
variant('cxxstd', default='14', values=('14', '17'), multi=False, description='Use the specified C++ standard when building.')
depends_on('boost+context+container cxxstd=14', when='cxxstd=14')
depends_on('boost+context+container cxxstd=17', when='cxxstd=17')
# required dependencies
depends_on('gflags')
depends_on('glog')
depends_on('double-conversion')
depends_on('libevent')
depends_on('fmt')
# optional dependencies
variant('libdwarf', default=False, description="Optional Dependency")
variant('elfutils', default=False, description="Optional Dependency")
variant('libunwind', default=False, description="Optional Dependency")
depends_on('libdwarf', when='+libdwarf')
depends_on('elfutils', when='+elfutils')
depends_on('libunwind', when='+libunwind')
configure_directory = 'folly'
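    # Illustrative spec using the variants declared above (standard Spack CLI
    # syntax; availability depends on this recipe):
    #   spack install folly cxxstd=17 +libunwind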
| 39.836735 | 130 | 0.724898 |
4ae4aa56e63181f6bed66154b5e62f3507c0a7eb | 391,250 | py | Python | nbdev_scipy/_modidx.py | fastai/nbdev-index | 6fb8e36160d88b71c6e1f86562f741274558c74f | [
"Apache-2.0"
] | 2 | 2020-11-12T03:47:24.000Z | 2021-01-17T01:02:10.000Z | nbdev_scipy/_modidx.py | fastai/nbdev-index | 6fb8e36160d88b71c6e1f86562f741274558c74f | [
"Apache-2.0"
] | 28 | 2021-02-15T08:40:38.000Z | 2022-03-20T03:07:28.000Z | nbdev_scipy/_modidx.py | fastai/nbdev-index | 6fb8e36160d88b71c6e1f86562f741274558c74f | [
"Apache-2.0"
] | 2 | 2020-12-03T16:50:55.000Z | 2021-02-19T05:47:08.000Z | # Autogenerated by get_module_idx.py
d = { 'syms': { 'scipy': { 'scipy.cluster': 'https://docs.scipy.org/doc/scipy/reference/cluster.html#module-scipy.cluster',
'scipy.constants': 'https://docs.scipy.org/doc/scipy/reference/constants.html#module-scipy.constants',
'scipy.fft': 'https://docs.scipy.org/doc/scipy/reference/fft.html#module-scipy.fft',
'scipy.fftpack': 'https://docs.scipy.org/doc/scipy/reference/fftpack.html#module-scipy.fftpack',
'scipy.integrate': 'https://docs.scipy.org/doc/scipy/reference/integrate.html#module-scipy.integrate',
'scipy.interpolate': 'https://docs.scipy.org/doc/scipy/reference/interpolate.html#module-scipy.interpolate',
'scipy.io': 'https://docs.scipy.org/doc/scipy/reference/io.html#module-scipy.io',
'scipy.linalg': 'https://docs.scipy.org/doc/scipy/reference/linalg.html#module-scipy.linalg',
'scipy.misc': 'https://docs.scipy.org/doc/scipy/reference/misc.html#module-scipy.misc',
'scipy.ndimage': 'https://docs.scipy.org/doc/scipy/reference/ndimage.html#module-scipy.ndimage',
'scipy.odr': 'https://docs.scipy.org/doc/scipy/reference/odr.html#module-scipy.odr',
'scipy.optimize': 'https://docs.scipy.org/doc/scipy/reference/optimize.html#module-scipy.optimize',
'scipy.signal': 'https://docs.scipy.org/doc/scipy/reference/signal.html#module-scipy.signal',
'scipy.sparse': 'https://docs.scipy.org/doc/scipy/reference/sparse.html#module-scipy.sparse',
'scipy.spatial': 'https://docs.scipy.org/doc/scipy/reference/spatial.html#module-scipy.spatial',
'scipy.special': 'https://docs.scipy.org/doc/scipy/reference/special.html#module-scipy.special',
'scipy.stats': 'https://docs.scipy.org/doc/scipy/reference/stats.html#module-scipy.stats',
'scipy.LowLevelCallable': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.html#scipy.LowLevelCallable',
'scipy.LowLevelCallable.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.__getitem__.html#scipy.LowLevelCallable.__getitem__',
'scipy.LowLevelCallable.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.__len__.html#scipy.LowLevelCallable.__len__',
'scipy.LowLevelCallable.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.__mul__.html#scipy.LowLevelCallable.__mul__',
'scipy.LowLevelCallable.count': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.count.html#scipy.LowLevelCallable.count',
'scipy.LowLevelCallable.from_cython': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.from_cython.html#scipy.LowLevelCallable.from_cython',
'scipy.LowLevelCallable.function': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.function.html#scipy.LowLevelCallable.function',
'scipy.LowLevelCallable.index': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.index.html#scipy.LowLevelCallable.index',
'scipy.LowLevelCallable.signature': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.signature.html#scipy.LowLevelCallable.signature',
'scipy.LowLevelCallable.user_data': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.LowLevelCallable.user_data.html#scipy.LowLevelCallable.user_data'},
'scipy.cluster': { 'scipy.cluster.hierarchy': 'https://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html#module-scipy.cluster.hierarchy',
'scipy.cluster.vq': 'https://docs.scipy.org/doc/scipy/reference/cluster.vq.html#module-scipy.cluster.vq'},
'scipy.fftpack': { 'scipy.fftpack.convolve': 'https://docs.scipy.org/doc/scipy/reference/fftpack.html#module-scipy.fftpack.convolve',
'scipy.fftpack.cc_diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.cc_diff.html#scipy.fftpack.cc_diff',
'scipy.fftpack.cs_diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.cs_diff.html#scipy.fftpack.cs_diff',
'scipy.fftpack.dct': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.dct.html#scipy.fftpack.dct',
'scipy.fftpack.dctn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.dctn.html#scipy.fftpack.dctn',
'scipy.fftpack.diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.diff.html#scipy.fftpack.diff',
'scipy.fftpack.dst': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.dst.html#scipy.fftpack.dst',
'scipy.fftpack.dstn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.dstn.html#scipy.fftpack.dstn',
'scipy.fftpack.fft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.fft.html#scipy.fftpack.fft',
'scipy.fftpack.fft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.fft2.html#scipy.fftpack.fft2',
'scipy.fftpack.fftfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.fftfreq.html#scipy.fftpack.fftfreq',
'scipy.fftpack.fftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.fftn.html#scipy.fftpack.fftn',
'scipy.fftpack.fftshift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.fftshift.html#scipy.fftpack.fftshift',
'scipy.fftpack.hilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.hilbert.html#scipy.fftpack.hilbert',
'scipy.fftpack.idct': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.idct.html#scipy.fftpack.idct',
'scipy.fftpack.idctn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.idctn.html#scipy.fftpack.idctn',
'scipy.fftpack.idst': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.idst.html#scipy.fftpack.idst',
'scipy.fftpack.idstn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.idstn.html#scipy.fftpack.idstn',
'scipy.fftpack.ifft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ifft.html#scipy.fftpack.ifft',
'scipy.fftpack.ifft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ifft2.html#scipy.fftpack.ifft2',
'scipy.fftpack.ifftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ifftn.html#scipy.fftpack.ifftn',
'scipy.fftpack.ifftshift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ifftshift.html#scipy.fftpack.ifftshift',
'scipy.fftpack.ihilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ihilbert.html#scipy.fftpack.ihilbert',
'scipy.fftpack.irfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.irfft.html#scipy.fftpack.irfft',
'scipy.fftpack.itilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.itilbert.html#scipy.fftpack.itilbert',
'scipy.fftpack.next_fast_len': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.next_fast_len.html#scipy.fftpack.next_fast_len',
'scipy.fftpack.rfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.rfft.html#scipy.fftpack.rfft',
'scipy.fftpack.rfftfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.rfftfreq.html#scipy.fftpack.rfftfreq',
'scipy.fftpack.sc_diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.sc_diff.html#scipy.fftpack.sc_diff',
'scipy.fftpack.shift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.shift.html#scipy.fftpack.shift',
'scipy.fftpack.ss_diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.ss_diff.html#scipy.fftpack.ss_diff',
'scipy.fftpack.tilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.tilbert.html#scipy.fftpack.tilbert'},
'scipy.io': { 'scipy.io.arff': 'https://docs.scipy.org/doc/scipy/reference/io.html#module-scipy.io.arff',
'scipy.io.wavfile': 'https://docs.scipy.org/doc/scipy/reference/io.html#module-scipy.io.wavfile',
'scipy.io.FortranFile': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.html#scipy.io.FortranFile',
'scipy.io.netcdf_file': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.html#scipy.io.netcdf_file',
'scipy.io.netcdf_variable': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.html#scipy.io.netcdf_variable',
'scipy.io.FortranEOFError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranEOFError.html#scipy.io.FortranEOFError.with_traceback',
'scipy.io.FortranFile.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.close.html#scipy.io.FortranFile.close',
'scipy.io.FortranFile.read_ints': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.read_ints.html#scipy.io.FortranFile.read_ints',
'scipy.io.FortranFile.read_reals': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.read_reals.html#scipy.io.FortranFile.read_reals',
'scipy.io.FortranFile.read_record': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.read_record.html#scipy.io.FortranFile.read_record',
'scipy.io.FortranFile.write_record': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFile.write_record.html#scipy.io.FortranFile.write_record',
'scipy.io.FortranFormattingError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.FortranFormattingError.html#scipy.io.FortranFormattingError.with_traceback',
'scipy.io.netcdf_file.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.close.html#scipy.io.netcdf_file.close',
'scipy.io.netcdf_file.createDimension': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.createDimension.html#scipy.io.netcdf_file.createDimension',
'scipy.io.netcdf_file.createVariable': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.createVariable.html#scipy.io.netcdf_file.createVariable',
'scipy.io.netcdf_file.flush': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.flush.html#scipy.io.netcdf_file.flush',
'scipy.io.netcdf_file.sync': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_file.sync.html#scipy.io.netcdf_file.sync',
'scipy.io.netcdf_variable.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.__getitem__.html#scipy.io.netcdf_variable.__getitem__',
'scipy.io.netcdf_variable.assignValue': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.assignValue.html#scipy.io.netcdf_variable.assignValue',
'scipy.io.netcdf_variable.getValue': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.getValue.html#scipy.io.netcdf_variable.getValue',
'scipy.io.netcdf_variable.isrec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.isrec.html#scipy.io.netcdf_variable.isrec',
'scipy.io.netcdf_variable.itemsize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.itemsize.html#scipy.io.netcdf_variable.itemsize',
'scipy.io.netcdf_variable.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.shape.html#scipy.io.netcdf_variable.shape',
'scipy.io.netcdf_variable.typecode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.netcdf_variable.typecode.html#scipy.io.netcdf_variable.typecode',
'scipy.io.hb_read': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.hb_read.html#scipy.io.hb_read',
'scipy.io.hb_write': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.hb_write.html#scipy.io.hb_write',
'scipy.io.loadmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.loadmat.html#scipy.io.loadmat',
'scipy.io.mminfo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.mminfo.html#scipy.io.mminfo',
'scipy.io.mmread': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.mmread.html#scipy.io.mmread',
'scipy.io.mmwrite': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.mmwrite.html#scipy.io.mmwrite',
'scipy.io.readsav': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.readsav.html#scipy.io.readsav',
'scipy.io.savemat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.savemat.html#scipy.io.savemat',
'scipy.io.whosmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.whosmat.html#scipy.io.whosmat'},
'scipy.linalg': { 'scipy.linalg.blas': 'https://docs.scipy.org/doc/scipy/reference/linalg.blas.html#module-scipy.linalg.blas',
'scipy.linalg.cython_blas': 'https://docs.scipy.org/doc/scipy/reference/linalg.cython_blas.html#module-scipy.linalg.cython_blas',
'scipy.linalg.cython_lapack': 'https://docs.scipy.org/doc/scipy/reference/linalg.cython_lapack.html#module-scipy.linalg.cython_lapack',
'scipy.linalg.interpolative': 'https://docs.scipy.org/doc/scipy/reference/linalg.interpolative.html#module-scipy.linalg.interpolative',
'scipy.linalg.lapack': 'https://docs.scipy.org/doc/scipy/reference/linalg.lapack.html#module-scipy.linalg.lapack',
'scipy.linalg.LinAlgError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.LinAlgError.html#scipy.linalg.LinAlgError.with_traceback',
'scipy.linalg.LinAlgWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.LinAlgWarning.html#scipy.linalg.LinAlgWarning.with_traceback',
'scipy.linalg.block_diag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.block_diag.html#scipy.linalg.block_diag',
'scipy.linalg.cdf2rdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cdf2rdf.html#scipy.linalg.cdf2rdf',
'scipy.linalg.cho_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cho_factor.html#scipy.linalg.cho_factor',
'scipy.linalg.cho_solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cho_solve.html#scipy.linalg.cho_solve',
'scipy.linalg.cho_solve_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cho_solve_banded.html#scipy.linalg.cho_solve_banded',
'scipy.linalg.cholesky': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cholesky.html#scipy.linalg.cholesky',
'scipy.linalg.cholesky_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cholesky_banded.html#scipy.linalg.cholesky_banded',
'scipy.linalg.circulant': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.circulant.html#scipy.linalg.circulant',
'scipy.linalg.clarkson_woodruff_transform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.clarkson_woodruff_transform.html#scipy.linalg.clarkson_woodruff_transform',
'scipy.linalg.companion': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.companion.html#scipy.linalg.companion',
'scipy.linalg.convolution_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.convolution_matrix.html#scipy.linalg.convolution_matrix',
'scipy.linalg.coshm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.coshm.html#scipy.linalg.coshm',
'scipy.linalg.cosm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cosm.html#scipy.linalg.cosm',
'scipy.linalg.cossin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.cossin.html#scipy.linalg.cossin',
'scipy.linalg.det': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.det.html#scipy.linalg.det',
'scipy.linalg.dft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.dft.html#scipy.linalg.dft',
'scipy.linalg.diagsvd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.diagsvd.html#scipy.linalg.diagsvd',
'scipy.linalg.eig': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig',
'scipy.linalg.eig_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eig_banded.html#scipy.linalg.eig_banded',
'scipy.linalg.eigh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigh.html#scipy.linalg.eigh',
'scipy.linalg.eigh_tridiagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigh_tridiagonal.html#scipy.linalg.eigh_tridiagonal',
'scipy.linalg.eigvals': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigvals.html#scipy.linalg.eigvals',
'scipy.linalg.eigvals_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigvals_banded.html#scipy.linalg.eigvals_banded',
'scipy.linalg.eigvalsh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigvalsh.html#scipy.linalg.eigvalsh',
'scipy.linalg.eigvalsh_tridiagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigvalsh_tridiagonal.html#scipy.linalg.eigvalsh_tridiagonal',
'scipy.linalg.expm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.expm.html#scipy.linalg.expm',
'scipy.linalg.expm_cond': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.expm_cond.html#scipy.linalg.expm_cond',
'scipy.linalg.expm_frechet': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.expm_frechet.html#scipy.linalg.expm_frechet',
'scipy.linalg.fiedler': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.fiedler.html#scipy.linalg.fiedler',
'scipy.linalg.fiedler_companion': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.fiedler_companion.html#scipy.linalg.fiedler_companion',
'scipy.linalg.find_best_blas_type': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.find_best_blas_type.html#scipy.linalg.find_best_blas_type',
'scipy.linalg.fractional_matrix_power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.fractional_matrix_power.html#scipy.linalg.fractional_matrix_power',
'scipy.linalg.funm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.funm.html#scipy.linalg.funm',
'scipy.linalg.get_blas_funcs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.get_blas_funcs.html#scipy.linalg.get_blas_funcs',
'scipy.linalg.get_lapack_funcs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.get_lapack_funcs.html#scipy.linalg.get_lapack_funcs',
'scipy.linalg.hadamard': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.hadamard.html#scipy.linalg.hadamard',
'scipy.linalg.hankel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.hankel.html#scipy.linalg.hankel',
'scipy.linalg.helmert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.helmert.html#scipy.linalg.helmert',
'scipy.linalg.hessenberg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.hessenberg.html#scipy.linalg.hessenberg',
'scipy.linalg.hilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.hilbert.html#scipy.linalg.hilbert',
'scipy.linalg.inv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.inv.html#scipy.linalg.inv',
'scipy.linalg.invhilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.invhilbert.html#scipy.linalg.invhilbert',
'scipy.linalg.invpascal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.invpascal.html#scipy.linalg.invpascal',
'scipy.linalg.khatri_rao': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.khatri_rao.html#scipy.linalg.khatri_rao',
'scipy.linalg.kron': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.kron.html#scipy.linalg.kron',
'scipy.linalg.ldl': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.ldl.html#scipy.linalg.ldl',
'scipy.linalg.leslie': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.leslie.html#scipy.linalg.leslie',
'scipy.linalg.logm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.logm.html#scipy.linalg.logm',
'scipy.linalg.lstsq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lstsq.html#scipy.linalg.lstsq',
'scipy.linalg.lu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lu.html#scipy.linalg.lu',
'scipy.linalg.lu_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lu_factor.html#scipy.linalg.lu_factor',
'scipy.linalg.lu_solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lu_solve.html#scipy.linalg.lu_solve',
'scipy.linalg.matrix_balance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.matrix_balance.html#scipy.linalg.matrix_balance',
'scipy.linalg.norm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.norm.html#scipy.linalg.norm',
'scipy.linalg.null_space': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.null_space.html#scipy.linalg.null_space',
'scipy.linalg.ordqz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.ordqz.html#scipy.linalg.ordqz',
'scipy.linalg.orth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.orth.html#scipy.linalg.orth',
'scipy.linalg.orthogonal_procrustes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.orthogonal_procrustes.html#scipy.linalg.orthogonal_procrustes',
'scipy.linalg.pascal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.pascal.html#scipy.linalg.pascal',
'scipy.linalg.pinv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.pinv.html#scipy.linalg.pinv',
'scipy.linalg.pinv2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.pinv2.html#scipy.linalg.pinv2',
'scipy.linalg.pinvh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.pinvh.html#scipy.linalg.pinvh',
'scipy.linalg.polar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.polar.html#scipy.linalg.polar',
'scipy.linalg.qr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qr.html#scipy.linalg.qr',
'scipy.linalg.qr_delete': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qr_delete.html#scipy.linalg.qr_delete',
'scipy.linalg.qr_insert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qr_insert.html#scipy.linalg.qr_insert',
'scipy.linalg.qr_multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qr_multiply.html#scipy.linalg.qr_multiply',
'scipy.linalg.qr_update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qr_update.html#scipy.linalg.qr_update',
'scipy.linalg.qz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.qz.html#scipy.linalg.qz',
'scipy.linalg.rq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.rq.html#scipy.linalg.rq',
'scipy.linalg.rsf2csf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.rsf2csf.html#scipy.linalg.rsf2csf',
'scipy.linalg.schur': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.schur.html#scipy.linalg.schur',
'scipy.linalg.signm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.signm.html#scipy.linalg.signm',
'scipy.linalg.sinhm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.sinhm.html#scipy.linalg.sinhm',
'scipy.linalg.sinm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.sinm.html#scipy.linalg.sinm',
'scipy.linalg.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve.html#scipy.linalg.solve',
'scipy.linalg.solve_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_banded.html#scipy.linalg.solve_banded',
'scipy.linalg.solve_circulant': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_circulant.html#scipy.linalg.solve_circulant',
'scipy.linalg.solve_continuous_are': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_continuous_are.html#scipy.linalg.solve_continuous_are',
'scipy.linalg.solve_continuous_lyapunov': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_continuous_lyapunov.html#scipy.linalg.solve_continuous_lyapunov',
'scipy.linalg.solve_discrete_are': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_discrete_are.html#scipy.linalg.solve_discrete_are',
'scipy.linalg.solve_discrete_lyapunov': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_discrete_lyapunov.html#scipy.linalg.solve_discrete_lyapunov',
'scipy.linalg.solve_sylvester': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_sylvester.html#scipy.linalg.solve_sylvester',
'scipy.linalg.solve_toeplitz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_toeplitz.html#scipy.linalg.solve_toeplitz',
'scipy.linalg.solve_triangular': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solve_triangular.html#scipy.linalg.solve_triangular',
'scipy.linalg.solveh_banded': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html#scipy.linalg.solveh_banded',
'scipy.linalg.sqrtm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.sqrtm.html#scipy.linalg.sqrtm',
'scipy.linalg.subspace_angles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.subspace_angles.html#scipy.linalg.subspace_angles',
'scipy.linalg.svd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svd.html#scipy.linalg.svd',
'scipy.linalg.svdvals': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svdvals.html#scipy.linalg.svdvals',
'scipy.linalg.tanhm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.tanhm.html#scipy.linalg.tanhm',
'scipy.linalg.tanm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.tanm.html#scipy.linalg.tanm',
'scipy.linalg.toeplitz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.toeplitz.html#scipy.linalg.toeplitz',
'scipy.linalg.tri': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.tri.html#scipy.linalg.tri',
'scipy.linalg.tril': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.tril.html#scipy.linalg.tril',
'scipy.linalg.triu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.triu.html#scipy.linalg.triu'},
'scipy.optimize': { 'scipy.optimize.cython_optimize': 'https://docs.scipy.org/doc/scipy/reference/optimize.cython_optimize.html#module-scipy.optimize.cython_optimize',
'scipy.optimize.nonlin': 'https://docs.scipy.org/doc/scipy/reference/optimize.nonlin.html#module-scipy.optimize.nonlin',
'scipy.optimize.BFGS': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.BFGS.html#scipy.optimize.BFGS',
'scipy.optimize.Bounds': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.Bounds.html#scipy.optimize.Bounds',
'scipy.optimize.HessianUpdateStrategy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.HessianUpdateStrategy.html#scipy.optimize.HessianUpdateStrategy',
'scipy.optimize.LbfgsInvHessProduct': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.html#scipy.optimize.LbfgsInvHessProduct',
'scipy.optimize.LinearConstraint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LinearConstraint.html#scipy.optimize.LinearConstraint',
'scipy.optimize.NonlinearConstraint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.NonlinearConstraint.html#scipy.optimize.NonlinearConstraint',
'scipy.optimize.OptimizeResult': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html#scipy.optimize.OptimizeResult',
'scipy.optimize.RootResults': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.RootResults.html#scipy.optimize.RootResults',
'scipy.optimize.SR1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.SR1.html#scipy.optimize.SR1',
'scipy.optimize.BFGS.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.BFGS.dot.html#scipy.optimize.BFGS.dot',
'scipy.optimize.BFGS.get_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.BFGS.get_matrix.html#scipy.optimize.BFGS.get_matrix',
'scipy.optimize.BFGS.initialize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.BFGS.initialize.html#scipy.optimize.BFGS.initialize',
'scipy.optimize.BFGS.update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.BFGS.update.html#scipy.optimize.BFGS.update',
'scipy.optimize.HessianUpdateStrategy.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.HessianUpdateStrategy.dot.html#scipy.optimize.HessianUpdateStrategy.dot',
'scipy.optimize.HessianUpdateStrategy.get_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.HessianUpdateStrategy.get_matrix.html#scipy.optimize.HessianUpdateStrategy.get_matrix',
'scipy.optimize.HessianUpdateStrategy.initialize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.HessianUpdateStrategy.initialize.html#scipy.optimize.HessianUpdateStrategy.initialize',
'scipy.optimize.HessianUpdateStrategy.update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.HessianUpdateStrategy.update.html#scipy.optimize.HessianUpdateStrategy.update',
'scipy.optimize.LbfgsInvHessProduct.H': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.H.html#scipy.optimize.LbfgsInvHessProduct.H',
'scipy.optimize.LbfgsInvHessProduct.T': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.T.html#scipy.optimize.LbfgsInvHessProduct.T',
'scipy.optimize.LbfgsInvHessProduct.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.__call__.html#scipy.optimize.LbfgsInvHessProduct.__call__',
'scipy.optimize.LbfgsInvHessProduct.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.__mul__.html#scipy.optimize.LbfgsInvHessProduct.__mul__',
'scipy.optimize.LbfgsInvHessProduct.adjoint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.adjoint.html#scipy.optimize.LbfgsInvHessProduct.adjoint',
'scipy.optimize.LbfgsInvHessProduct.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.dot.html#scipy.optimize.LbfgsInvHessProduct.dot',
'scipy.optimize.LbfgsInvHessProduct.matmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.matmat.html#scipy.optimize.LbfgsInvHessProduct.matmat',
'scipy.optimize.LbfgsInvHessProduct.matvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.matvec.html#scipy.optimize.LbfgsInvHessProduct.matvec',
'scipy.optimize.LbfgsInvHessProduct.rmatmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.rmatmat.html#scipy.optimize.LbfgsInvHessProduct.rmatmat',
'scipy.optimize.LbfgsInvHessProduct.rmatvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.rmatvec.html#scipy.optimize.LbfgsInvHessProduct.rmatvec',
'scipy.optimize.LbfgsInvHessProduct.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.todense.html#scipy.optimize.LbfgsInvHessProduct.todense',
'scipy.optimize.LbfgsInvHessProduct.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.LbfgsInvHessProduct.transpose.html#scipy.optimize.LbfgsInvHessProduct.transpose',
'scipy.optimize.OptimizeResult.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.__getitem__.html#scipy.optimize.OptimizeResult.__getitem__',
'scipy.optimize.OptimizeResult.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.__len__.html#scipy.optimize.OptimizeResult.__len__',
'scipy.optimize.OptimizeResult.clear': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.clear.html#scipy.optimize.OptimizeResult.clear',
'scipy.optimize.OptimizeResult.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.copy.html#scipy.optimize.OptimizeResult.copy',
'scipy.optimize.OptimizeResult.fromkeys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.fromkeys.html#scipy.optimize.OptimizeResult.fromkeys',
'scipy.optimize.OptimizeResult.get': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.get.html#scipy.optimize.OptimizeResult.get',
'scipy.optimize.OptimizeResult.items': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.items.html#scipy.optimize.OptimizeResult.items',
'scipy.optimize.OptimizeResult.keys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.keys.html#scipy.optimize.OptimizeResult.keys',
'scipy.optimize.OptimizeResult.pop': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.pop.html#scipy.optimize.OptimizeResult.pop',
'scipy.optimize.OptimizeResult.popitem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.popitem.html#scipy.optimize.OptimizeResult.popitem',
'scipy.optimize.OptimizeResult.setdefault': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.setdefault.html#scipy.optimize.OptimizeResult.setdefault',
'scipy.optimize.OptimizeResult.update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.update.html#scipy.optimize.OptimizeResult.update',
'scipy.optimize.OptimizeResult.values': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.values.html#scipy.optimize.OptimizeResult.values',
'scipy.optimize.OptimizeWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeWarning.html#scipy.optimize.OptimizeWarning.with_traceback',
'scipy.optimize.SR1.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.SR1.dot.html#scipy.optimize.SR1.dot',
'scipy.optimize.SR1.get_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.SR1.get_matrix.html#scipy.optimize.SR1.get_matrix',
'scipy.optimize.SR1.initialize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.SR1.initialize.html#scipy.optimize.SR1.initialize',
'scipy.optimize.SR1.update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.SR1.update.html#scipy.optimize.SR1.update',
'scipy.optimize.anderson': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.anderson.html#scipy.optimize.anderson',
'scipy.optimize.approx_fprime': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.approx_fprime.html#scipy.optimize.approx_fprime',
'scipy.optimize.basinhopping': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html#scipy.optimize.basinhopping',
'scipy.optimize.bisect': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.bisect.html#scipy.optimize.bisect',
'scipy.optimize.bracket': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.bracket.html#scipy.optimize.bracket',
'scipy.optimize.brent': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brent.html#scipy.optimize.brent',
'scipy.optimize.brenth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brenth.html#scipy.optimize.brenth',
'scipy.optimize.brentq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brentq.html#scipy.optimize.brentq',
'scipy.optimize.broyden1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.broyden1.html#scipy.optimize.broyden1',
'scipy.optimize.broyden2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.broyden2.html#scipy.optimize.broyden2',
'scipy.optimize.brute': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.brute.html#scipy.optimize.brute',
'scipy.optimize.check_grad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.check_grad.html#scipy.optimize.check_grad',
'scipy.optimize.curve_fit': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html#scipy.optimize.curve_fit',
'scipy.optimize.diagbroyden': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.diagbroyden.html#scipy.optimize.diagbroyden',
'scipy.optimize.differential_evolution': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.differential_evolution.html#scipy.optimize.differential_evolution',
'scipy.optimize.dual_annealing': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.dual_annealing.html#scipy.optimize.dual_annealing',
'scipy.optimize.excitingmixing': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.excitingmixing.html#scipy.optimize.excitingmixing',
'scipy.optimize.fixed_point': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fixed_point.html#scipy.optimize.fixed_point',
'scipy.optimize.fmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html#scipy.optimize.fmin',
'scipy.optimize.fmin_bfgs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_bfgs.html#scipy.optimize.fmin_bfgs',
'scipy.optimize.fmin_cg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_cg.html#scipy.optimize.fmin_cg',
'scipy.optimize.fmin_cobyla': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_cobyla.html#scipy.optimize.fmin_cobyla',
'scipy.optimize.fmin_l_bfgs_b': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html#scipy.optimize.fmin_l_bfgs_b',
'scipy.optimize.fmin_ncg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_ncg.html#scipy.optimize.fmin_ncg',
'scipy.optimize.fmin_powell': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_powell.html#scipy.optimize.fmin_powell',
'scipy.optimize.fmin_slsqp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_slsqp.html#scipy.optimize.fmin_slsqp',
'scipy.optimize.fmin_tnc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_tnc.html#scipy.optimize.fmin_tnc',
'scipy.optimize.fminbound': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fminbound.html#scipy.optimize.fminbound',
'scipy.optimize.fsolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fsolve.html#scipy.optimize.fsolve',
'scipy.optimize.golden': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.golden.html#scipy.optimize.golden',
'scipy.optimize.least_squares': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares',
'scipy.optimize.leastsq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.leastsq.html#scipy.optimize.leastsq',
'scipy.optimize.line_search': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.line_search.html#scipy.optimize.line_search',
'scipy.optimize.linear_sum_assignment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html#scipy.optimize.linear_sum_assignment',
'scipy.optimize.linearmixing': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linearmixing.html#scipy.optimize.linearmixing',
'scipy.optimize.linprog': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html#scipy.optimize.linprog',
'scipy.optimize.linprog_verbose_callback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog_verbose_callback.html#scipy.optimize.linprog_verbose_callback',
'scipy.optimize.lsq_linear': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.lsq_linear.html#scipy.optimize.lsq_linear',
'scipy.optimize.minimize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize',
'scipy.optimize.minimize_scalar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar',
'scipy.optimize.newton': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton.html#scipy.optimize.newton',
'scipy.optimize.newton_krylov': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.newton_krylov.html#scipy.optimize.newton_krylov',
'scipy.optimize.nnls': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.nnls.html#scipy.optimize.nnls',
'scipy.optimize.ridder': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.ridder.html#scipy.optimize.ridder',
'scipy.optimize.root': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html#scipy.optimize.root',
'scipy.optimize.root_scalar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root_scalar.html#scipy.optimize.root_scalar',
'scipy.optimize.rosen': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen.html#scipy.optimize.rosen',
'scipy.optimize.rosen_der': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_der.html#scipy.optimize.rosen_der',
'scipy.optimize.rosen_hess': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess.html#scipy.optimize.rosen_hess',
'scipy.optimize.rosen_hess_prod': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.rosen_hess_prod.html#scipy.optimize.rosen_hess_prod',
'scipy.optimize.shgo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.shgo.html#scipy.optimize.shgo',
'scipy.optimize.show_options': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.show_options.html#scipy.optimize.show_options',
'scipy.optimize.toms748': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.toms748.html#scipy.optimize.toms748'},
'scipy.signal': { 'scipy.signal.windows': 'https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows',
'scipy.signal.StateSpace': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.html#scipy.signal.StateSpace',
'scipy.signal.TransferFunction': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.html#scipy.signal.TransferFunction',
'scipy.signal.ZerosPolesGain': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.html#scipy.signal.ZerosPolesGain',
'scipy.signal.dlti': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.html#scipy.signal.dlti',
'scipy.signal.lti': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.html#scipy.signal.lti',
'scipy.signal.BadCoefficients.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.BadCoefficients.html#scipy.signal.BadCoefficients.with_traceback',
'scipy.signal.StateSpace.A': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.A.html#scipy.signal.StateSpace.A',
'scipy.signal.StateSpace.B': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.B.html#scipy.signal.StateSpace.B',
'scipy.signal.StateSpace.C': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.C.html#scipy.signal.StateSpace.C',
'scipy.signal.StateSpace.D': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.D.html#scipy.signal.StateSpace.D',
'scipy.signal.StateSpace.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.__mul__.html#scipy.signal.StateSpace.__mul__',
'scipy.signal.StateSpace.dt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.dt.html#scipy.signal.StateSpace.dt',
'scipy.signal.StateSpace.poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.poles.html#scipy.signal.StateSpace.poles',
'scipy.signal.StateSpace.to_ss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.to_ss.html#scipy.signal.StateSpace.to_ss',
'scipy.signal.StateSpace.to_tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.to_tf.html#scipy.signal.StateSpace.to_tf',
'scipy.signal.StateSpace.to_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.to_zpk.html#scipy.signal.StateSpace.to_zpk',
'scipy.signal.StateSpace.zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.StateSpace.zeros.html#scipy.signal.StateSpace.zeros',
'scipy.signal.TransferFunction.den': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.den.html#scipy.signal.TransferFunction.den',
'scipy.signal.TransferFunction.dt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.dt.html#scipy.signal.TransferFunction.dt',
'scipy.signal.TransferFunction.num': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.num.html#scipy.signal.TransferFunction.num',
'scipy.signal.TransferFunction.poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.poles.html#scipy.signal.TransferFunction.poles',
'scipy.signal.TransferFunction.to_ss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.to_ss.html#scipy.signal.TransferFunction.to_ss',
'scipy.signal.TransferFunction.to_tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.to_tf.html#scipy.signal.TransferFunction.to_tf',
'scipy.signal.TransferFunction.to_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.to_zpk.html#scipy.signal.TransferFunction.to_zpk',
'scipy.signal.TransferFunction.zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.zeros.html#scipy.signal.TransferFunction.zeros',
'scipy.signal.ZerosPolesGain.dt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.dt.html#scipy.signal.ZerosPolesGain.dt',
'scipy.signal.ZerosPolesGain.gain': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.gain.html#scipy.signal.ZerosPolesGain.gain',
'scipy.signal.ZerosPolesGain.poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.poles.html#scipy.signal.ZerosPolesGain.poles',
'scipy.signal.ZerosPolesGain.to_ss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.to_ss.html#scipy.signal.ZerosPolesGain.to_ss',
'scipy.signal.ZerosPolesGain.to_tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.to_tf.html#scipy.signal.ZerosPolesGain.to_tf',
'scipy.signal.ZerosPolesGain.to_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.to_zpk.html#scipy.signal.ZerosPolesGain.to_zpk',
'scipy.signal.ZerosPolesGain.zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ZerosPolesGain.zeros.html#scipy.signal.ZerosPolesGain.zeros',
'scipy.signal.dlti.bode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.bode.html#scipy.signal.dlti.bode',
'scipy.signal.dlti.dt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.dt.html#scipy.signal.dlti.dt',
'scipy.signal.dlti.freqresp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.freqresp.html#scipy.signal.dlti.freqresp',
'scipy.signal.dlti.impulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.impulse.html#scipy.signal.dlti.impulse',
'scipy.signal.dlti.output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.output.html#scipy.signal.dlti.output',
'scipy.signal.dlti.poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.poles.html#scipy.signal.dlti.poles',
'scipy.signal.dlti.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.step.html#scipy.signal.dlti.step',
'scipy.signal.dlti.zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlti.zeros.html#scipy.signal.dlti.zeros',
'scipy.signal.lti.bode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.bode.html#scipy.signal.lti.bode',
'scipy.signal.lti.dt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.dt.html#scipy.signal.lti.dt',
'scipy.signal.lti.freqresp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.freqresp.html#scipy.signal.lti.freqresp',
'scipy.signal.lti.impulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.impulse.html#scipy.signal.lti.impulse',
'scipy.signal.lti.output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.output.html#scipy.signal.lti.output',
'scipy.signal.lti.poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.poles.html#scipy.signal.lti.poles',
'scipy.signal.lti.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.step.html#scipy.signal.lti.step',
'scipy.signal.lti.to_discrete': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.to_discrete.html#scipy.signal.lti.to_discrete',
'scipy.signal.lti.zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lti.zeros.html#scipy.signal.lti.zeros',
'scipy.signal.abcd_normalize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.abcd_normalize.html#scipy.signal.abcd_normalize',
'scipy.signal.argrelextrema': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.argrelextrema.html#scipy.signal.argrelextrema',
'scipy.signal.argrelmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.argrelmax.html#scipy.signal.argrelmax',
'scipy.signal.argrelmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.argrelmin.html#scipy.signal.argrelmin',
'scipy.signal.band_stop_obj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.band_stop_obj.html#scipy.signal.band_stop_obj',
'scipy.signal.bessel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bessel.html#scipy.signal.bessel',
'scipy.signal.besselap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.besselap.html#scipy.signal.besselap',
'scipy.signal.bilinear': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bilinear.html#scipy.signal.bilinear',
'scipy.signal.bilinear_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bilinear_zpk.html#scipy.signal.bilinear_zpk',
'scipy.signal.bode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bode.html#scipy.signal.bode',
'scipy.signal.bspline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.bspline.html#scipy.signal.bspline',
'scipy.signal.buttap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.buttap.html#scipy.signal.buttap',
'scipy.signal.butter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html#scipy.signal.butter',
'scipy.signal.buttord': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.buttord.html#scipy.signal.buttord',
'scipy.signal.cascade': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cascade.html#scipy.signal.cascade',
'scipy.signal.cheb1ap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheb1ap.html#scipy.signal.cheb1ap',
'scipy.signal.cheb1ord': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheb1ord.html#scipy.signal.cheb1ord',
'scipy.signal.cheb2ap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheb2ap.html#scipy.signal.cheb2ap',
'scipy.signal.cheb2ord': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheb2ord.html#scipy.signal.cheb2ord',
'scipy.signal.cheby1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheby1.html#scipy.signal.cheby1',
'scipy.signal.cheby2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheby2.html#scipy.signal.cheby2',
'scipy.signal.check_COLA': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.check_COLA.html#scipy.signal.check_COLA',
'scipy.signal.check_NOLA': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.check_NOLA.html#scipy.signal.check_NOLA',
'scipy.signal.chirp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.chirp.html#scipy.signal.chirp',
'scipy.signal.choose_conv_method': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.choose_conv_method.html#scipy.signal.choose_conv_method',
'scipy.signal.cmplx_sort': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cmplx_sort.html#scipy.signal.cmplx_sort',
'scipy.signal.coherence': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.coherence.html#scipy.signal.coherence',
'scipy.signal.cont2discrete': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cont2discrete.html#scipy.signal.cont2discrete',
'scipy.signal.convolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve.html#scipy.signal.convolve',
'scipy.signal.convolve2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html#scipy.signal.convolve2d',
'scipy.signal.correlate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate.html#scipy.signal.correlate',
'scipy.signal.correlate2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate2d.html#scipy.signal.correlate2d',
'scipy.signal.csd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.csd.html#scipy.signal.csd',
'scipy.signal.cspline1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cspline1d.html#scipy.signal.cspline1d',
'scipy.signal.cspline1d_eval': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cspline1d_eval.html#scipy.signal.cspline1d_eval',
'scipy.signal.cspline2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cspline2d.html#scipy.signal.cspline2d',
'scipy.signal.cubic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cubic.html#scipy.signal.cubic',
'scipy.signal.cwt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cwt.html#scipy.signal.cwt',
'scipy.signal.daub': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.daub.html#scipy.signal.daub',
'scipy.signal.dbode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dbode.html#scipy.signal.dbode',
'scipy.signal.decimate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.decimate.html#scipy.signal.decimate',
'scipy.signal.deconvolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.deconvolve.html#scipy.signal.deconvolve',
'scipy.signal.detrend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.detrend.html#scipy.signal.detrend',
'scipy.signal.dfreqresp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dfreqresp.html#scipy.signal.dfreqresp',
'scipy.signal.dimpulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dimpulse.html#scipy.signal.dimpulse',
'scipy.signal.dlsim': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dlsim.html#scipy.signal.dlsim',
'scipy.signal.dstep': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.dstep.html#scipy.signal.dstep',
'scipy.signal.ellip': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ellip.html#scipy.signal.ellip',
'scipy.signal.ellipap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ellipap.html#scipy.signal.ellipap',
'scipy.signal.ellipord': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ellipord.html#scipy.signal.ellipord',
'scipy.signal.fftconvolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.fftconvolve.html#scipy.signal.fftconvolve',
'scipy.signal.filtfilt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html#scipy.signal.filtfilt',
'scipy.signal.find_peaks': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html#scipy.signal.find_peaks',
'scipy.signal.find_peaks_cwt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks_cwt.html#scipy.signal.find_peaks_cwt',
'scipy.signal.findfreqs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.findfreqs.html#scipy.signal.findfreqs',
'scipy.signal.firls': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firls.html#scipy.signal.firls',
'scipy.signal.firwin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin.html#scipy.signal.firwin',
'scipy.signal.firwin2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin2.html#scipy.signal.firwin2',
'scipy.signal.freqresp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.freqresp.html#scipy.signal.freqresp',
'scipy.signal.freqs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.freqs.html#scipy.signal.freqs',
'scipy.signal.freqs_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.freqs_zpk.html#scipy.signal.freqs_zpk',
'scipy.signal.freqz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.freqz.html#scipy.signal.freqz',
'scipy.signal.freqz_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.freqz_zpk.html#scipy.signal.freqz_zpk',
'scipy.signal.gauss_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.gauss_spline.html#scipy.signal.gauss_spline',
'scipy.signal.gausspulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.gausspulse.html#scipy.signal.gausspulse',
'scipy.signal.get_window': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.get_window.html#scipy.signal.get_window',
'scipy.signal.group_delay': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.group_delay.html#scipy.signal.group_delay',
'scipy.signal.hilbert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert.html#scipy.signal.hilbert',
'scipy.signal.hilbert2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.hilbert2.html#scipy.signal.hilbert2',
'scipy.signal.iirdesign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirdesign.html#scipy.signal.iirdesign',
'scipy.signal.iirfilter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirfilter.html#scipy.signal.iirfilter',
'scipy.signal.iirnotch': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirnotch.html#scipy.signal.iirnotch',
'scipy.signal.iirpeak': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirpeak.html#scipy.signal.iirpeak',
'scipy.signal.impulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.impulse.html#scipy.signal.impulse',
'scipy.signal.impulse2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.impulse2.html#scipy.signal.impulse2',
'scipy.signal.invres': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.invres.html#scipy.signal.invres',
'scipy.signal.invresz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.invresz.html#scipy.signal.invresz',
'scipy.signal.istft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.istft.html#scipy.signal.istft',
'scipy.signal.kaiser_atten': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.kaiser_atten.html#scipy.signal.kaiser_atten',
'scipy.signal.kaiser_beta': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.kaiser_beta.html#scipy.signal.kaiser_beta',
'scipy.signal.kaiserord': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.kaiserord.html#scipy.signal.kaiserord',
'scipy.signal.lfilter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html#scipy.signal.lfilter',
'scipy.signal.lfilter_zi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter_zi.html#scipy.signal.lfilter_zi',
'scipy.signal.lfiltic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfiltic.html#scipy.signal.lfiltic',
'scipy.signal.lombscargle': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lombscargle.html#scipy.signal.lombscargle',
'scipy.signal.lp2bp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2bp.html#scipy.signal.lp2bp',
'scipy.signal.lp2bp_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2bp_zpk.html#scipy.signal.lp2bp_zpk',
'scipy.signal.lp2bs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2bs.html#scipy.signal.lp2bs',
'scipy.signal.lp2bs_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2bs_zpk.html#scipy.signal.lp2bs_zpk',
'scipy.signal.lp2hp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2hp.html#scipy.signal.lp2hp',
'scipy.signal.lp2hp_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2hp_zpk.html#scipy.signal.lp2hp_zpk',
'scipy.signal.lp2lp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2lp.html#scipy.signal.lp2lp',
'scipy.signal.lp2lp_zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lp2lp_zpk.html#scipy.signal.lp2lp_zpk',
'scipy.signal.lsim': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lsim.html#scipy.signal.lsim',
'scipy.signal.lsim2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lsim2.html#scipy.signal.lsim2',
'scipy.signal.max_len_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.max_len_seq.html#scipy.signal.max_len_seq',
'scipy.signal.medfilt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.medfilt.html#scipy.signal.medfilt',
'scipy.signal.medfilt2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.medfilt2d.html#scipy.signal.medfilt2d',
'scipy.signal.minimum_phase': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.minimum_phase.html#scipy.signal.minimum_phase',
'scipy.signal.morlet': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.morlet.html#scipy.signal.morlet',
'scipy.signal.morlet2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.morlet2.html#scipy.signal.morlet2',
'scipy.signal.normalize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.normalize.html#scipy.signal.normalize',
'scipy.signal.oaconvolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.oaconvolve.html#scipy.signal.oaconvolve',
'scipy.signal.order_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.order_filter.html#scipy.signal.order_filter',
'scipy.signal.peak_prominences': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_prominences.html#scipy.signal.peak_prominences',
'scipy.signal.peak_widths': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.peak_widths.html#scipy.signal.peak_widths',
'scipy.signal.periodogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.periodogram.html#scipy.signal.periodogram',
'scipy.signal.place_poles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.place_poles.html#scipy.signal.place_poles',
'scipy.signal.qmf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.qmf.html#scipy.signal.qmf',
'scipy.signal.qspline1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.qspline1d.html#scipy.signal.qspline1d',
'scipy.signal.qspline1d_eval': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.qspline1d_eval.html#scipy.signal.qspline1d_eval',
'scipy.signal.qspline2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.qspline2d.html#scipy.signal.qspline2d',
'scipy.signal.quadratic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.quadratic.html#scipy.signal.quadratic',
'scipy.signal.remez': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.remez.html#scipy.signal.remez',
'scipy.signal.resample': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.resample.html#scipy.signal.resample',
'scipy.signal.resample_poly': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.resample_poly.html#scipy.signal.resample_poly',
'scipy.signal.residue': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.residue.html#scipy.signal.residue',
'scipy.signal.residuez': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.residuez.html#scipy.signal.residuez',
'scipy.signal.ricker': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ricker.html#scipy.signal.ricker',
'scipy.signal.savgol_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_coeffs.html#scipy.signal.savgol_coeffs',
'scipy.signal.savgol_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html#scipy.signal.savgol_filter',
'scipy.signal.sawtooth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sawtooth.html#scipy.signal.sawtooth',
'scipy.signal.sepfir2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sepfir2d.html#scipy.signal.sepfir2d',
'scipy.signal.sos2tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sos2tf.html#scipy.signal.sos2tf',
'scipy.signal.sos2zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sos2zpk.html#scipy.signal.sos2zpk',
'scipy.signal.sosfilt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfilt.html#scipy.signal.sosfilt',
'scipy.signal.sosfilt_zi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfilt_zi.html#scipy.signal.sosfilt_zi',
'scipy.signal.sosfiltfilt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfiltfilt.html#scipy.signal.sosfiltfilt',
'scipy.signal.sosfreqz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sosfreqz.html#scipy.signal.sosfreqz',
'scipy.signal.spectrogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html#scipy.signal.spectrogram',
'scipy.signal.spline_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spline_filter.html#scipy.signal.spline_filter',
'scipy.signal.square': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.square.html#scipy.signal.square',
'scipy.signal.ss2tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ss2tf.html#scipy.signal.ss2tf',
'scipy.signal.ss2zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.ss2zpk.html#scipy.signal.ss2zpk',
'scipy.signal.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.step.html#scipy.signal.step',
'scipy.signal.step2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.step2.html#scipy.signal.step2',
'scipy.signal.stft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.stft.html#scipy.signal.stft',
'scipy.signal.sweep_poly': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sweep_poly.html#scipy.signal.sweep_poly',
'scipy.signal.symiirorder1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.symiirorder1.html#scipy.signal.symiirorder1',
'scipy.signal.symiirorder2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.symiirorder2.html#scipy.signal.symiirorder2',
'scipy.signal.tf2sos': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.tf2sos.html#scipy.signal.tf2sos',
'scipy.signal.tf2ss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.tf2ss.html#scipy.signal.tf2ss',
'scipy.signal.tf2zpk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.tf2zpk.html#scipy.signal.tf2zpk',
'scipy.signal.unique_roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.unique_roots.html#scipy.signal.unique_roots',
'scipy.signal.unit_impulse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.unit_impulse.html#scipy.signal.unit_impulse',
'scipy.signal.upfirdn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.upfirdn.html#scipy.signal.upfirdn',
'scipy.signal.vectorstrength': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.vectorstrength.html#scipy.signal.vectorstrength',
'scipy.signal.welch': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.welch.html#scipy.signal.welch',
'scipy.signal.wiener': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.wiener.html#scipy.signal.wiener',
'scipy.signal.zpk2sos': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.zpk2sos.html#scipy.signal.zpk2sos',
'scipy.signal.zpk2ss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.zpk2ss.html#scipy.signal.zpk2ss',
'scipy.signal.zpk2tf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.zpk2tf.html#scipy.signal.zpk2tf'},
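                     # Usage sketch (assumption): this excerpt is part of a literal nested dict
                     # mapping each scipy submodule name to a {qualified name -> docs URL} table.
                     # If the enclosing dict were bound to a name such as `SCIPY_DOC_LINKS`
                     # (hypothetical -- the actual binding is outside this excerpt), a lookup
                     # would read:
                     #
                     #     url = SCIPY_DOC_LINKS['scipy.signal']['scipy.signal.welch']
                     #     # -> 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.welch.html#scipy.signal.welch'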
'scipy.sparse': { 'scipy.sparse.csgraph': 'https://docs.scipy.org/doc/scipy/reference/sparse.csgraph.html#module-scipy.sparse.csgraph',
'scipy.sparse.linalg': 'https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html#module-scipy.sparse.linalg',
'scipy.sparse.bsr_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.html#scipy.sparse.bsr_matrix',
'scipy.sparse.coo_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html#scipy.sparse.coo_matrix',
'scipy.sparse.csc_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.html#scipy.sparse.csc_matrix',
'scipy.sparse.csr_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html#scipy.sparse.csr_matrix',
'scipy.sparse.dia_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.html#scipy.sparse.dia_matrix',
'scipy.sparse.dok_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.html#scipy.sparse.dok_matrix',
'scipy.sparse.lil_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.html#scipy.sparse.lil_matrix',
'scipy.sparse.spmatrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.html#scipy.sparse.spmatrix',
'scipy.sparse.SparseEfficiencyWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.SparseEfficiencyWarning.html#scipy.sparse.SparseEfficiencyWarning.with_traceback',
'scipy.sparse.SparseWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.SparseWarning.html#scipy.sparse.SparseWarning.with_traceback',
'scipy.sparse.bsr_matrix.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.__getitem__.html#scipy.sparse.bsr_matrix.__getitem__',
'scipy.sparse.bsr_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.__len__.html#scipy.sparse.bsr_matrix.__len__',
'scipy.sparse.bsr_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.__mul__.html#scipy.sparse.bsr_matrix.__mul__',
'scipy.sparse.bsr_matrix.arcsin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.arcsin.html#scipy.sparse.bsr_matrix.arcsin',
'scipy.sparse.bsr_matrix.arcsinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.arcsinh.html#scipy.sparse.bsr_matrix.arcsinh',
'scipy.sparse.bsr_matrix.arctan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.arctan.html#scipy.sparse.bsr_matrix.arctan',
'scipy.sparse.bsr_matrix.arctanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.arctanh.html#scipy.sparse.bsr_matrix.arctanh',
'scipy.sparse.bsr_matrix.argmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.argmax.html#scipy.sparse.bsr_matrix.argmax',
'scipy.sparse.bsr_matrix.argmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.argmin.html#scipy.sparse.bsr_matrix.argmin',
'scipy.sparse.bsr_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.asformat.html#scipy.sparse.bsr_matrix.asformat',
'scipy.sparse.bsr_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.asfptype.html#scipy.sparse.bsr_matrix.asfptype',
'scipy.sparse.bsr_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.astype.html#scipy.sparse.bsr_matrix.astype',
'scipy.sparse.bsr_matrix.blocksize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.blocksize.html#scipy.sparse.bsr_matrix.blocksize',
'scipy.sparse.bsr_matrix.ceil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.ceil.html#scipy.sparse.bsr_matrix.ceil',
'scipy.sparse.bsr_matrix.check_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.check_format.html#scipy.sparse.bsr_matrix.check_format',
'scipy.sparse.bsr_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.conj.html#scipy.sparse.bsr_matrix.conj',
'scipy.sparse.bsr_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.conjugate.html#scipy.sparse.bsr_matrix.conjugate',
'scipy.sparse.bsr_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.copy.html#scipy.sparse.bsr_matrix.copy',
'scipy.sparse.bsr_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.count_nonzero.html#scipy.sparse.bsr_matrix.count_nonzero',
'scipy.sparse.bsr_matrix.deg2rad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.deg2rad.html#scipy.sparse.bsr_matrix.deg2rad',
'scipy.sparse.bsr_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.diagonal.html#scipy.sparse.bsr_matrix.diagonal',
'scipy.sparse.bsr_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.dot.html#scipy.sparse.bsr_matrix.dot',
'scipy.sparse.bsr_matrix.dtype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.dtype.html#scipy.sparse.bsr_matrix.dtype',
'scipy.sparse.bsr_matrix.eliminate_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.eliminate_zeros.html#scipy.sparse.bsr_matrix.eliminate_zeros',
'scipy.sparse.bsr_matrix.expm1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.expm1.html#scipy.sparse.bsr_matrix.expm1',
'scipy.sparse.bsr_matrix.floor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.floor.html#scipy.sparse.bsr_matrix.floor',
'scipy.sparse.bsr_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getH.html#scipy.sparse.bsr_matrix.getH',
'scipy.sparse.bsr_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.get_shape.html#scipy.sparse.bsr_matrix.get_shape',
'scipy.sparse.bsr_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getcol.html#scipy.sparse.bsr_matrix.getcol',
'scipy.sparse.bsr_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getformat.html#scipy.sparse.bsr_matrix.getformat',
'scipy.sparse.bsr_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getmaxprint.html#scipy.sparse.bsr_matrix.getmaxprint',
'scipy.sparse.bsr_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getnnz.html#scipy.sparse.bsr_matrix.getnnz',
'scipy.sparse.bsr_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.getrow.html#scipy.sparse.bsr_matrix.getrow',
'scipy.sparse.bsr_matrix.has_canonical_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.has_canonical_format.html#scipy.sparse.bsr_matrix.has_canonical_format',
'scipy.sparse.bsr_matrix.has_sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.has_sorted_indices.html#scipy.sparse.bsr_matrix.has_sorted_indices',
'scipy.sparse.bsr_matrix.log1p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.log1p.html#scipy.sparse.bsr_matrix.log1p',
'scipy.sparse.bsr_matrix.matmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.matmat.html#scipy.sparse.bsr_matrix.matmat',
'scipy.sparse.bsr_matrix.matvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.matvec.html#scipy.sparse.bsr_matrix.matvec',
'scipy.sparse.bsr_matrix.max': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.max.html#scipy.sparse.bsr_matrix.max',
'scipy.sparse.bsr_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.maximum.html#scipy.sparse.bsr_matrix.maximum',
'scipy.sparse.bsr_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.mean.html#scipy.sparse.bsr_matrix.mean',
'scipy.sparse.bsr_matrix.min': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.min.html#scipy.sparse.bsr_matrix.min',
'scipy.sparse.bsr_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.minimum.html#scipy.sparse.bsr_matrix.minimum',
'scipy.sparse.bsr_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.multiply.html#scipy.sparse.bsr_matrix.multiply',
'scipy.sparse.bsr_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.nnz.html#scipy.sparse.bsr_matrix.nnz',
'scipy.sparse.bsr_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.nonzero.html#scipy.sparse.bsr_matrix.nonzero',
'scipy.sparse.bsr_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.power.html#scipy.sparse.bsr_matrix.power',
'scipy.sparse.bsr_matrix.prune': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.prune.html#scipy.sparse.bsr_matrix.prune',
'scipy.sparse.bsr_matrix.rad2deg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.rad2deg.html#scipy.sparse.bsr_matrix.rad2deg',
'scipy.sparse.bsr_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.reshape.html#scipy.sparse.bsr_matrix.reshape',
'scipy.sparse.bsr_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.resize.html#scipy.sparse.bsr_matrix.resize',
'scipy.sparse.bsr_matrix.rint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.rint.html#scipy.sparse.bsr_matrix.rint',
'scipy.sparse.bsr_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.set_shape.html#scipy.sparse.bsr_matrix.set_shape',
'scipy.sparse.bsr_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.setdiag.html#scipy.sparse.bsr_matrix.setdiag',
'scipy.sparse.bsr_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.shape.html#scipy.sparse.bsr_matrix.shape',
'scipy.sparse.bsr_matrix.sign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sign.html#scipy.sparse.bsr_matrix.sign',
'scipy.sparse.bsr_matrix.sin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sin.html#scipy.sparse.bsr_matrix.sin',
'scipy.sparse.bsr_matrix.sinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sinh.html#scipy.sparse.bsr_matrix.sinh',
'scipy.sparse.bsr_matrix.sort_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sort_indices.html#scipy.sparse.bsr_matrix.sort_indices',
'scipy.sparse.bsr_matrix.sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sorted_indices.html#scipy.sparse.bsr_matrix.sorted_indices',
'scipy.sparse.bsr_matrix.sqrt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sqrt.html#scipy.sparse.bsr_matrix.sqrt',
'scipy.sparse.bsr_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sum.html#scipy.sparse.bsr_matrix.sum',
'scipy.sparse.bsr_matrix.sum_duplicates': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.sum_duplicates.html#scipy.sparse.bsr_matrix.sum_duplicates',
'scipy.sparse.bsr_matrix.tan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tan.html#scipy.sparse.bsr_matrix.tan',
'scipy.sparse.bsr_matrix.tanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tanh.html#scipy.sparse.bsr_matrix.tanh',
'scipy.sparse.bsr_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.toarray.html#scipy.sparse.bsr_matrix.toarray',
'scipy.sparse.bsr_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tobsr.html#scipy.sparse.bsr_matrix.tobsr',
'scipy.sparse.bsr_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tocoo.html#scipy.sparse.bsr_matrix.tocoo',
'scipy.sparse.bsr_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tocsc.html#scipy.sparse.bsr_matrix.tocsc',
'scipy.sparse.bsr_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tocsr.html#scipy.sparse.bsr_matrix.tocsr',
'scipy.sparse.bsr_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.todense.html#scipy.sparse.bsr_matrix.todense',
'scipy.sparse.bsr_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.todia.html#scipy.sparse.bsr_matrix.todia',
'scipy.sparse.bsr_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.todok.html#scipy.sparse.bsr_matrix.todok',
'scipy.sparse.bsr_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.tolil.html#scipy.sparse.bsr_matrix.tolil',
'scipy.sparse.bsr_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.transpose.html#scipy.sparse.bsr_matrix.transpose',
'scipy.sparse.bsr_matrix.trunc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bsr_matrix.trunc.html#scipy.sparse.bsr_matrix.trunc',
'scipy.sparse.coo_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.__len__.html#scipy.sparse.coo_matrix.__len__',
'scipy.sparse.coo_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.__mul__.html#scipy.sparse.coo_matrix.__mul__',
'scipy.sparse.coo_matrix.arcsin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.arcsin.html#scipy.sparse.coo_matrix.arcsin',
'scipy.sparse.coo_matrix.arcsinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.arcsinh.html#scipy.sparse.coo_matrix.arcsinh',
'scipy.sparse.coo_matrix.arctan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.arctan.html#scipy.sparse.coo_matrix.arctan',
'scipy.sparse.coo_matrix.arctanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.arctanh.html#scipy.sparse.coo_matrix.arctanh',
'scipy.sparse.coo_matrix.argmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.argmax.html#scipy.sparse.coo_matrix.argmax',
'scipy.sparse.coo_matrix.argmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.argmin.html#scipy.sparse.coo_matrix.argmin',
'scipy.sparse.coo_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.asformat.html#scipy.sparse.coo_matrix.asformat',
'scipy.sparse.coo_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.asfptype.html#scipy.sparse.coo_matrix.asfptype',
'scipy.sparse.coo_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.astype.html#scipy.sparse.coo_matrix.astype',
'scipy.sparse.coo_matrix.ceil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.ceil.html#scipy.sparse.coo_matrix.ceil',
'scipy.sparse.coo_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.conj.html#scipy.sparse.coo_matrix.conj',
'scipy.sparse.coo_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.conjugate.html#scipy.sparse.coo_matrix.conjugate',
'scipy.sparse.coo_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.copy.html#scipy.sparse.coo_matrix.copy',
'scipy.sparse.coo_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.count_nonzero.html#scipy.sparse.coo_matrix.count_nonzero',
'scipy.sparse.coo_matrix.deg2rad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.deg2rad.html#scipy.sparse.coo_matrix.deg2rad',
'scipy.sparse.coo_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.diagonal.html#scipy.sparse.coo_matrix.diagonal',
'scipy.sparse.coo_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.dot.html#scipy.sparse.coo_matrix.dot',
'scipy.sparse.coo_matrix.dtype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.dtype.html#scipy.sparse.coo_matrix.dtype',
'scipy.sparse.coo_matrix.eliminate_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.eliminate_zeros.html#scipy.sparse.coo_matrix.eliminate_zeros',
'scipy.sparse.coo_matrix.expm1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.expm1.html#scipy.sparse.coo_matrix.expm1',
'scipy.sparse.coo_matrix.floor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.floor.html#scipy.sparse.coo_matrix.floor',
'scipy.sparse.coo_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getH.html#scipy.sparse.coo_matrix.getH',
'scipy.sparse.coo_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.get_shape.html#scipy.sparse.coo_matrix.get_shape',
'scipy.sparse.coo_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getcol.html#scipy.sparse.coo_matrix.getcol',
'scipy.sparse.coo_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getformat.html#scipy.sparse.coo_matrix.getformat',
'scipy.sparse.coo_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getmaxprint.html#scipy.sparse.coo_matrix.getmaxprint',
'scipy.sparse.coo_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getnnz.html#scipy.sparse.coo_matrix.getnnz',
'scipy.sparse.coo_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.getrow.html#scipy.sparse.coo_matrix.getrow',
'scipy.sparse.coo_matrix.log1p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.log1p.html#scipy.sparse.coo_matrix.log1p',
'scipy.sparse.coo_matrix.max': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.max.html#scipy.sparse.coo_matrix.max',
'scipy.sparse.coo_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.maximum.html#scipy.sparse.coo_matrix.maximum',
'scipy.sparse.coo_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.mean.html#scipy.sparse.coo_matrix.mean',
'scipy.sparse.coo_matrix.min': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.min.html#scipy.sparse.coo_matrix.min',
'scipy.sparse.coo_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.minimum.html#scipy.sparse.coo_matrix.minimum',
'scipy.sparse.coo_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.multiply.html#scipy.sparse.coo_matrix.multiply',
'scipy.sparse.coo_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.nnz.html#scipy.sparse.coo_matrix.nnz',
'scipy.sparse.coo_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.nonzero.html#scipy.sparse.coo_matrix.nonzero',
'scipy.sparse.coo_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.power.html#scipy.sparse.coo_matrix.power',
'scipy.sparse.coo_matrix.rad2deg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.rad2deg.html#scipy.sparse.coo_matrix.rad2deg',
'scipy.sparse.coo_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.reshape.html#scipy.sparse.coo_matrix.reshape',
'scipy.sparse.coo_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.resize.html#scipy.sparse.coo_matrix.resize',
'scipy.sparse.coo_matrix.rint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.rint.html#scipy.sparse.coo_matrix.rint',
'scipy.sparse.coo_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.set_shape.html#scipy.sparse.coo_matrix.set_shape',
'scipy.sparse.coo_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.setdiag.html#scipy.sparse.coo_matrix.setdiag',
'scipy.sparse.coo_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.shape.html#scipy.sparse.coo_matrix.shape',
'scipy.sparse.coo_matrix.sign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sign.html#scipy.sparse.coo_matrix.sign',
'scipy.sparse.coo_matrix.sin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sin.html#scipy.sparse.coo_matrix.sin',
'scipy.sparse.coo_matrix.sinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sinh.html#scipy.sparse.coo_matrix.sinh',
'scipy.sparse.coo_matrix.sqrt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sqrt.html#scipy.sparse.coo_matrix.sqrt',
'scipy.sparse.coo_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sum.html#scipy.sparse.coo_matrix.sum',
'scipy.sparse.coo_matrix.sum_duplicates': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.sum_duplicates.html#scipy.sparse.coo_matrix.sum_duplicates',
'scipy.sparse.coo_matrix.tan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tan.html#scipy.sparse.coo_matrix.tan',
'scipy.sparse.coo_matrix.tanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tanh.html#scipy.sparse.coo_matrix.tanh',
'scipy.sparse.coo_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.toarray.html#scipy.sparse.coo_matrix.toarray',
'scipy.sparse.coo_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tobsr.html#scipy.sparse.coo_matrix.tobsr',
'scipy.sparse.coo_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocoo.html#scipy.sparse.coo_matrix.tocoo',
'scipy.sparse.coo_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocsc.html#scipy.sparse.coo_matrix.tocsc',
'scipy.sparse.coo_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocsr.html#scipy.sparse.coo_matrix.tocsr',
'scipy.sparse.coo_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.todense.html#scipy.sparse.coo_matrix.todense',
'scipy.sparse.coo_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.todia.html#scipy.sparse.coo_matrix.todia',
'scipy.sparse.coo_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.todok.html#scipy.sparse.coo_matrix.todok',
'scipy.sparse.coo_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tolil.html#scipy.sparse.coo_matrix.tolil',
'scipy.sparse.coo_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.transpose.html#scipy.sparse.coo_matrix.transpose',
'scipy.sparse.coo_matrix.trunc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.trunc.html#scipy.sparse.coo_matrix.trunc',
'scipy.sparse.csc_matrix.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.__getitem__.html#scipy.sparse.csc_matrix.__getitem__',
'scipy.sparse.csc_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.__len__.html#scipy.sparse.csc_matrix.__len__',
'scipy.sparse.csc_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.__mul__.html#scipy.sparse.csc_matrix.__mul__',
'scipy.sparse.csc_matrix.arcsin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.arcsin.html#scipy.sparse.csc_matrix.arcsin',
'scipy.sparse.csc_matrix.arcsinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.arcsinh.html#scipy.sparse.csc_matrix.arcsinh',
'scipy.sparse.csc_matrix.arctan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.arctan.html#scipy.sparse.csc_matrix.arctan',
'scipy.sparse.csc_matrix.arctanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.arctanh.html#scipy.sparse.csc_matrix.arctanh',
'scipy.sparse.csc_matrix.argmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.argmax.html#scipy.sparse.csc_matrix.argmax',
'scipy.sparse.csc_matrix.argmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.argmin.html#scipy.sparse.csc_matrix.argmin',
'scipy.sparse.csc_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.asformat.html#scipy.sparse.csc_matrix.asformat',
'scipy.sparse.csc_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.asfptype.html#scipy.sparse.csc_matrix.asfptype',
'scipy.sparse.csc_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.astype.html#scipy.sparse.csc_matrix.astype',
'scipy.sparse.csc_matrix.ceil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.ceil.html#scipy.sparse.csc_matrix.ceil',
'scipy.sparse.csc_matrix.check_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.check_format.html#scipy.sparse.csc_matrix.check_format',
'scipy.sparse.csc_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.conj.html#scipy.sparse.csc_matrix.conj',
'scipy.sparse.csc_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.conjugate.html#scipy.sparse.csc_matrix.conjugate',
'scipy.sparse.csc_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.copy.html#scipy.sparse.csc_matrix.copy',
'scipy.sparse.csc_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.count_nonzero.html#scipy.sparse.csc_matrix.count_nonzero',
'scipy.sparse.csc_matrix.deg2rad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.deg2rad.html#scipy.sparse.csc_matrix.deg2rad',
'scipy.sparse.csc_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.diagonal.html#scipy.sparse.csc_matrix.diagonal',
'scipy.sparse.csc_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.dot.html#scipy.sparse.csc_matrix.dot',
'scipy.sparse.csc_matrix.dtype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.dtype.html#scipy.sparse.csc_matrix.dtype',
'scipy.sparse.csc_matrix.eliminate_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.eliminate_zeros.html#scipy.sparse.csc_matrix.eliminate_zeros',
'scipy.sparse.csc_matrix.expm1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.expm1.html#scipy.sparse.csc_matrix.expm1',
'scipy.sparse.csc_matrix.floor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.floor.html#scipy.sparse.csc_matrix.floor',
'scipy.sparse.csc_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getH.html#scipy.sparse.csc_matrix.getH',
'scipy.sparse.csc_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.get_shape.html#scipy.sparse.csc_matrix.get_shape',
'scipy.sparse.csc_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getcol.html#scipy.sparse.csc_matrix.getcol',
'scipy.sparse.csc_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getformat.html#scipy.sparse.csc_matrix.getformat',
'scipy.sparse.csc_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getmaxprint.html#scipy.sparse.csc_matrix.getmaxprint',
'scipy.sparse.csc_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getnnz.html#scipy.sparse.csc_matrix.getnnz',
'scipy.sparse.csc_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.getrow.html#scipy.sparse.csc_matrix.getrow',
'scipy.sparse.csc_matrix.has_canonical_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.has_canonical_format.html#scipy.sparse.csc_matrix.has_canonical_format',
'scipy.sparse.csc_matrix.has_sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.has_sorted_indices.html#scipy.sparse.csc_matrix.has_sorted_indices',
'scipy.sparse.csc_matrix.log1p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.log1p.html#scipy.sparse.csc_matrix.log1p',
'scipy.sparse.csc_matrix.max': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.max.html#scipy.sparse.csc_matrix.max',
'scipy.sparse.csc_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.maximum.html#scipy.sparse.csc_matrix.maximum',
'scipy.sparse.csc_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.mean.html#scipy.sparse.csc_matrix.mean',
'scipy.sparse.csc_matrix.min': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.min.html#scipy.sparse.csc_matrix.min',
'scipy.sparse.csc_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.minimum.html#scipy.sparse.csc_matrix.minimum',
'scipy.sparse.csc_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.multiply.html#scipy.sparse.csc_matrix.multiply',
'scipy.sparse.csc_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.nnz.html#scipy.sparse.csc_matrix.nnz',
'scipy.sparse.csc_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.nonzero.html#scipy.sparse.csc_matrix.nonzero',
'scipy.sparse.csc_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.power.html#scipy.sparse.csc_matrix.power',
'scipy.sparse.csc_matrix.prune': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.prune.html#scipy.sparse.csc_matrix.prune',
'scipy.sparse.csc_matrix.rad2deg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.rad2deg.html#scipy.sparse.csc_matrix.rad2deg',
'scipy.sparse.csc_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.reshape.html#scipy.sparse.csc_matrix.reshape',
'scipy.sparse.csc_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.resize.html#scipy.sparse.csc_matrix.resize',
'scipy.sparse.csc_matrix.rint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.rint.html#scipy.sparse.csc_matrix.rint',
'scipy.sparse.csc_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.set_shape.html#scipy.sparse.csc_matrix.set_shape',
'scipy.sparse.csc_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.setdiag.html#scipy.sparse.csc_matrix.setdiag',
'scipy.sparse.csc_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.shape.html#scipy.sparse.csc_matrix.shape',
'scipy.sparse.csc_matrix.sign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sign.html#scipy.sparse.csc_matrix.sign',
'scipy.sparse.csc_matrix.sin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sin.html#scipy.sparse.csc_matrix.sin',
'scipy.sparse.csc_matrix.sinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sinh.html#scipy.sparse.csc_matrix.sinh',
'scipy.sparse.csc_matrix.sort_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sort_indices.html#scipy.sparse.csc_matrix.sort_indices',
'scipy.sparse.csc_matrix.sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sorted_indices.html#scipy.sparse.csc_matrix.sorted_indices',
'scipy.sparse.csc_matrix.sqrt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sqrt.html#scipy.sparse.csc_matrix.sqrt',
'scipy.sparse.csc_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sum.html#scipy.sparse.csc_matrix.sum',
'scipy.sparse.csc_matrix.sum_duplicates': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.sum_duplicates.html#scipy.sparse.csc_matrix.sum_duplicates',
'scipy.sparse.csc_matrix.tan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tan.html#scipy.sparse.csc_matrix.tan',
'scipy.sparse.csc_matrix.tanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tanh.html#scipy.sparse.csc_matrix.tanh',
'scipy.sparse.csc_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.toarray.html#scipy.sparse.csc_matrix.toarray',
'scipy.sparse.csc_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tobsr.html#scipy.sparse.csc_matrix.tobsr',
'scipy.sparse.csc_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tocoo.html#scipy.sparse.csc_matrix.tocoo',
'scipy.sparse.csc_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tocsc.html#scipy.sparse.csc_matrix.tocsc',
'scipy.sparse.csc_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tocsr.html#scipy.sparse.csc_matrix.tocsr',
'scipy.sparse.csc_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.todense.html#scipy.sparse.csc_matrix.todense',
'scipy.sparse.csc_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.todia.html#scipy.sparse.csc_matrix.todia',
'scipy.sparse.csc_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.todok.html#scipy.sparse.csc_matrix.todok',
'scipy.sparse.csc_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.tolil.html#scipy.sparse.csc_matrix.tolil',
'scipy.sparse.csc_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.transpose.html#scipy.sparse.csc_matrix.transpose',
'scipy.sparse.csc_matrix.trunc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.trunc.html#scipy.sparse.csc_matrix.trunc',
'scipy.sparse.csr_matrix.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.__getitem__.html#scipy.sparse.csr_matrix.__getitem__',
'scipy.sparse.csr_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.__len__.html#scipy.sparse.csr_matrix.__len__',
'scipy.sparse.csr_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.__mul__.html#scipy.sparse.csr_matrix.__mul__',
'scipy.sparse.csr_matrix.arcsin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.arcsin.html#scipy.sparse.csr_matrix.arcsin',
'scipy.sparse.csr_matrix.arcsinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.arcsinh.html#scipy.sparse.csr_matrix.arcsinh',
'scipy.sparse.csr_matrix.arctan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.arctan.html#scipy.sparse.csr_matrix.arctan',
'scipy.sparse.csr_matrix.arctanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.arctanh.html#scipy.sparse.csr_matrix.arctanh',
'scipy.sparse.csr_matrix.argmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.argmax.html#scipy.sparse.csr_matrix.argmax',
'scipy.sparse.csr_matrix.argmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.argmin.html#scipy.sparse.csr_matrix.argmin',
'scipy.sparse.csr_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.asformat.html#scipy.sparse.csr_matrix.asformat',
'scipy.sparse.csr_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.asfptype.html#scipy.sparse.csr_matrix.asfptype',
'scipy.sparse.csr_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.astype.html#scipy.sparse.csr_matrix.astype',
'scipy.sparse.csr_matrix.ceil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.ceil.html#scipy.sparse.csr_matrix.ceil',
'scipy.sparse.csr_matrix.check_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.check_format.html#scipy.sparse.csr_matrix.check_format',
'scipy.sparse.csr_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.conj.html#scipy.sparse.csr_matrix.conj',
'scipy.sparse.csr_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.conjugate.html#scipy.sparse.csr_matrix.conjugate',
'scipy.sparse.csr_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.copy.html#scipy.sparse.csr_matrix.copy',
'scipy.sparse.csr_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.count_nonzero.html#scipy.sparse.csr_matrix.count_nonzero',
'scipy.sparse.csr_matrix.deg2rad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.deg2rad.html#scipy.sparse.csr_matrix.deg2rad',
'scipy.sparse.csr_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.diagonal.html#scipy.sparse.csr_matrix.diagonal',
'scipy.sparse.csr_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.dot.html#scipy.sparse.csr_matrix.dot',
'scipy.sparse.csr_matrix.dtype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.dtype.html#scipy.sparse.csr_matrix.dtype',
'scipy.sparse.csr_matrix.eliminate_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.eliminate_zeros.html#scipy.sparse.csr_matrix.eliminate_zeros',
'scipy.sparse.csr_matrix.expm1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.expm1.html#scipy.sparse.csr_matrix.expm1',
'scipy.sparse.csr_matrix.floor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.floor.html#scipy.sparse.csr_matrix.floor',
'scipy.sparse.csr_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getH.html#scipy.sparse.csr_matrix.getH',
'scipy.sparse.csr_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.get_shape.html#scipy.sparse.csr_matrix.get_shape',
'scipy.sparse.csr_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getcol.html#scipy.sparse.csr_matrix.getcol',
'scipy.sparse.csr_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getformat.html#scipy.sparse.csr_matrix.getformat',
'scipy.sparse.csr_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getmaxprint.html#scipy.sparse.csr_matrix.getmaxprint',
'scipy.sparse.csr_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getnnz.html#scipy.sparse.csr_matrix.getnnz',
'scipy.sparse.csr_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.getrow.html#scipy.sparse.csr_matrix.getrow',
'scipy.sparse.csr_matrix.has_canonical_format': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.has_canonical_format.html#scipy.sparse.csr_matrix.has_canonical_format',
'scipy.sparse.csr_matrix.has_sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.has_sorted_indices.html#scipy.sparse.csr_matrix.has_sorted_indices',
'scipy.sparse.csr_matrix.log1p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.log1p.html#scipy.sparse.csr_matrix.log1p',
'scipy.sparse.csr_matrix.max': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.max.html#scipy.sparse.csr_matrix.max',
'scipy.sparse.csr_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.maximum.html#scipy.sparse.csr_matrix.maximum',
'scipy.sparse.csr_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.mean.html#scipy.sparse.csr_matrix.mean',
'scipy.sparse.csr_matrix.min': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.min.html#scipy.sparse.csr_matrix.min',
'scipy.sparse.csr_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.minimum.html#scipy.sparse.csr_matrix.minimum',
'scipy.sparse.csr_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.multiply.html#scipy.sparse.csr_matrix.multiply',
'scipy.sparse.csr_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.nnz.html#scipy.sparse.csr_matrix.nnz',
'scipy.sparse.csr_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.nonzero.html#scipy.sparse.csr_matrix.nonzero',
'scipy.sparse.csr_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.power.html#scipy.sparse.csr_matrix.power',
'scipy.sparse.csr_matrix.prune': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.prune.html#scipy.sparse.csr_matrix.prune',
'scipy.sparse.csr_matrix.rad2deg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.rad2deg.html#scipy.sparse.csr_matrix.rad2deg',
'scipy.sparse.csr_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.reshape.html#scipy.sparse.csr_matrix.reshape',
'scipy.sparse.csr_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.resize.html#scipy.sparse.csr_matrix.resize',
'scipy.sparse.csr_matrix.rint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.rint.html#scipy.sparse.csr_matrix.rint',
'scipy.sparse.csr_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.set_shape.html#scipy.sparse.csr_matrix.set_shape',
'scipy.sparse.csr_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.setdiag.html#scipy.sparse.csr_matrix.setdiag',
'scipy.sparse.csr_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.shape.html#scipy.sparse.csr_matrix.shape',
'scipy.sparse.csr_matrix.sign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sign.html#scipy.sparse.csr_matrix.sign',
'scipy.sparse.csr_matrix.sin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sin.html#scipy.sparse.csr_matrix.sin',
'scipy.sparse.csr_matrix.sinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sinh.html#scipy.sparse.csr_matrix.sinh',
'scipy.sparse.csr_matrix.sort_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sort_indices.html#scipy.sparse.csr_matrix.sort_indices',
'scipy.sparse.csr_matrix.sorted_indices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sorted_indices.html#scipy.sparse.csr_matrix.sorted_indices',
'scipy.sparse.csr_matrix.sqrt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sqrt.html#scipy.sparse.csr_matrix.sqrt',
'scipy.sparse.csr_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sum.html#scipy.sparse.csr_matrix.sum',
'scipy.sparse.csr_matrix.sum_duplicates': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.sum_duplicates.html#scipy.sparse.csr_matrix.sum_duplicates',
'scipy.sparse.csr_matrix.tan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tan.html#scipy.sparse.csr_matrix.tan',
'scipy.sparse.csr_matrix.tanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tanh.html#scipy.sparse.csr_matrix.tanh',
'scipy.sparse.csr_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.toarray.html#scipy.sparse.csr_matrix.toarray',
'scipy.sparse.csr_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tobsr.html#scipy.sparse.csr_matrix.tobsr',
'scipy.sparse.csr_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tocoo.html#scipy.sparse.csr_matrix.tocoo',
'scipy.sparse.csr_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tocsc.html#scipy.sparse.csr_matrix.tocsc',
'scipy.sparse.csr_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tocsr.html#scipy.sparse.csr_matrix.tocsr',
'scipy.sparse.csr_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.todense.html#scipy.sparse.csr_matrix.todense',
'scipy.sparse.csr_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.todia.html#scipy.sparse.csr_matrix.todia',
'scipy.sparse.csr_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.todok.html#scipy.sparse.csr_matrix.todok',
'scipy.sparse.csr_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.tolil.html#scipy.sparse.csr_matrix.tolil',
'scipy.sparse.csr_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.transpose.html#scipy.sparse.csr_matrix.transpose',
'scipy.sparse.csr_matrix.trunc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.trunc.html#scipy.sparse.csr_matrix.trunc',
'scipy.sparse.dia_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.__len__.html#scipy.sparse.dia_matrix.__len__',
'scipy.sparse.dia_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.__mul__.html#scipy.sparse.dia_matrix.__mul__',
'scipy.sparse.dia_matrix.arcsin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.arcsin.html#scipy.sparse.dia_matrix.arcsin',
'scipy.sparse.dia_matrix.arcsinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.arcsinh.html#scipy.sparse.dia_matrix.arcsinh',
'scipy.sparse.dia_matrix.arctan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.arctan.html#scipy.sparse.dia_matrix.arctan',
'scipy.sparse.dia_matrix.arctanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.arctanh.html#scipy.sparse.dia_matrix.arctanh',
'scipy.sparse.dia_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.asformat.html#scipy.sparse.dia_matrix.asformat',
'scipy.sparse.dia_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.asfptype.html#scipy.sparse.dia_matrix.asfptype',
'scipy.sparse.dia_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.astype.html#scipy.sparse.dia_matrix.astype',
'scipy.sparse.dia_matrix.ceil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.ceil.html#scipy.sparse.dia_matrix.ceil',
'scipy.sparse.dia_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.conj.html#scipy.sparse.dia_matrix.conj',
'scipy.sparse.dia_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.conjugate.html#scipy.sparse.dia_matrix.conjugate',
'scipy.sparse.dia_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.copy.html#scipy.sparse.dia_matrix.copy',
'scipy.sparse.dia_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.count_nonzero.html#scipy.sparse.dia_matrix.count_nonzero',
'scipy.sparse.dia_matrix.deg2rad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.deg2rad.html#scipy.sparse.dia_matrix.deg2rad',
'scipy.sparse.dia_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.diagonal.html#scipy.sparse.dia_matrix.diagonal',
'scipy.sparse.dia_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.dot.html#scipy.sparse.dia_matrix.dot',
'scipy.sparse.dia_matrix.dtype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.dtype.html#scipy.sparse.dia_matrix.dtype',
'scipy.sparse.dia_matrix.expm1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.expm1.html#scipy.sparse.dia_matrix.expm1',
'scipy.sparse.dia_matrix.floor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.floor.html#scipy.sparse.dia_matrix.floor',
'scipy.sparse.dia_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getH.html#scipy.sparse.dia_matrix.getH',
'scipy.sparse.dia_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.get_shape.html#scipy.sparse.dia_matrix.get_shape',
'scipy.sparse.dia_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getcol.html#scipy.sparse.dia_matrix.getcol',
'scipy.sparse.dia_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getformat.html#scipy.sparse.dia_matrix.getformat',
'scipy.sparse.dia_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getmaxprint.html#scipy.sparse.dia_matrix.getmaxprint',
'scipy.sparse.dia_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getnnz.html#scipy.sparse.dia_matrix.getnnz',
'scipy.sparse.dia_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.getrow.html#scipy.sparse.dia_matrix.getrow',
'scipy.sparse.dia_matrix.log1p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.log1p.html#scipy.sparse.dia_matrix.log1p',
'scipy.sparse.dia_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.maximum.html#scipy.sparse.dia_matrix.maximum',
'scipy.sparse.dia_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.mean.html#scipy.sparse.dia_matrix.mean',
'scipy.sparse.dia_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.minimum.html#scipy.sparse.dia_matrix.minimum',
'scipy.sparse.dia_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.multiply.html#scipy.sparse.dia_matrix.multiply',
'scipy.sparse.dia_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.nnz.html#scipy.sparse.dia_matrix.nnz',
'scipy.sparse.dia_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.nonzero.html#scipy.sparse.dia_matrix.nonzero',
'scipy.sparse.dia_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.power.html#scipy.sparse.dia_matrix.power',
'scipy.sparse.dia_matrix.rad2deg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.rad2deg.html#scipy.sparse.dia_matrix.rad2deg',
'scipy.sparse.dia_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.reshape.html#scipy.sparse.dia_matrix.reshape',
'scipy.sparse.dia_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.resize.html#scipy.sparse.dia_matrix.resize',
'scipy.sparse.dia_matrix.rint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.rint.html#scipy.sparse.dia_matrix.rint',
'scipy.sparse.dia_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.set_shape.html#scipy.sparse.dia_matrix.set_shape',
'scipy.sparse.dia_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.setdiag.html#scipy.sparse.dia_matrix.setdiag',
'scipy.sparse.dia_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.shape.html#scipy.sparse.dia_matrix.shape',
'scipy.sparse.dia_matrix.sign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.sign.html#scipy.sparse.dia_matrix.sign',
'scipy.sparse.dia_matrix.sin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.sin.html#scipy.sparse.dia_matrix.sin',
'scipy.sparse.dia_matrix.sinh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.sinh.html#scipy.sparse.dia_matrix.sinh',
'scipy.sparse.dia_matrix.sqrt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.sqrt.html#scipy.sparse.dia_matrix.sqrt',
'scipy.sparse.dia_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.sum.html#scipy.sparse.dia_matrix.sum',
'scipy.sparse.dia_matrix.tan': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tan.html#scipy.sparse.dia_matrix.tan',
'scipy.sparse.dia_matrix.tanh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tanh.html#scipy.sparse.dia_matrix.tanh',
'scipy.sparse.dia_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.toarray.html#scipy.sparse.dia_matrix.toarray',
'scipy.sparse.dia_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tobsr.html#scipy.sparse.dia_matrix.tobsr',
'scipy.sparse.dia_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tocoo.html#scipy.sparse.dia_matrix.tocoo',
'scipy.sparse.dia_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tocsc.html#scipy.sparse.dia_matrix.tocsc',
'scipy.sparse.dia_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tocsr.html#scipy.sparse.dia_matrix.tocsr',
'scipy.sparse.dia_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.todense.html#scipy.sparse.dia_matrix.todense',
'scipy.sparse.dia_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.todia.html#scipy.sparse.dia_matrix.todia',
'scipy.sparse.dia_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.todok.html#scipy.sparse.dia_matrix.todok',
'scipy.sparse.dia_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.tolil.html#scipy.sparse.dia_matrix.tolil',
'scipy.sparse.dia_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.transpose.html#scipy.sparse.dia_matrix.transpose',
'scipy.sparse.dia_matrix.trunc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dia_matrix.trunc.html#scipy.sparse.dia_matrix.trunc',
'scipy.sparse.dok_matrix.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.__getitem__.html#scipy.sparse.dok_matrix.__getitem__',
'scipy.sparse.dok_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.__len__.html#scipy.sparse.dok_matrix.__len__',
'scipy.sparse.dok_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.__mul__.html#scipy.sparse.dok_matrix.__mul__',
'scipy.sparse.dok_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.asformat.html#scipy.sparse.dok_matrix.asformat',
'scipy.sparse.dok_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.asfptype.html#scipy.sparse.dok_matrix.asfptype',
'scipy.sparse.dok_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.astype.html#scipy.sparse.dok_matrix.astype',
'scipy.sparse.dok_matrix.clear': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.clear.html#scipy.sparse.dok_matrix.clear',
'scipy.sparse.dok_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.conj.html#scipy.sparse.dok_matrix.conj',
'scipy.sparse.dok_matrix.conjtransp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.conjtransp.html#scipy.sparse.dok_matrix.conjtransp',
'scipy.sparse.dok_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.conjugate.html#scipy.sparse.dok_matrix.conjugate',
'scipy.sparse.dok_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.copy.html#scipy.sparse.dok_matrix.copy',
'scipy.sparse.dok_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.count_nonzero.html#scipy.sparse.dok_matrix.count_nonzero',
'scipy.sparse.dok_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.diagonal.html#scipy.sparse.dok_matrix.diagonal',
'scipy.sparse.dok_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.dot.html#scipy.sparse.dok_matrix.dot',
'scipy.sparse.dok_matrix.fromkeys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.fromkeys.html#scipy.sparse.dok_matrix.fromkeys',
'scipy.sparse.dok_matrix.get': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.get.html#scipy.sparse.dok_matrix.get',
'scipy.sparse.dok_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getH.html#scipy.sparse.dok_matrix.getH',
'scipy.sparse.dok_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.get_shape.html#scipy.sparse.dok_matrix.get_shape',
'scipy.sparse.dok_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getcol.html#scipy.sparse.dok_matrix.getcol',
'scipy.sparse.dok_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getformat.html#scipy.sparse.dok_matrix.getformat',
'scipy.sparse.dok_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getmaxprint.html#scipy.sparse.dok_matrix.getmaxprint',
'scipy.sparse.dok_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getnnz.html#scipy.sparse.dok_matrix.getnnz',
'scipy.sparse.dok_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.getrow.html#scipy.sparse.dok_matrix.getrow',
'scipy.sparse.dok_matrix.items': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.items.html#scipy.sparse.dok_matrix.items',
'scipy.sparse.dok_matrix.keys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.keys.html#scipy.sparse.dok_matrix.keys',
'scipy.sparse.dok_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.maximum.html#scipy.sparse.dok_matrix.maximum',
'scipy.sparse.dok_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.mean.html#scipy.sparse.dok_matrix.mean',
'scipy.sparse.dok_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.minimum.html#scipy.sparse.dok_matrix.minimum',
'scipy.sparse.dok_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.multiply.html#scipy.sparse.dok_matrix.multiply',
'scipy.sparse.dok_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.nnz.html#scipy.sparse.dok_matrix.nnz',
'scipy.sparse.dok_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.nonzero.html#scipy.sparse.dok_matrix.nonzero',
'scipy.sparse.dok_matrix.pop': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.pop.html#scipy.sparse.dok_matrix.pop',
'scipy.sparse.dok_matrix.popitem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.popitem.html#scipy.sparse.dok_matrix.popitem',
'scipy.sparse.dok_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.power.html#scipy.sparse.dok_matrix.power',
'scipy.sparse.dok_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.reshape.html#scipy.sparse.dok_matrix.reshape',
'scipy.sparse.dok_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.resize.html#scipy.sparse.dok_matrix.resize',
'scipy.sparse.dok_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.set_shape.html#scipy.sparse.dok_matrix.set_shape',
'scipy.sparse.dok_matrix.setdefault': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.setdefault.html#scipy.sparse.dok_matrix.setdefault',
'scipy.sparse.dok_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.setdiag.html#scipy.sparse.dok_matrix.setdiag',
'scipy.sparse.dok_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.shape.html#scipy.sparse.dok_matrix.shape',
'scipy.sparse.dok_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.sum.html#scipy.sparse.dok_matrix.sum',
'scipy.sparse.dok_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.toarray.html#scipy.sparse.dok_matrix.toarray',
'scipy.sparse.dok_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.tobsr.html#scipy.sparse.dok_matrix.tobsr',
'scipy.sparse.dok_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.tocoo.html#scipy.sparse.dok_matrix.tocoo',
'scipy.sparse.dok_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.tocsc.html#scipy.sparse.dok_matrix.tocsc',
'scipy.sparse.dok_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.tocsr.html#scipy.sparse.dok_matrix.tocsr',
'scipy.sparse.dok_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.todense.html#scipy.sparse.dok_matrix.todense',
'scipy.sparse.dok_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.todia.html#scipy.sparse.dok_matrix.todia',
'scipy.sparse.dok_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.todok.html#scipy.sparse.dok_matrix.todok',
'scipy.sparse.dok_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.tolil.html#scipy.sparse.dok_matrix.tolil',
'scipy.sparse.dok_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.transpose.html#scipy.sparse.dok_matrix.transpose',
'scipy.sparse.dok_matrix.update': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.update.html#scipy.sparse.dok_matrix.update',
'scipy.sparse.dok_matrix.values': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.dok_matrix.values.html#scipy.sparse.dok_matrix.values',
'scipy.sparse.lil_matrix.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.__getitem__.html#scipy.sparse.lil_matrix.__getitem__',
'scipy.sparse.lil_matrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.__len__.html#scipy.sparse.lil_matrix.__len__',
'scipy.sparse.lil_matrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.__mul__.html#scipy.sparse.lil_matrix.__mul__',
'scipy.sparse.lil_matrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.asformat.html#scipy.sparse.lil_matrix.asformat',
'scipy.sparse.lil_matrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.asfptype.html#scipy.sparse.lil_matrix.asfptype',
'scipy.sparse.lil_matrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.astype.html#scipy.sparse.lil_matrix.astype',
'scipy.sparse.lil_matrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.conj.html#scipy.sparse.lil_matrix.conj',
'scipy.sparse.lil_matrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.conjugate.html#scipy.sparse.lil_matrix.conjugate',
'scipy.sparse.lil_matrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.copy.html#scipy.sparse.lil_matrix.copy',
'scipy.sparse.lil_matrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.count_nonzero.html#scipy.sparse.lil_matrix.count_nonzero',
'scipy.sparse.lil_matrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.diagonal.html#scipy.sparse.lil_matrix.diagonal',
'scipy.sparse.lil_matrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.dot.html#scipy.sparse.lil_matrix.dot',
'scipy.sparse.lil_matrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getH.html#scipy.sparse.lil_matrix.getH',
'scipy.sparse.lil_matrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.get_shape.html#scipy.sparse.lil_matrix.get_shape',
'scipy.sparse.lil_matrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getcol.html#scipy.sparse.lil_matrix.getcol',
'scipy.sparse.lil_matrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getformat.html#scipy.sparse.lil_matrix.getformat',
'scipy.sparse.lil_matrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getmaxprint.html#scipy.sparse.lil_matrix.getmaxprint',
'scipy.sparse.lil_matrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getnnz.html#scipy.sparse.lil_matrix.getnnz',
'scipy.sparse.lil_matrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getrow.html#scipy.sparse.lil_matrix.getrow',
'scipy.sparse.lil_matrix.getrowview': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.getrowview.html#scipy.sparse.lil_matrix.getrowview',
'scipy.sparse.lil_matrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.maximum.html#scipy.sparse.lil_matrix.maximum',
'scipy.sparse.lil_matrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.mean.html#scipy.sparse.lil_matrix.mean',
'scipy.sparse.lil_matrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.minimum.html#scipy.sparse.lil_matrix.minimum',
'scipy.sparse.lil_matrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.multiply.html#scipy.sparse.lil_matrix.multiply',
'scipy.sparse.lil_matrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.nnz.html#scipy.sparse.lil_matrix.nnz',
'scipy.sparse.lil_matrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.nonzero.html#scipy.sparse.lil_matrix.nonzero',
'scipy.sparse.lil_matrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.power.html#scipy.sparse.lil_matrix.power',
'scipy.sparse.lil_matrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.reshape.html#scipy.sparse.lil_matrix.reshape',
'scipy.sparse.lil_matrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.resize.html#scipy.sparse.lil_matrix.resize',
'scipy.sparse.lil_matrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.set_shape.html#scipy.sparse.lil_matrix.set_shape',
'scipy.sparse.lil_matrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.setdiag.html#scipy.sparse.lil_matrix.setdiag',
'scipy.sparse.lil_matrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.shape.html#scipy.sparse.lil_matrix.shape',
'scipy.sparse.lil_matrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.sum.html#scipy.sparse.lil_matrix.sum',
'scipy.sparse.lil_matrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.toarray.html#scipy.sparse.lil_matrix.toarray',
'scipy.sparse.lil_matrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.tobsr.html#scipy.sparse.lil_matrix.tobsr',
'scipy.sparse.lil_matrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.tocoo.html#scipy.sparse.lil_matrix.tocoo',
'scipy.sparse.lil_matrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.tocsc.html#scipy.sparse.lil_matrix.tocsc',
'scipy.sparse.lil_matrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.tocsr.html#scipy.sparse.lil_matrix.tocsr',
'scipy.sparse.lil_matrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.todense.html#scipy.sparse.lil_matrix.todense',
'scipy.sparse.lil_matrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.todia.html#scipy.sparse.lil_matrix.todia',
'scipy.sparse.lil_matrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.todok.html#scipy.sparse.lil_matrix.todok',
'scipy.sparse.lil_matrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.tolil.html#scipy.sparse.lil_matrix.tolil',
'scipy.sparse.lil_matrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.lil_matrix.transpose.html#scipy.sparse.lil_matrix.transpose',
'scipy.sparse.spmatrix.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.__len__.html#scipy.sparse.spmatrix.__len__',
'scipy.sparse.spmatrix.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.__mul__.html#scipy.sparse.spmatrix.__mul__',
'scipy.sparse.spmatrix.asformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.asformat.html#scipy.sparse.spmatrix.asformat',
'scipy.sparse.spmatrix.asfptype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.asfptype.html#scipy.sparse.spmatrix.asfptype',
'scipy.sparse.spmatrix.astype': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.astype.html#scipy.sparse.spmatrix.astype',
'scipy.sparse.spmatrix.conj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.conj.html#scipy.sparse.spmatrix.conj',
'scipy.sparse.spmatrix.conjugate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.conjugate.html#scipy.sparse.spmatrix.conjugate',
'scipy.sparse.spmatrix.copy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.copy.html#scipy.sparse.spmatrix.copy',
'scipy.sparse.spmatrix.count_nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.count_nonzero.html#scipy.sparse.spmatrix.count_nonzero',
'scipy.sparse.spmatrix.diagonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.diagonal.html#scipy.sparse.spmatrix.diagonal',
'scipy.sparse.spmatrix.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.dot.html#scipy.sparse.spmatrix.dot',
'scipy.sparse.spmatrix.getH': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getH.html#scipy.sparse.spmatrix.getH',
'scipy.sparse.spmatrix.get_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.get_shape.html#scipy.sparse.spmatrix.get_shape',
'scipy.sparse.spmatrix.getcol': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getcol.html#scipy.sparse.spmatrix.getcol',
'scipy.sparse.spmatrix.getformat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getformat.html#scipy.sparse.spmatrix.getformat',
'scipy.sparse.spmatrix.getmaxprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getmaxprint.html#scipy.sparse.spmatrix.getmaxprint',
'scipy.sparse.spmatrix.getnnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getnnz.html#scipy.sparse.spmatrix.getnnz',
'scipy.sparse.spmatrix.getrow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.getrow.html#scipy.sparse.spmatrix.getrow',
'scipy.sparse.spmatrix.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.maximum.html#scipy.sparse.spmatrix.maximum',
'scipy.sparse.spmatrix.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.mean.html#scipy.sparse.spmatrix.mean',
'scipy.sparse.spmatrix.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.minimum.html#scipy.sparse.spmatrix.minimum',
'scipy.sparse.spmatrix.multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.multiply.html#scipy.sparse.spmatrix.multiply',
'scipy.sparse.spmatrix.nnz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.nnz.html#scipy.sparse.spmatrix.nnz',
'scipy.sparse.spmatrix.nonzero': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.nonzero.html#scipy.sparse.spmatrix.nonzero',
'scipy.sparse.spmatrix.power': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.power.html#scipy.sparse.spmatrix.power',
'scipy.sparse.spmatrix.reshape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.reshape.html#scipy.sparse.spmatrix.reshape',
'scipy.sparse.spmatrix.resize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.resize.html#scipy.sparse.spmatrix.resize',
'scipy.sparse.spmatrix.set_shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.set_shape.html#scipy.sparse.spmatrix.set_shape',
'scipy.sparse.spmatrix.setdiag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.setdiag.html#scipy.sparse.spmatrix.setdiag',
'scipy.sparse.spmatrix.shape': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.shape.html#scipy.sparse.spmatrix.shape',
'scipy.sparse.spmatrix.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.sum.html#scipy.sparse.spmatrix.sum',
'scipy.sparse.spmatrix.toarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.toarray.html#scipy.sparse.spmatrix.toarray',
'scipy.sparse.spmatrix.tobsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.tobsr.html#scipy.sparse.spmatrix.tobsr',
'scipy.sparse.spmatrix.tocoo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.tocoo.html#scipy.sparse.spmatrix.tocoo',
'scipy.sparse.spmatrix.tocsc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.tocsc.html#scipy.sparse.spmatrix.tocsc',
'scipy.sparse.spmatrix.tocsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.tocsr.html#scipy.sparse.spmatrix.tocsr',
'scipy.sparse.spmatrix.todense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.todense.html#scipy.sparse.spmatrix.todense',
'scipy.sparse.spmatrix.todia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.todia.html#scipy.sparse.spmatrix.todia',
'scipy.sparse.spmatrix.todok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.todok.html#scipy.sparse.spmatrix.todok',
'scipy.sparse.spmatrix.tolil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.tolil.html#scipy.sparse.spmatrix.tolil',
'scipy.sparse.spmatrix.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spmatrix.transpose.html#scipy.sparse.spmatrix.transpose',
'scipy.sparse.block_diag': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.block_diag.html#scipy.sparse.block_diag',
'scipy.sparse.bmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.bmat.html#scipy.sparse.bmat',
'scipy.sparse.diags': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.diags.html#scipy.sparse.diags',
'scipy.sparse.eye': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.eye.html#scipy.sparse.eye',
'scipy.sparse.find': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.find.html#scipy.sparse.find',
'scipy.sparse.hstack': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.hstack.html#scipy.sparse.hstack',
'scipy.sparse.identity': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.identity.html#scipy.sparse.identity',
'scipy.sparse.issparse': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.issparse.html#scipy.sparse.issparse',
'scipy.sparse.isspmatrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix.html#scipy.sparse.isspmatrix',
'scipy.sparse.isspmatrix_bsr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_bsr.html#scipy.sparse.isspmatrix_bsr',
'scipy.sparse.isspmatrix_coo': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_coo.html#scipy.sparse.isspmatrix_coo',
'scipy.sparse.isspmatrix_csc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_csc.html#scipy.sparse.isspmatrix_csc',
'scipy.sparse.isspmatrix_csr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_csr.html#scipy.sparse.isspmatrix_csr',
'scipy.sparse.isspmatrix_dia': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_dia.html#scipy.sparse.isspmatrix_dia',
'scipy.sparse.isspmatrix_dok': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_dok.html#scipy.sparse.isspmatrix_dok',
'scipy.sparse.isspmatrix_lil': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.isspmatrix_lil.html#scipy.sparse.isspmatrix_lil',
'scipy.sparse.kron': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.kron.html#scipy.sparse.kron',
'scipy.sparse.kronsum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.kronsum.html#scipy.sparse.kronsum',
'scipy.sparse.load_npz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.load_npz.html#scipy.sparse.load_npz',
'scipy.sparse.rand': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.rand.html#scipy.sparse.rand',
'scipy.sparse.random': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.random.html#scipy.sparse.random',
'scipy.sparse.save_npz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.save_npz.html#scipy.sparse.save_npz',
'scipy.sparse.spdiags': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.spdiags.html#scipy.sparse.spdiags',
'scipy.sparse.tril': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.tril.html#scipy.sparse.tril',
'scipy.sparse.triu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.triu.html#scipy.sparse.triu',
'scipy.sparse.vstack': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.vstack.html#scipy.sparse.vstack'},
'scipy.spatial': { 'scipy.spatial.distance': 'https://docs.scipy.org/doc/scipy/reference/spatial.distance.html#module-scipy.spatial.distance',
'scipy.spatial.transform': 'https://docs.scipy.org/doc/scipy/reference/spatial.transform.html#module-scipy.spatial.transform',
'scipy.spatial.ConvexHull': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html#scipy.spatial.ConvexHull',
'scipy.spatial.Delaunay': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.html#scipy.spatial.Delaunay',
'scipy.spatial.HalfspaceIntersection': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.HalfspaceIntersection.html#scipy.spatial.HalfspaceIntersection',
'scipy.spatial.KDTree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.html#scipy.spatial.KDTree',
'scipy.spatial.Rectangle': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.html#scipy.spatial.Rectangle',
'scipy.spatial.SphericalVoronoi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.SphericalVoronoi.html#scipy.spatial.SphericalVoronoi',
'scipy.spatial.Voronoi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.html#scipy.spatial.Voronoi',
'scipy.spatial.cKDTree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html#scipy.spatial.cKDTree',
'scipy.spatial.ConvexHull.add_points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.add_points.html#scipy.spatial.ConvexHull.add_points',
'scipy.spatial.ConvexHull.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.close.html#scipy.spatial.ConvexHull.close',
'scipy.spatial.ConvexHull.points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.points.html#scipy.spatial.ConvexHull.points',
'scipy.spatial.ConvexHull.vertices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.vertices.html#scipy.spatial.ConvexHull.vertices',
'scipy.spatial.Delaunay.add_points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.add_points.html#scipy.spatial.Delaunay.add_points',
'scipy.spatial.Delaunay.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.close.html#scipy.spatial.Delaunay.close',
'scipy.spatial.Delaunay.convex_hull': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.convex_hull.html#scipy.spatial.Delaunay.convex_hull',
'scipy.spatial.Delaunay.find_simplex': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.find_simplex.html#scipy.spatial.Delaunay.find_simplex',
'scipy.spatial.Delaunay.lift_points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.lift_points.html#scipy.spatial.Delaunay.lift_points',
'scipy.spatial.Delaunay.plane_distance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.plane_distance.html#scipy.spatial.Delaunay.plane_distance',
'scipy.spatial.Delaunay.points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.points.html#scipy.spatial.Delaunay.points',
'scipy.spatial.Delaunay.transform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.transform.html#scipy.spatial.Delaunay.transform',
'scipy.spatial.Delaunay.vertex_neighbor_vertices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.vertex_neighbor_vertices.html#scipy.spatial.Delaunay.vertex_neighbor_vertices',
'scipy.spatial.Delaunay.vertex_to_simplex': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.vertex_to_simplex.html#scipy.spatial.Delaunay.vertex_to_simplex',
'scipy.spatial.HalfspaceIntersection.add_halfspaces': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.HalfspaceIntersection.add_halfspaces.html#scipy.spatial.HalfspaceIntersection.add_halfspaces',
'scipy.spatial.HalfspaceIntersection.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.HalfspaceIntersection.close.html#scipy.spatial.HalfspaceIntersection.close',
'scipy.spatial.HalfspaceIntersection.dual_vertices': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.HalfspaceIntersection.dual_vertices.html#scipy.spatial.HalfspaceIntersection.dual_vertices',
'scipy.spatial.HalfspaceIntersection.halfspaces': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.HalfspaceIntersection.halfspaces.html#scipy.spatial.HalfspaceIntersection.halfspaces',
'scipy.spatial.KDTree.count_neighbors': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.count_neighbors.html#scipy.spatial.KDTree.count_neighbors',
'scipy.spatial.KDTree.query': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query.html#scipy.spatial.KDTree.query',
'scipy.spatial.KDTree.query_ball_point': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html#scipy.spatial.KDTree.query_ball_point',
'scipy.spatial.KDTree.query_ball_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_tree.html#scipy.spatial.KDTree.query_ball_tree',
'scipy.spatial.KDTree.query_pairs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_pairs.html#scipy.spatial.KDTree.query_pairs',
'scipy.spatial.KDTree.sparse_distance_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.sparse_distance_matrix.html#scipy.spatial.KDTree.sparse_distance_matrix',
'scipy.spatial.Rectangle.max_distance_point': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.max_distance_point.html#scipy.spatial.Rectangle.max_distance_point',
'scipy.spatial.Rectangle.max_distance_rectangle': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.max_distance_rectangle.html#scipy.spatial.Rectangle.max_distance_rectangle',
'scipy.spatial.Rectangle.min_distance_point': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.min_distance_point.html#scipy.spatial.Rectangle.min_distance_point',
'scipy.spatial.Rectangle.min_distance_rectangle': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.min_distance_rectangle.html#scipy.spatial.Rectangle.min_distance_rectangle',
'scipy.spatial.Rectangle.split': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.split.html#scipy.spatial.Rectangle.split',
'scipy.spatial.Rectangle.volume': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Rectangle.volume.html#scipy.spatial.Rectangle.volume',
'scipy.spatial.SphericalVoronoi.calculate_areas': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.SphericalVoronoi.calculate_areas.html#scipy.spatial.SphericalVoronoi.calculate_areas',
'scipy.spatial.SphericalVoronoi.sort_vertices_of_regions': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.SphericalVoronoi.sort_vertices_of_regions.html#scipy.spatial.SphericalVoronoi.sort_vertices_of_regions',
'scipy.spatial.Voronoi.add_points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.add_points.html#scipy.spatial.Voronoi.add_points',
'scipy.spatial.Voronoi.close': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.close.html#scipy.spatial.Voronoi.close',
'scipy.spatial.Voronoi.points': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.points.html#scipy.spatial.Voronoi.points',
'scipy.spatial.Voronoi.ridge_dict': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Voronoi.ridge_dict.html#scipy.spatial.Voronoi.ridge_dict',
'scipy.spatial.cKDTree.count_neighbors': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.count_neighbors.html#scipy.spatial.cKDTree.count_neighbors',
'scipy.spatial.cKDTree.query': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.query.html#scipy.spatial.cKDTree.query',
'scipy.spatial.cKDTree.query_ball_point': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.query_ball_point.html#scipy.spatial.cKDTree.query_ball_point',
'scipy.spatial.cKDTree.query_ball_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.query_ball_tree.html#scipy.spatial.cKDTree.query_ball_tree',
'scipy.spatial.cKDTree.query_pairs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.query_pairs.html#scipy.spatial.cKDTree.query_pairs',
'scipy.spatial.cKDTree.sparse_distance_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.sparse_distance_matrix.html#scipy.spatial.cKDTree.sparse_distance_matrix',
'scipy.spatial.convex_hull_plot_2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.convex_hull_plot_2d.html#scipy.spatial.convex_hull_plot_2d',
'scipy.spatial.delaunay_plot_2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.delaunay_plot_2d.html#scipy.spatial.delaunay_plot_2d',
'scipy.spatial.distance_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance_matrix.html#scipy.spatial.distance_matrix',
'scipy.spatial.geometric_slerp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.geometric_slerp.html#scipy.spatial.geometric_slerp',
'scipy.spatial.minkowski_distance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.minkowski_distance.html#scipy.spatial.minkowski_distance',
'scipy.spatial.minkowski_distance_p': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.minkowski_distance_p.html#scipy.spatial.minkowski_distance_p',
'scipy.spatial.procrustes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.procrustes.html#scipy.spatial.procrustes',
'scipy.spatial.tsearch': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.tsearch.html#scipy.spatial.tsearch',
'scipy.spatial.voronoi_plot_2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.voronoi_plot_2d.html#scipy.spatial.voronoi_plot_2d'},
'scipy.special': { 'scipy.special.cython_special': 'https://docs.scipy.org/doc/scipy/reference/special.cython_special.html#module-scipy.special.cython_special',
'scipy.special.errstate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.errstate.html#scipy.special.errstate',
'scipy.special.SpecialFunctionError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.SpecialFunctionError.html#scipy.special.SpecialFunctionError.with_traceback',
'scipy.special.SpecialFunctionWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.SpecialFunctionWarning.html#scipy.special.SpecialFunctionWarning.with_traceback',
'scipy.special.ai_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ai_zeros.html#scipy.special.ai_zeros',
'scipy.special.assoc_laguerre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.assoc_laguerre.html#scipy.special.assoc_laguerre',
'scipy.special.bei_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.bei_zeros.html#scipy.special.bei_zeros',
'scipy.special.beip_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.beip_zeros.html#scipy.special.beip_zeros',
'scipy.special.ber_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ber_zeros.html#scipy.special.ber_zeros',
'scipy.special.bernoulli': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.bernoulli.html#scipy.special.bernoulli',
'scipy.special.berp_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.berp_zeros.html#scipy.special.berp_zeros',
'scipy.special.bi_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.bi_zeros.html#scipy.special.bi_zeros',
'scipy.special.chebyc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.chebyc.html#scipy.special.chebyc',
'scipy.special.chebys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.chebys.html#scipy.special.chebys',
'scipy.special.chebyt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.chebyt.html#scipy.special.chebyt',
'scipy.special.chebyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.chebyu.html#scipy.special.chebyu',
'scipy.special.clpmn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.clpmn.html#scipy.special.clpmn',
'scipy.special.comb': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.comb.html#scipy.special.comb',
'scipy.special.diric': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.diric.html#scipy.special.diric',
'scipy.special.ellip_harm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellip_harm.html#scipy.special.ellip_harm',
'scipy.special.ellip_harm_2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellip_harm_2.html#scipy.special.ellip_harm_2',
'scipy.special.ellip_normal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellip_normal.html#scipy.special.ellip_normal',
'scipy.special.erf_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.erf_zeros.html#scipy.special.erf_zeros',
'scipy.special.euler': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.euler.html#scipy.special.euler',
'scipy.special.factorial': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.factorial.html#scipy.special.factorial',
'scipy.special.factorial2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.factorial2.html#scipy.special.factorial2',
'scipy.special.factorialk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.factorialk.html#scipy.special.factorialk',
'scipy.special.fresnel_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.fresnel_zeros.html#scipy.special.fresnel_zeros',
'scipy.special.fresnelc_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.fresnelc_zeros.html#scipy.special.fresnelc_zeros',
'scipy.special.fresnels_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.fresnels_zeros.html#scipy.special.fresnels_zeros',
'scipy.special.gegenbauer': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.gegenbauer.html#scipy.special.gegenbauer',
'scipy.special.genlaguerre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.genlaguerre.html#scipy.special.genlaguerre',
'scipy.special.geterr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.geterr.html#scipy.special.geterr',
'scipy.special.h1vp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.h1vp.html#scipy.special.h1vp',
'scipy.special.h2vp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.h2vp.html#scipy.special.h2vp',
'scipy.special.hermite': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hermite.html#scipy.special.hermite',
'scipy.special.hermitenorm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hermitenorm.html#scipy.special.hermitenorm',
'scipy.special.ivp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ivp.html#scipy.special.ivp',
'scipy.special.jacobi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jacobi.html#scipy.special.jacobi',
'scipy.special.jn_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jn_zeros.html#scipy.special.jn_zeros',
'scipy.special.jnjnp_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jnjnp_zeros.html#scipy.special.jnjnp_zeros',
'scipy.special.jnp_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jnp_zeros.html#scipy.special.jnp_zeros',
'scipy.special.jnyn_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jnyn_zeros.html#scipy.special.jnyn_zeros',
'scipy.special.jvp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.jvp.html#scipy.special.jvp',
'scipy.special.kei_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.kei_zeros.html#scipy.special.kei_zeros',
'scipy.special.keip_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.keip_zeros.html#scipy.special.keip_zeros',
'scipy.special.kelvin_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.kelvin_zeros.html#scipy.special.kelvin_zeros',
'scipy.special.ker_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ker_zeros.html#scipy.special.ker_zeros',
'scipy.special.kerp_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.kerp_zeros.html#scipy.special.kerp_zeros',
'scipy.special.kvp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.kvp.html#scipy.special.kvp',
'scipy.special.laguerre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.laguerre.html#scipy.special.laguerre',
'scipy.special.lambertw': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lambertw.html#scipy.special.lambertw',
'scipy.special.legendre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.legendre.html#scipy.special.legendre',
'scipy.special.lmbda': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lmbda.html#scipy.special.lmbda',
'scipy.special.log_softmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.log_softmax.html#scipy.special.log_softmax',
'scipy.special.logsumexp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.logsumexp.html#scipy.special.logsumexp',
'scipy.special.lpmn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lpmn.html#scipy.special.lpmn',
'scipy.special.lpn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lpn.html#scipy.special.lpn',
'scipy.special.lqmn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lqmn.html#scipy.special.lqmn',
'scipy.special.lqn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.lqn.html#scipy.special.lqn',
'scipy.special.mathieu_even_coef': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.mathieu_even_coef.html#scipy.special.mathieu_even_coef',
'scipy.special.mathieu_odd_coef': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.mathieu_odd_coef.html#scipy.special.mathieu_odd_coef',
'scipy.special.multigammaln': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.multigammaln.html#scipy.special.multigammaln',
'scipy.special.obl_cv_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.obl_cv_seq.html#scipy.special.obl_cv_seq',
'scipy.special.pbdn_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.pbdn_seq.html#scipy.special.pbdn_seq',
'scipy.special.pbdv_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.pbdv_seq.html#scipy.special.pbdv_seq',
'scipy.special.pbvv_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.pbvv_seq.html#scipy.special.pbvv_seq',
'scipy.special.perm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.perm.html#scipy.special.perm',
'scipy.special.polygamma': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.polygamma.html#scipy.special.polygamma',
'scipy.special.pro_cv_seq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.pro_cv_seq.html#scipy.special.pro_cv_seq',
'scipy.special.riccati_jn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.riccati_jn.html#scipy.special.riccati_jn',
'scipy.special.riccati_yn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.riccati_yn.html#scipy.special.riccati_yn',
'scipy.special.roots_chebyc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_chebyc.html#scipy.special.roots_chebyc',
'scipy.special.roots_chebys': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_chebys.html#scipy.special.roots_chebys',
'scipy.special.roots_chebyt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_chebyt.html#scipy.special.roots_chebyt',
'scipy.special.roots_chebyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_chebyu.html#scipy.special.roots_chebyu',
'scipy.special.roots_gegenbauer': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_gegenbauer.html#scipy.special.roots_gegenbauer',
'scipy.special.roots_genlaguerre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_genlaguerre.html#scipy.special.roots_genlaguerre',
'scipy.special.roots_hermite': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_hermite.html#scipy.special.roots_hermite',
'scipy.special.roots_hermitenorm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_hermitenorm.html#scipy.special.roots_hermitenorm',
'scipy.special.roots_jacobi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_jacobi.html#scipy.special.roots_jacobi',
'scipy.special.roots_laguerre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_laguerre.html#scipy.special.roots_laguerre',
'scipy.special.roots_legendre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_legendre.html#scipy.special.roots_legendre',
'scipy.special.roots_sh_chebyt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_sh_chebyt.html#scipy.special.roots_sh_chebyt',
'scipy.special.roots_sh_chebyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_sh_chebyu.html#scipy.special.roots_sh_chebyu',
'scipy.special.roots_sh_jacobi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_sh_jacobi.html#scipy.special.roots_sh_jacobi',
'scipy.special.roots_sh_legendre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_sh_legendre.html#scipy.special.roots_sh_legendre',
'scipy.special.seterr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.seterr.html#scipy.special.seterr',
'scipy.special.sh_chebyt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sh_chebyt.html#scipy.special.sh_chebyt',
'scipy.special.sh_chebyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sh_chebyu.html#scipy.special.sh_chebyu',
'scipy.special.sh_jacobi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sh_jacobi.html#scipy.special.sh_jacobi',
'scipy.special.sh_legendre': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sh_legendre.html#scipy.special.sh_legendre',
'scipy.special.sinc': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sinc.html#scipy.special.sinc',
'scipy.special.softmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.softmax.html#scipy.special.softmax',
'scipy.special.spherical_in': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.spherical_in.html#scipy.special.spherical_in',
'scipy.special.spherical_jn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.spherical_jn.html#scipy.special.spherical_jn',
'scipy.special.spherical_kn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.spherical_kn.html#scipy.special.spherical_kn',
'scipy.special.spherical_yn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.spherical_yn.html#scipy.special.spherical_yn',
'scipy.special.y0_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.y0_zeros.html#scipy.special.y0_zeros',
'scipy.special.y1_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.y1_zeros.html#scipy.special.y1_zeros',
'scipy.special.y1p_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.y1p_zeros.html#scipy.special.y1p_zeros',
'scipy.special.yn_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.yn_zeros.html#scipy.special.yn_zeros',
'scipy.special.ynp_zeros': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ynp_zeros.html#scipy.special.ynp_zeros',
'scipy.special.yvp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.yvp.html#scipy.special.yvp',
'scipy.special.zeta': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.zeta.html#scipy.special.zeta'},
'scipy.stats': { 'scipy.stats.mstats': 'https://docs.scipy.org/doc/scipy/reference/stats.mstats.html#module-scipy.stats.mstats',
'scipy.stats.gaussian_kde': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html#scipy.stats.gaussian_kde',
'scipy.stats.rv_continuous': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.html#scipy.stats.rv_continuous',
'scipy.stats.rv_discrete': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.html#scipy.stats.rv_discrete',
'scipy.stats.rv_histogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.html#scipy.stats.rv_histogram',
'scipy.stats.F_onewayBadInputSizesWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.F_onewayBadInputSizesWarning.html#scipy.stats.F_onewayBadInputSizesWarning.with_traceback',
'scipy.stats.F_onewayConstantInputWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.F_onewayConstantInputWarning.html#scipy.stats.F_onewayConstantInputWarning.with_traceback',
'scipy.stats.PearsonRConstantInputWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.PearsonRConstantInputWarning.html#scipy.stats.PearsonRConstantInputWarning.with_traceback',
'scipy.stats.PearsonRNearConstantInputWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.PearsonRNearConstantInputWarning.html#scipy.stats.PearsonRNearConstantInputWarning.with_traceback',
'scipy.stats.SpearmanRConstantInputWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.SpearmanRConstantInputWarning.html#scipy.stats.SpearmanRConstantInputWarning.with_traceback',
'scipy.stats.gaussian_kde.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.__call__.html#scipy.stats.gaussian_kde.__call__',
'scipy.stats.gaussian_kde.covariance_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.covariance_factor.html#scipy.stats.gaussian_kde.covariance_factor',
'scipy.stats.gaussian_kde.evaluate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.evaluate.html#scipy.stats.gaussian_kde.evaluate',
'scipy.stats.gaussian_kde.integrate_box': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.integrate_box.html#scipy.stats.gaussian_kde.integrate_box',
'scipy.stats.gaussian_kde.integrate_box_1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.integrate_box_1d.html#scipy.stats.gaussian_kde.integrate_box_1d',
'scipy.stats.gaussian_kde.integrate_gaussian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.integrate_gaussian.html#scipy.stats.gaussian_kde.integrate_gaussian',
'scipy.stats.gaussian_kde.integrate_kde': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.integrate_kde.html#scipy.stats.gaussian_kde.integrate_kde',
'scipy.stats.gaussian_kde.logpdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.logpdf.html#scipy.stats.gaussian_kde.logpdf',
'scipy.stats.gaussian_kde.neff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.neff.html#scipy.stats.gaussian_kde.neff',
'scipy.stats.gaussian_kde.pdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.pdf.html#scipy.stats.gaussian_kde.pdf',
'scipy.stats.gaussian_kde.resample': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.resample.html#scipy.stats.gaussian_kde.resample',
'scipy.stats.gaussian_kde.scotts_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.scotts_factor.html#scipy.stats.gaussian_kde.scotts_factor',
'scipy.stats.gaussian_kde.set_bandwidth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.set_bandwidth.html#scipy.stats.gaussian_kde.set_bandwidth',
'scipy.stats.gaussian_kde.silverman_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.silverman_factor.html#scipy.stats.gaussian_kde.silverman_factor',
'scipy.stats.gaussian_kde.weights': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.weights.html#scipy.stats.gaussian_kde.weights',
'scipy.stats.rv_continuous.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.__call__.html#scipy.stats.rv_continuous.__call__',
'scipy.stats.rv_continuous.cdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.cdf.html#scipy.stats.rv_continuous.cdf',
'scipy.stats.rv_continuous.entropy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.entropy.html#scipy.stats.rv_continuous.entropy',
'scipy.stats.rv_continuous.expect': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.expect.html#scipy.stats.rv_continuous.expect',
'scipy.stats.rv_continuous.fit': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit',
'scipy.stats.rv_continuous.fit_loc_scale': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit_loc_scale.html#scipy.stats.rv_continuous.fit_loc_scale',
'scipy.stats.rv_continuous.freeze': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.freeze.html#scipy.stats.rv_continuous.freeze',
'scipy.stats.rv_continuous.interval': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.interval.html#scipy.stats.rv_continuous.interval',
'scipy.stats.rv_continuous.isf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.isf.html#scipy.stats.rv_continuous.isf',
'scipy.stats.rv_continuous.logcdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.logcdf.html#scipy.stats.rv_continuous.logcdf',
'scipy.stats.rv_continuous.logpdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.logpdf.html#scipy.stats.rv_continuous.logpdf',
'scipy.stats.rv_continuous.logsf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.logsf.html#scipy.stats.rv_continuous.logsf',
'scipy.stats.rv_continuous.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.mean.html#scipy.stats.rv_continuous.mean',
'scipy.stats.rv_continuous.median': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.median.html#scipy.stats.rv_continuous.median',
'scipy.stats.rv_continuous.moment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.moment.html#scipy.stats.rv_continuous.moment',
'scipy.stats.rv_continuous.nnlf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.nnlf.html#scipy.stats.rv_continuous.nnlf',
'scipy.stats.rv_continuous.pdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.pdf.html#scipy.stats.rv_continuous.pdf',
'scipy.stats.rv_continuous.ppf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.ppf.html#scipy.stats.rv_continuous.ppf',
'scipy.stats.rv_continuous.random_state': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.random_state.html#scipy.stats.rv_continuous.random_state',
'scipy.stats.rv_continuous.rvs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.rvs.html#scipy.stats.rv_continuous.rvs',
'scipy.stats.rv_continuous.sf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.sf.html#scipy.stats.rv_continuous.sf',
'scipy.stats.rv_continuous.stats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.stats.html#scipy.stats.rv_continuous.stats',
'scipy.stats.rv_continuous.std': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.std.html#scipy.stats.rv_continuous.std',
'scipy.stats.rv_continuous.support': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.support.html#scipy.stats.rv_continuous.support',
'scipy.stats.rv_continuous.var': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.var.html#scipy.stats.rv_continuous.var',
'scipy.stats.rv_discrete.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.__call__.html#scipy.stats.rv_discrete.__call__',
'scipy.stats.rv_discrete.cdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.cdf.html#scipy.stats.rv_discrete.cdf',
'scipy.stats.rv_discrete.entropy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.entropy.html#scipy.stats.rv_discrete.entropy',
'scipy.stats.rv_discrete.expect': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.expect.html#scipy.stats.rv_discrete.expect',
'scipy.stats.rv_discrete.freeze': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.freeze.html#scipy.stats.rv_discrete.freeze',
'scipy.stats.rv_discrete.interval': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.interval.html#scipy.stats.rv_discrete.interval',
'scipy.stats.rv_discrete.isf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.isf.html#scipy.stats.rv_discrete.isf',
'scipy.stats.rv_discrete.logcdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.logcdf.html#scipy.stats.rv_discrete.logcdf',
'scipy.stats.rv_discrete.logpmf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.logpmf.html#scipy.stats.rv_discrete.logpmf',
'scipy.stats.rv_discrete.logsf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.logsf.html#scipy.stats.rv_discrete.logsf',
'scipy.stats.rv_discrete.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.mean.html#scipy.stats.rv_discrete.mean',
'scipy.stats.rv_discrete.median': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.median.html#scipy.stats.rv_discrete.median',
'scipy.stats.rv_discrete.moment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.moment.html#scipy.stats.rv_discrete.moment',
'scipy.stats.rv_discrete.pmf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.pmf.html#scipy.stats.rv_discrete.pmf',
'scipy.stats.rv_discrete.ppf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.ppf.html#scipy.stats.rv_discrete.ppf',
'scipy.stats.rv_discrete.random_state': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.random_state.html#scipy.stats.rv_discrete.random_state',
'scipy.stats.rv_discrete.rvs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.rvs.html#scipy.stats.rv_discrete.rvs',
'scipy.stats.rv_discrete.sf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.sf.html#scipy.stats.rv_discrete.sf',
'scipy.stats.rv_discrete.stats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.stats.html#scipy.stats.rv_discrete.stats',
'scipy.stats.rv_discrete.std': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.std.html#scipy.stats.rv_discrete.std',
'scipy.stats.rv_discrete.support': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.support.html#scipy.stats.rv_discrete.support',
'scipy.stats.rv_discrete.var': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.var.html#scipy.stats.rv_discrete.var',
'scipy.stats.rv_histogram.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.__call__.html#scipy.stats.rv_histogram.__call__',
'scipy.stats.rv_histogram.cdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.cdf.html#scipy.stats.rv_histogram.cdf',
'scipy.stats.rv_histogram.entropy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.entropy.html#scipy.stats.rv_histogram.entropy',
'scipy.stats.rv_histogram.expect': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.expect.html#scipy.stats.rv_histogram.expect',
'scipy.stats.rv_histogram.fit': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.fit.html#scipy.stats.rv_histogram.fit',
'scipy.stats.rv_histogram.fit_loc_scale': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.fit_loc_scale.html#scipy.stats.rv_histogram.fit_loc_scale',
'scipy.stats.rv_histogram.freeze': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.freeze.html#scipy.stats.rv_histogram.freeze',
'scipy.stats.rv_histogram.interval': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.interval.html#scipy.stats.rv_histogram.interval',
'scipy.stats.rv_histogram.isf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.isf.html#scipy.stats.rv_histogram.isf',
'scipy.stats.rv_histogram.logcdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.logcdf.html#scipy.stats.rv_histogram.logcdf',
'scipy.stats.rv_histogram.logpdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.logpdf.html#scipy.stats.rv_histogram.logpdf',
'scipy.stats.rv_histogram.logsf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.logsf.html#scipy.stats.rv_histogram.logsf',
'scipy.stats.rv_histogram.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.mean.html#scipy.stats.rv_histogram.mean',
'scipy.stats.rv_histogram.median': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.median.html#scipy.stats.rv_histogram.median',
'scipy.stats.rv_histogram.moment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.moment.html#scipy.stats.rv_histogram.moment',
'scipy.stats.rv_histogram.nnlf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.nnlf.html#scipy.stats.rv_histogram.nnlf',
'scipy.stats.rv_histogram.pdf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.pdf.html#scipy.stats.rv_histogram.pdf',
'scipy.stats.rv_histogram.ppf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.ppf.html#scipy.stats.rv_histogram.ppf',
'scipy.stats.rv_histogram.random_state': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.random_state.html#scipy.stats.rv_histogram.random_state',
'scipy.stats.rv_histogram.rvs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.rvs.html#scipy.stats.rv_histogram.rvs',
'scipy.stats.rv_histogram.sf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.sf.html#scipy.stats.rv_histogram.sf',
'scipy.stats.rv_histogram.stats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.stats.html#scipy.stats.rv_histogram.stats',
'scipy.stats.rv_histogram.std': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.std.html#scipy.stats.rv_histogram.std',
'scipy.stats.rv_histogram.support': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.support.html#scipy.stats.rv_histogram.support',
'scipy.stats.rv_histogram.var': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_histogram.var.html#scipy.stats.rv_histogram.var',
'scipy.stats.anderson': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.anderson.html#scipy.stats.anderson',
'scipy.stats.anderson_ksamp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.anderson_ksamp.html#scipy.stats.anderson_ksamp',
'scipy.stats.ansari': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ansari.html#scipy.stats.ansari',
'scipy.stats.bartlett': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bartlett.html#scipy.stats.bartlett',
'scipy.stats.bayes_mvs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bayes_mvs.html#scipy.stats.bayes_mvs',
'scipy.stats.binned_statistic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic.html#scipy.stats.binned_statistic',
'scipy.stats.binned_statistic_2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic_2d.html#scipy.stats.binned_statistic_2d',
'scipy.stats.binned_statistic_dd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binned_statistic_dd.html#scipy.stats.binned_statistic_dd',
'scipy.stats.binom_test': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binom_test.html#scipy.stats.binom_test',
'scipy.stats.boxcox': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox.html#scipy.stats.boxcox',
'scipy.stats.boxcox_llf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox_llf.html#scipy.stats.boxcox_llf',
'scipy.stats.boxcox_normmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox_normmax.html#scipy.stats.boxcox_normmax',
'scipy.stats.boxcox_normplot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox_normplot.html#scipy.stats.boxcox_normplot',
'scipy.stats.brunnermunzel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.brunnermunzel.html#scipy.stats.brunnermunzel',
'scipy.stats.chi2_contingency': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2_contingency.html#scipy.stats.chi2_contingency',
'scipy.stats.chisquare': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html#scipy.stats.chisquare',
'scipy.stats.circmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.circmean.html#scipy.stats.circmean',
'scipy.stats.circstd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.circstd.html#scipy.stats.circstd',
'scipy.stats.circvar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.circvar.html#scipy.stats.circvar',
'scipy.stats.combine_pvalues': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.combine_pvalues.html#scipy.stats.combine_pvalues',
'scipy.stats.cumfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.cumfreq.html#scipy.stats.cumfreq',
'scipy.stats.describe': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.describe.html#scipy.stats.describe',
'scipy.stats.energy_distance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.energy_distance.html#scipy.stats.energy_distance',
'scipy.stats.entropy': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.entropy.html#scipy.stats.entropy',
'scipy.stats.epps_singleton_2samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.epps_singleton_2samp.html#scipy.stats.epps_singleton_2samp',
'scipy.stats.f_oneway': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.f_oneway.html#scipy.stats.f_oneway',
'scipy.stats.find_repeats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.find_repeats.html#scipy.stats.find_repeats',
'scipy.stats.fisher_exact': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.fisher_exact.html#scipy.stats.fisher_exact',
'scipy.stats.fligner': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.fligner.html#scipy.stats.fligner',
'scipy.stats.friedmanchisquare': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.friedmanchisquare.html#scipy.stats.friedmanchisquare',
'scipy.stats.gmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gmean.html#scipy.stats.gmean',
'scipy.stats.gstd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gstd.html#scipy.stats.gstd',
'scipy.stats.hmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.hmean.html#scipy.stats.hmean',
'scipy.stats.iqr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.iqr.html#scipy.stats.iqr',
'scipy.stats.itemfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.itemfreq.html#scipy.stats.itemfreq',
'scipy.stats.jarque_bera': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.jarque_bera.html#scipy.stats.jarque_bera',
'scipy.stats.kendalltau': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kendalltau.html#scipy.stats.kendalltau',
'scipy.stats.kruskal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kruskal.html#scipy.stats.kruskal',
'scipy.stats.ks_1samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_1samp.html#scipy.stats.ks_1samp',
'scipy.stats.ks_2samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html#scipy.stats.ks_2samp',
'scipy.stats.kstat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstat.html#scipy.stats.kstat',
'scipy.stats.kstatvar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstatvar.html#scipy.stats.kstatvar',
'scipy.stats.kstest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest',
'scipy.stats.kurtosis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kurtosis.html#scipy.stats.kurtosis',
'scipy.stats.kurtosistest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kurtosistest.html#scipy.stats.kurtosistest',
'scipy.stats.levene': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.levene.html#scipy.stats.levene',
'scipy.stats.linregress': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html#scipy.stats.linregress',
'scipy.stats.mannwhitneyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html#scipy.stats.mannwhitneyu',
'scipy.stats.median_abs_deviation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.median_abs_deviation.html#scipy.stats.median_abs_deviation',
'scipy.stats.median_absolute_deviation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.median_absolute_deviation.html#scipy.stats.median_absolute_deviation',
'scipy.stats.median_test': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.median_test.html#scipy.stats.median_test',
'scipy.stats.mode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mode.html#scipy.stats.mode',
'scipy.stats.moment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html#scipy.stats.moment',
'scipy.stats.mood': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mood.html#scipy.stats.mood',
'scipy.stats.multiscale_graphcorr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.multiscale_graphcorr.html#scipy.stats.multiscale_graphcorr',
'scipy.stats.mvsdist': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mvsdist.html#scipy.stats.mvsdist',
'scipy.stats.normaltest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.normaltest.html#scipy.stats.normaltest',
'scipy.stats.obrientransform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.obrientransform.html#scipy.stats.obrientransform',
'scipy.stats.pearsonr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html#scipy.stats.pearsonr',
'scipy.stats.percentileofscore': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.percentileofscore.html#scipy.stats.percentileofscore',
'scipy.stats.pointbiserialr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pointbiserialr.html#scipy.stats.pointbiserialr',
'scipy.stats.power_divergence': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.power_divergence.html#scipy.stats.power_divergence',
'scipy.stats.ppcc_max': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ppcc_max.html#scipy.stats.ppcc_max',
'scipy.stats.ppcc_plot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ppcc_plot.html#scipy.stats.ppcc_plot',
'scipy.stats.probplot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.probplot.html#scipy.stats.probplot',
'scipy.stats.rankdata': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rankdata.html#scipy.stats.rankdata',
'scipy.stats.ranksums': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ranksums.html#scipy.stats.ranksums',
'scipy.stats.relfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.relfreq.html#scipy.stats.relfreq',
'scipy.stats.rvs_ratio_uniforms': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rvs_ratio_uniforms.html#scipy.stats.rvs_ratio_uniforms',
'scipy.stats.scoreatpercentile': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.scoreatpercentile.html#scipy.stats.scoreatpercentile',
'scipy.stats.sem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sem.html#scipy.stats.sem',
'scipy.stats.shapiro': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.shapiro.html#scipy.stats.shapiro',
'scipy.stats.siegelslopes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.siegelslopes.html#scipy.stats.siegelslopes',
'scipy.stats.sigmaclip': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html#scipy.stats.sigmaclip',
'scipy.stats.skew': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html#scipy.stats.skew',
'scipy.stats.skewtest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skewtest.html#scipy.stats.skewtest',
'scipy.stats.spearmanr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html#scipy.stats.spearmanr',
'scipy.stats.theilslopes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.theilslopes.html#scipy.stats.theilslopes',
'scipy.stats.tiecorrect': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tiecorrect.html#scipy.stats.tiecorrect',
'scipy.stats.tmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tmax.html#scipy.stats.tmax',
'scipy.stats.tmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tmean.html#scipy.stats.tmean',
'scipy.stats.tmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tmin.html#scipy.stats.tmin',
'scipy.stats.trim1': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.trim1.html#scipy.stats.trim1',
'scipy.stats.trim_mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.trim_mean.html#scipy.stats.trim_mean',
'scipy.stats.trimboth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.trimboth.html#scipy.stats.trimboth',
'scipy.stats.tsem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tsem.html#scipy.stats.tsem',
'scipy.stats.tstd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tstd.html#scipy.stats.tstd',
'scipy.stats.ttest_1samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html#scipy.stats.ttest_1samp',
'scipy.stats.ttest_ind': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html#scipy.stats.ttest_ind',
'scipy.stats.ttest_ind_from_stats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind_from_stats.html#scipy.stats.ttest_ind_from_stats',
'scipy.stats.ttest_rel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_rel.html#scipy.stats.ttest_rel',
'scipy.stats.tvar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.tvar.html#scipy.stats.tvar',
'scipy.stats.variation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.variation.html#scipy.stats.variation',
'scipy.stats.wasserstein_distance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wasserstein_distance.html#scipy.stats.wasserstein_distance',
'scipy.stats.weightedtau': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.weightedtau.html#scipy.stats.weightedtau',
'scipy.stats.wilcoxon': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wilcoxon.html#scipy.stats.wilcoxon',
'scipy.stats.yeojohnson': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.yeojohnson.html#scipy.stats.yeojohnson',
'scipy.stats.yeojohnson_llf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.yeojohnson_llf.html#scipy.stats.yeojohnson_llf',
'scipy.stats.yeojohnson_normmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.yeojohnson_normmax.html#scipy.stats.yeojohnson_normmax',
'scipy.stats.yeojohnson_normplot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.yeojohnson_normplot.html#scipy.stats.yeojohnson_normplot',
'scipy.stats.zmap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.zmap.html#scipy.stats.zmap',
'scipy.stats.zscore': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.zscore.html#scipy.stats.zscore'},
'scipy.cluster.hierarchy': { 'scipy.cluster.hierarchy.ClusterNode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.html#scipy.cluster.hierarchy.ClusterNode',
'scipy.cluster.hierarchy.ClusterNode.get_count': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.get_count.html#scipy.cluster.hierarchy.ClusterNode.get_count',
'scipy.cluster.hierarchy.ClusterNode.get_id': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.get_id.html#scipy.cluster.hierarchy.ClusterNode.get_id',
'scipy.cluster.hierarchy.ClusterNode.get_left': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.get_left.html#scipy.cluster.hierarchy.ClusterNode.get_left',
'scipy.cluster.hierarchy.ClusterNode.get_right': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.get_right.html#scipy.cluster.hierarchy.ClusterNode.get_right',
'scipy.cluster.hierarchy.ClusterNode.is_leaf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.is_leaf.html#scipy.cluster.hierarchy.ClusterNode.is_leaf',
'scipy.cluster.hierarchy.ClusterNode.pre_order': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ClusterNode.pre_order.html#scipy.cluster.hierarchy.ClusterNode.pre_order',
'scipy.cluster.hierarchy.average': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.average.html#scipy.cluster.hierarchy.average',
'scipy.cluster.hierarchy.centroid': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.centroid.html#scipy.cluster.hierarchy.centroid',
'scipy.cluster.hierarchy.complete': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.complete.html#scipy.cluster.hierarchy.complete',
'scipy.cluster.hierarchy.cophenet': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.cophenet.html#scipy.cluster.hierarchy.cophenet',
'scipy.cluster.hierarchy.correspond': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.correspond.html#scipy.cluster.hierarchy.correspond',
'scipy.cluster.hierarchy.cut_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.cut_tree.html#scipy.cluster.hierarchy.cut_tree',
'scipy.cluster.hierarchy.dendrogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.dendrogram.html#scipy.cluster.hierarchy.dendrogram',
'scipy.cluster.hierarchy.fcluster': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html#scipy.cluster.hierarchy.fcluster',
'scipy.cluster.hierarchy.fclusterdata': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fclusterdata.html#scipy.cluster.hierarchy.fclusterdata',
'scipy.cluster.hierarchy.from_mlab_linkage': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.from_mlab_linkage.html#scipy.cluster.hierarchy.from_mlab_linkage',
'scipy.cluster.hierarchy.inconsistent': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.inconsistent.html#scipy.cluster.hierarchy.inconsistent',
'scipy.cluster.hierarchy.is_isomorphic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.is_isomorphic.html#scipy.cluster.hierarchy.is_isomorphic',
'scipy.cluster.hierarchy.is_monotonic': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.is_monotonic.html#scipy.cluster.hierarchy.is_monotonic',
'scipy.cluster.hierarchy.is_valid_im': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.is_valid_im.html#scipy.cluster.hierarchy.is_valid_im',
'scipy.cluster.hierarchy.is_valid_linkage': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.is_valid_linkage.html#scipy.cluster.hierarchy.is_valid_linkage',
'scipy.cluster.hierarchy.leaders': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.leaders.html#scipy.cluster.hierarchy.leaders',
'scipy.cluster.hierarchy.leaves_list': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.leaves_list.html#scipy.cluster.hierarchy.leaves_list',
'scipy.cluster.hierarchy.linkage': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage',
'scipy.cluster.hierarchy.maxRstat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.maxRstat.html#scipy.cluster.hierarchy.maxRstat',
'scipy.cluster.hierarchy.maxdists': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.maxdists.html#scipy.cluster.hierarchy.maxdists',
'scipy.cluster.hierarchy.maxinconsts': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.maxinconsts.html#scipy.cluster.hierarchy.maxinconsts',
'scipy.cluster.hierarchy.median': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.median.html#scipy.cluster.hierarchy.median',
'scipy.cluster.hierarchy.num_obs_linkage': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.num_obs_linkage.html#scipy.cluster.hierarchy.num_obs_linkage',
'scipy.cluster.hierarchy.optimal_leaf_ordering': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.optimal_leaf_ordering.html#scipy.cluster.hierarchy.optimal_leaf_ordering',
'scipy.cluster.hierarchy.set_link_color_palette': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.set_link_color_palette.html#scipy.cluster.hierarchy.set_link_color_palette',
'scipy.cluster.hierarchy.single': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.single.html#scipy.cluster.hierarchy.single',
'scipy.cluster.hierarchy.to_mlab_linkage': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.to_mlab_linkage.html#scipy.cluster.hierarchy.to_mlab_linkage',
'scipy.cluster.hierarchy.to_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.to_tree.html#scipy.cluster.hierarchy.to_tree',
'scipy.cluster.hierarchy.ward': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.ward.html#scipy.cluster.hierarchy.ward',
'scipy.cluster.hierarchy.weighted': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.weighted.html#scipy.cluster.hierarchy.weighted'},
'scipy.integrate': { 'scipy.integrate.BDF': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.BDF.html#scipy.integrate.BDF',
'scipy.integrate.DOP853': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DOP853.html#scipy.integrate.DOP853',
'scipy.integrate.DenseOutput': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DenseOutput.html#scipy.integrate.DenseOutput',
'scipy.integrate.LSODA': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.LSODA.html#scipy.integrate.LSODA',
'scipy.integrate.OdeSolution': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolution.html#scipy.integrate.OdeSolution',
'scipy.integrate.OdeSolver': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolver.html#scipy.integrate.OdeSolver',
'scipy.integrate.RK23': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK23.html#scipy.integrate.RK23',
'scipy.integrate.RK45': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK45.html#scipy.integrate.RK45',
'scipy.integrate.Radau': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.Radau.html#scipy.integrate.Radau',
'scipy.integrate.complex_ode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.html#scipy.integrate.complex_ode',
'scipy.integrate.ode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html#scipy.integrate.ode',
'scipy.integrate.AccuracyWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.AccuracyWarning.html#scipy.integrate.AccuracyWarning.with_traceback',
'scipy.integrate.BDF.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.BDF.dense_output.html#scipy.integrate.BDF.dense_output',
'scipy.integrate.BDF.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.BDF.step.html#scipy.integrate.BDF.step',
'scipy.integrate.BDF.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.BDF.step_size.html#scipy.integrate.BDF.step_size',
'scipy.integrate.DOP853.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DOP853.dense_output.html#scipy.integrate.DOP853.dense_output',
'scipy.integrate.DOP853.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DOP853.step.html#scipy.integrate.DOP853.step',
'scipy.integrate.DOP853.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DOP853.step_size.html#scipy.integrate.DOP853.step_size',
'scipy.integrate.DenseOutput.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.DenseOutput.__call__.html#scipy.integrate.DenseOutput.__call__',
'scipy.integrate.IntegrationWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.IntegrationWarning.html#scipy.integrate.IntegrationWarning.with_traceback',
'scipy.integrate.LSODA.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.LSODA.dense_output.html#scipy.integrate.LSODA.dense_output',
'scipy.integrate.LSODA.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.LSODA.step.html#scipy.integrate.LSODA.step',
'scipy.integrate.LSODA.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.LSODA.step_size.html#scipy.integrate.LSODA.step_size',
'scipy.integrate.OdeSolution.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolution.__call__.html#scipy.integrate.OdeSolution.__call__',
'scipy.integrate.OdeSolver.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolver.dense_output.html#scipy.integrate.OdeSolver.dense_output',
'scipy.integrate.OdeSolver.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolver.step.html#scipy.integrate.OdeSolver.step',
'scipy.integrate.OdeSolver.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.OdeSolver.step_size.html#scipy.integrate.OdeSolver.step_size',
'scipy.integrate.RK23.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK23.dense_output.html#scipy.integrate.RK23.dense_output',
'scipy.integrate.RK23.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK23.step.html#scipy.integrate.RK23.step',
'scipy.integrate.RK23.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK23.step_size.html#scipy.integrate.RK23.step_size',
'scipy.integrate.RK45.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK45.dense_output.html#scipy.integrate.RK45.dense_output',
'scipy.integrate.RK45.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK45.step.html#scipy.integrate.RK45.step',
'scipy.integrate.RK45.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.RK45.step_size.html#scipy.integrate.RK45.step_size',
'scipy.integrate.Radau.dense_output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.Radau.dense_output.html#scipy.integrate.Radau.dense_output',
'scipy.integrate.Radau.step': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.Radau.step.html#scipy.integrate.Radau.step',
'scipy.integrate.Radau.step_size': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.Radau.step_size.html#scipy.integrate.Radau.step_size',
'scipy.integrate.complex_ode.get_return_code': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.get_return_code.html#scipy.integrate.complex_ode.get_return_code',
'scipy.integrate.complex_ode.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.integrate.html#scipy.integrate.complex_ode.integrate',
'scipy.integrate.complex_ode.set_f_params': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.set_f_params.html#scipy.integrate.complex_ode.set_f_params',
'scipy.integrate.complex_ode.set_initial_value': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.set_initial_value.html#scipy.integrate.complex_ode.set_initial_value',
'scipy.integrate.complex_ode.set_integrator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.set_integrator.html#scipy.integrate.complex_ode.set_integrator',
'scipy.integrate.complex_ode.set_jac_params': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.set_jac_params.html#scipy.integrate.complex_ode.set_jac_params',
'scipy.integrate.complex_ode.set_solout': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.set_solout.html#scipy.integrate.complex_ode.set_solout',
'scipy.integrate.complex_ode.successful': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.successful.html#scipy.integrate.complex_ode.successful',
'scipy.integrate.complex_ode.y': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.complex_ode.y.html#scipy.integrate.complex_ode.y',
'scipy.integrate.ode.get_return_code': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.get_return_code.html#scipy.integrate.ode.get_return_code',
'scipy.integrate.ode.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.integrate.html#scipy.integrate.ode.integrate',
'scipy.integrate.ode.set_f_params': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.set_f_params.html#scipy.integrate.ode.set_f_params',
'scipy.integrate.ode.set_initial_value': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.set_initial_value.html#scipy.integrate.ode.set_initial_value',
'scipy.integrate.ode.set_integrator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.set_integrator.html#scipy.integrate.ode.set_integrator',
'scipy.integrate.ode.set_jac_params': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.set_jac_params.html#scipy.integrate.ode.set_jac_params',
'scipy.integrate.ode.set_solout': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.set_solout.html#scipy.integrate.ode.set_solout',
'scipy.integrate.ode.successful': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.successful.html#scipy.integrate.ode.successful',
'scipy.integrate.ode.y': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.y.html#scipy.integrate.ode.y',
'scipy.integrate.cumtrapz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.cumtrapz.html#scipy.integrate.cumtrapz',
'scipy.integrate.dblquad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.dblquad.html#scipy.integrate.dblquad',
'scipy.integrate.fixed_quad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.fixed_quad.html#scipy.integrate.fixed_quad',
'scipy.integrate.newton_cotes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.newton_cotes.html#scipy.integrate.newton_cotes',
'scipy.integrate.nquad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.nquad.html#scipy.integrate.nquad',
'scipy.integrate.odeint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html#scipy.integrate.odeint',
'scipy.integrate.quad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quad.html#scipy.integrate.quad',
'scipy.integrate.quad_explain': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quad_explain.html#scipy.integrate.quad_explain',
'scipy.integrate.quad_vec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quad_vec.html#scipy.integrate.quad_vec',
'scipy.integrate.quadrature': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quadrature.html#scipy.integrate.quadrature',
'scipy.integrate.romb': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.romb.html#scipy.integrate.romb',
'scipy.integrate.romberg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.romberg.html#scipy.integrate.romberg',
'scipy.integrate.simps': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.simps.html#scipy.integrate.simps',
'scipy.integrate.solve_bvp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_bvp.html#scipy.integrate.solve_bvp',
'scipy.integrate.solve_ivp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html#scipy.integrate.solve_ivp',
'scipy.integrate.tplquad': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.tplquad.html#scipy.integrate.tplquad',
'scipy.integrate.trapz': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.trapz.html#scipy.integrate.trapz'},
'scipy.interpolate': { 'scipy.interpolate.Akima1DInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.html#scipy.interpolate.Akima1DInterpolator',
'scipy.interpolate.BPoly': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.html#scipy.interpolate.BPoly',
'scipy.interpolate.BSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline',
'scipy.interpolate.BarycentricInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BarycentricInterpolator.html#scipy.interpolate.BarycentricInterpolator',
'scipy.interpolate.BivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.html#scipy.interpolate.BivariateSpline',
'scipy.interpolate.CloughTocher2DInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CloughTocher2DInterpolator.html#scipy.interpolate.CloughTocher2DInterpolator',
'scipy.interpolate.CubicHermiteSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.html#scipy.interpolate.CubicHermiteSpline',
'scipy.interpolate.CubicSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.html#scipy.interpolate.CubicSpline',
'scipy.interpolate.InterpolatedUnivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.html#scipy.interpolate.InterpolatedUnivariateSpline',
'scipy.interpolate.KroghInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.KroghInterpolator.html#scipy.interpolate.KroghInterpolator',
'scipy.interpolate.LSQBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.html#scipy.interpolate.LSQBivariateSpline',
'scipy.interpolate.LSQSphereBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.html#scipy.interpolate.LSQSphereBivariateSpline',
'scipy.interpolate.LSQUnivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.html#scipy.interpolate.LSQUnivariateSpline',
'scipy.interpolate.LinearNDInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LinearNDInterpolator.html#scipy.interpolate.LinearNDInterpolator',
'scipy.interpolate.NdPPoly': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.html#scipy.interpolate.NdPPoly',
'scipy.interpolate.NearestNDInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NearestNDInterpolator.html#scipy.interpolate.NearestNDInterpolator',
'scipy.interpolate.PPoly': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.html#scipy.interpolate.PPoly',
'scipy.interpolate.PchipInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.html#scipy.interpolate.PchipInterpolator',
'scipy.interpolate.Rbf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Rbf.html#scipy.interpolate.Rbf',
'scipy.interpolate.RectBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.html#scipy.interpolate.RectBivariateSpline',
'scipy.interpolate.RectSphereBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.html#scipy.interpolate.RectSphereBivariateSpline',
'scipy.interpolate.RegularGridInterpolator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.html#scipy.interpolate.RegularGridInterpolator',
'scipy.interpolate.SmoothBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.html#scipy.interpolate.SmoothBivariateSpline',
'scipy.interpolate.SmoothSphereBivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.html#scipy.interpolate.SmoothSphereBivariateSpline',
'scipy.interpolate.UnivariateSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.html#scipy.interpolate.UnivariateSpline',
'scipy.interpolate.interp1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html#scipy.interpolate.interp1d',
'scipy.interpolate.interp2d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp2d.html#scipy.interpolate.interp2d',
'scipy.interpolate.Akima1DInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.__call__.html#scipy.interpolate.Akima1DInterpolator.__call__',
'scipy.interpolate.Akima1DInterpolator.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.antiderivative.html#scipy.interpolate.Akima1DInterpolator.antiderivative',
'scipy.interpolate.Akima1DInterpolator.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.construct_fast.html#scipy.interpolate.Akima1DInterpolator.construct_fast',
'scipy.interpolate.Akima1DInterpolator.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.derivative.html#scipy.interpolate.Akima1DInterpolator.derivative',
'scipy.interpolate.Akima1DInterpolator.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.extend.html#scipy.interpolate.Akima1DInterpolator.extend',
'scipy.interpolate.Akima1DInterpolator.from_bernstein_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.from_bernstein_basis.html#scipy.interpolate.Akima1DInterpolator.from_bernstein_basis',
'scipy.interpolate.Akima1DInterpolator.from_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.from_spline.html#scipy.interpolate.Akima1DInterpolator.from_spline',
'scipy.interpolate.Akima1DInterpolator.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.integrate.html#scipy.interpolate.Akima1DInterpolator.integrate',
'scipy.interpolate.Akima1DInterpolator.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.roots.html#scipy.interpolate.Akima1DInterpolator.roots',
'scipy.interpolate.Akima1DInterpolator.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Akima1DInterpolator.solve.html#scipy.interpolate.Akima1DInterpolator.solve',
'scipy.interpolate.BPoly.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.__call__.html#scipy.interpolate.BPoly.__call__',
'scipy.interpolate.BPoly.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.antiderivative.html#scipy.interpolate.BPoly.antiderivative',
'scipy.interpolate.BPoly.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.construct_fast.html#scipy.interpolate.BPoly.construct_fast',
'scipy.interpolate.BPoly.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.derivative.html#scipy.interpolate.BPoly.derivative',
'scipy.interpolate.BPoly.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.extend.html#scipy.interpolate.BPoly.extend',
'scipy.interpolate.BPoly.from_derivatives': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.from_derivatives.html#scipy.interpolate.BPoly.from_derivatives',
'scipy.interpolate.BPoly.from_power_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.from_power_basis.html#scipy.interpolate.BPoly.from_power_basis',
'scipy.interpolate.BPoly.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BPoly.integrate.html#scipy.interpolate.BPoly.integrate',
'scipy.interpolate.BSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.__call__.html#scipy.interpolate.BSpline.__call__',
'scipy.interpolate.BSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.antiderivative.html#scipy.interpolate.BSpline.antiderivative',
'scipy.interpolate.BSpline.basis_element': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.basis_element.html#scipy.interpolate.BSpline.basis_element',
'scipy.interpolate.BSpline.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.construct_fast.html#scipy.interpolate.BSpline.construct_fast',
'scipy.interpolate.BSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.derivative.html#scipy.interpolate.BSpline.derivative',
'scipy.interpolate.BSpline.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.integrate.html#scipy.interpolate.BSpline.integrate',
'scipy.interpolate.BSpline.tck': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.tck.html#scipy.interpolate.BSpline.tck',
'scipy.interpolate.BarycentricInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BarycentricInterpolator.__call__.html#scipy.interpolate.BarycentricInterpolator.__call__',
'scipy.interpolate.BarycentricInterpolator.add_xi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BarycentricInterpolator.add_xi.html#scipy.interpolate.BarycentricInterpolator.add_xi',
'scipy.interpolate.BarycentricInterpolator.set_yi': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BarycentricInterpolator.set_yi.html#scipy.interpolate.BarycentricInterpolator.set_yi',
'scipy.interpolate.BivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.__call__.html#scipy.interpolate.BivariateSpline.__call__',
'scipy.interpolate.BivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.ev.html#scipy.interpolate.BivariateSpline.ev',
'scipy.interpolate.BivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.get_coeffs.html#scipy.interpolate.BivariateSpline.get_coeffs',
'scipy.interpolate.BivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.get_knots.html#scipy.interpolate.BivariateSpline.get_knots',
'scipy.interpolate.BivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.get_residual.html#scipy.interpolate.BivariateSpline.get_residual',
'scipy.interpolate.BivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BivariateSpline.integral.html#scipy.interpolate.BivariateSpline.integral',
'scipy.interpolate.CloughTocher2DInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CloughTocher2DInterpolator.__call__.html#scipy.interpolate.CloughTocher2DInterpolator.__call__',
'scipy.interpolate.CubicHermiteSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.__call__.html#scipy.interpolate.CubicHermiteSpline.__call__',
'scipy.interpolate.CubicHermiteSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.antiderivative.html#scipy.interpolate.CubicHermiteSpline.antiderivative',
'scipy.interpolate.CubicHermiteSpline.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.construct_fast.html#scipy.interpolate.CubicHermiteSpline.construct_fast',
'scipy.interpolate.CubicHermiteSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.derivative.html#scipy.interpolate.CubicHermiteSpline.derivative',
'scipy.interpolate.CubicHermiteSpline.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.extend.html#scipy.interpolate.CubicHermiteSpline.extend',
'scipy.interpolate.CubicHermiteSpline.from_bernstein_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.from_bernstein_basis.html#scipy.interpolate.CubicHermiteSpline.from_bernstein_basis',
'scipy.interpolate.CubicHermiteSpline.from_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.from_spline.html#scipy.interpolate.CubicHermiteSpline.from_spline',
'scipy.interpolate.CubicHermiteSpline.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.integrate.html#scipy.interpolate.CubicHermiteSpline.integrate',
'scipy.interpolate.CubicHermiteSpline.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.roots.html#scipy.interpolate.CubicHermiteSpline.roots',
'scipy.interpolate.CubicHermiteSpline.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicHermiteSpline.solve.html#scipy.interpolate.CubicHermiteSpline.solve',
'scipy.interpolate.CubicSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.__call__.html#scipy.interpolate.CubicSpline.__call__',
'scipy.interpolate.CubicSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.antiderivative.html#scipy.interpolate.CubicSpline.antiderivative',
'scipy.interpolate.CubicSpline.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.construct_fast.html#scipy.interpolate.CubicSpline.construct_fast',
'scipy.interpolate.CubicSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.derivative.html#scipy.interpolate.CubicSpline.derivative',
'scipy.interpolate.CubicSpline.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.extend.html#scipy.interpolate.CubicSpline.extend',
'scipy.interpolate.CubicSpline.from_bernstein_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.from_bernstein_basis.html#scipy.interpolate.CubicSpline.from_bernstein_basis',
'scipy.interpolate.CubicSpline.from_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.from_spline.html#scipy.interpolate.CubicSpline.from_spline',
'scipy.interpolate.CubicSpline.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.integrate.html#scipy.interpolate.CubicSpline.integrate',
'scipy.interpolate.CubicSpline.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.roots.html#scipy.interpolate.CubicSpline.roots',
'scipy.interpolate.CubicSpline.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.CubicSpline.solve.html#scipy.interpolate.CubicSpline.solve',
'scipy.interpolate.InterpolatedUnivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.__call__.html#scipy.interpolate.InterpolatedUnivariateSpline.__call__',
'scipy.interpolate.InterpolatedUnivariateSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.antiderivative.html#scipy.interpolate.InterpolatedUnivariateSpline.antiderivative',
'scipy.interpolate.InterpolatedUnivariateSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.derivative.html#scipy.interpolate.InterpolatedUnivariateSpline.derivative',
'scipy.interpolate.InterpolatedUnivariateSpline.derivatives': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.derivatives.html#scipy.interpolate.InterpolatedUnivariateSpline.derivatives',
'scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs.html#scipy.interpolate.InterpolatedUnivariateSpline.get_coeffs',
'scipy.interpolate.InterpolatedUnivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_knots.html#scipy.interpolate.InterpolatedUnivariateSpline.get_knots',
'scipy.interpolate.InterpolatedUnivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.get_residual.html#scipy.interpolate.InterpolatedUnivariateSpline.get_residual',
'scipy.interpolate.InterpolatedUnivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.integral.html#scipy.interpolate.InterpolatedUnivariateSpline.integral',
'scipy.interpolate.InterpolatedUnivariateSpline.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.roots.html#scipy.interpolate.InterpolatedUnivariateSpline.roots',
'scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor.html#scipy.interpolate.InterpolatedUnivariateSpline.set_smoothing_factor',
'scipy.interpolate.InterpolatedUnivariateSpline.validate_input': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.InterpolatedUnivariateSpline.validate_input.html#scipy.interpolate.InterpolatedUnivariateSpline.validate_input',
'scipy.interpolate.KroghInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.KroghInterpolator.__call__.html#scipy.interpolate.KroghInterpolator.__call__',
'scipy.interpolate.KroghInterpolator.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.KroghInterpolator.derivative.html#scipy.interpolate.KroghInterpolator.derivative',
'scipy.interpolate.KroghInterpolator.derivatives': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.KroghInterpolator.derivatives.html#scipy.interpolate.KroghInterpolator.derivatives',
'scipy.interpolate.LSQBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.__call__.html#scipy.interpolate.LSQBivariateSpline.__call__',
'scipy.interpolate.LSQBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.ev.html#scipy.interpolate.LSQBivariateSpline.ev',
'scipy.interpolate.LSQBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.get_coeffs.html#scipy.interpolate.LSQBivariateSpline.get_coeffs',
'scipy.interpolate.LSQBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.get_knots.html#scipy.interpolate.LSQBivariateSpline.get_knots',
'scipy.interpolate.LSQBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.get_residual.html#scipy.interpolate.LSQBivariateSpline.get_residual',
'scipy.interpolate.LSQBivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQBivariateSpline.integral.html#scipy.interpolate.LSQBivariateSpline.integral',
'scipy.interpolate.LSQSphereBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.__call__.html#scipy.interpolate.LSQSphereBivariateSpline.__call__',
'scipy.interpolate.LSQSphereBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.ev.html#scipy.interpolate.LSQSphereBivariateSpline.ev',
'scipy.interpolate.LSQSphereBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.get_coeffs.html#scipy.interpolate.LSQSphereBivariateSpline.get_coeffs',
'scipy.interpolate.LSQSphereBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.get_knots.html#scipy.interpolate.LSQSphereBivariateSpline.get_knots',
'scipy.interpolate.LSQSphereBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQSphereBivariateSpline.get_residual.html#scipy.interpolate.LSQSphereBivariateSpline.get_residual',
'scipy.interpolate.LSQUnivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.__call__.html#scipy.interpolate.LSQUnivariateSpline.__call__',
'scipy.interpolate.LSQUnivariateSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.antiderivative.html#scipy.interpolate.LSQUnivariateSpline.antiderivative',
'scipy.interpolate.LSQUnivariateSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.derivative.html#scipy.interpolate.LSQUnivariateSpline.derivative',
'scipy.interpolate.LSQUnivariateSpline.derivatives': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.derivatives.html#scipy.interpolate.LSQUnivariateSpline.derivatives',
'scipy.interpolate.LSQUnivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.get_coeffs.html#scipy.interpolate.LSQUnivariateSpline.get_coeffs',
'scipy.interpolate.LSQUnivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.get_knots.html#scipy.interpolate.LSQUnivariateSpline.get_knots',
'scipy.interpolate.LSQUnivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.get_residual.html#scipy.interpolate.LSQUnivariateSpline.get_residual',
'scipy.interpolate.LSQUnivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.integral.html#scipy.interpolate.LSQUnivariateSpline.integral',
'scipy.interpolate.LSQUnivariateSpline.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.roots.html#scipy.interpolate.LSQUnivariateSpline.roots',
'scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor.html#scipy.interpolate.LSQUnivariateSpline.set_smoothing_factor',
'scipy.interpolate.LSQUnivariateSpline.validate_input': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LSQUnivariateSpline.validate_input.html#scipy.interpolate.LSQUnivariateSpline.validate_input',
'scipy.interpolate.LinearNDInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.LinearNDInterpolator.__call__.html#scipy.interpolate.LinearNDInterpolator.__call__',
'scipy.interpolate.NdPPoly.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.__call__.html#scipy.interpolate.NdPPoly.__call__',
'scipy.interpolate.NdPPoly.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.antiderivative.html#scipy.interpolate.NdPPoly.antiderivative',
'scipy.interpolate.NdPPoly.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.construct_fast.html#scipy.interpolate.NdPPoly.construct_fast',
'scipy.interpolate.NdPPoly.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.derivative.html#scipy.interpolate.NdPPoly.derivative',
'scipy.interpolate.NdPPoly.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.integrate.html#scipy.interpolate.NdPPoly.integrate',
'scipy.interpolate.NdPPoly.integrate_1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NdPPoly.integrate_1d.html#scipy.interpolate.NdPPoly.integrate_1d',
'scipy.interpolate.NearestNDInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.NearestNDInterpolator.__call__.html#scipy.interpolate.NearestNDInterpolator.__call__',
'scipy.interpolate.PPoly.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.__call__.html#scipy.interpolate.PPoly.__call__',
'scipy.interpolate.PPoly.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.antiderivative.html#scipy.interpolate.PPoly.antiderivative',
'scipy.interpolate.PPoly.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.construct_fast.html#scipy.interpolate.PPoly.construct_fast',
'scipy.interpolate.PPoly.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.derivative.html#scipy.interpolate.PPoly.derivative',
'scipy.interpolate.PPoly.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.extend.html#scipy.interpolate.PPoly.extend',
'scipy.interpolate.PPoly.from_bernstein_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.from_bernstein_basis.html#scipy.interpolate.PPoly.from_bernstein_basis',
'scipy.interpolate.PPoly.from_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.from_spline.html#scipy.interpolate.PPoly.from_spline',
'scipy.interpolate.PPoly.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.integrate.html#scipy.interpolate.PPoly.integrate',
'scipy.interpolate.PPoly.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.roots.html#scipy.interpolate.PPoly.roots',
'scipy.interpolate.PPoly.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PPoly.solve.html#scipy.interpolate.PPoly.solve',
'scipy.interpolate.PchipInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.__call__.html#scipy.interpolate.PchipInterpolator.__call__',
'scipy.interpolate.PchipInterpolator.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.antiderivative.html#scipy.interpolate.PchipInterpolator.antiderivative',
'scipy.interpolate.PchipInterpolator.construct_fast': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.construct_fast.html#scipy.interpolate.PchipInterpolator.construct_fast',
'scipy.interpolate.PchipInterpolator.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.derivative.html#scipy.interpolate.PchipInterpolator.derivative',
'scipy.interpolate.PchipInterpolator.extend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.extend.html#scipy.interpolate.PchipInterpolator.extend',
'scipy.interpolate.PchipInterpolator.from_bernstein_basis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.from_bernstein_basis.html#scipy.interpolate.PchipInterpolator.from_bernstein_basis',
'scipy.interpolate.PchipInterpolator.from_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.from_spline.html#scipy.interpolate.PchipInterpolator.from_spline',
'scipy.interpolate.PchipInterpolator.integrate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.integrate.html#scipy.interpolate.PchipInterpolator.integrate',
'scipy.interpolate.PchipInterpolator.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.roots.html#scipy.interpolate.PchipInterpolator.roots',
'scipy.interpolate.PchipInterpolator.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.PchipInterpolator.solve.html#scipy.interpolate.PchipInterpolator.solve',
'scipy.interpolate.Rbf.A': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Rbf.A.html#scipy.interpolate.Rbf.A',
'scipy.interpolate.Rbf.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.Rbf.__call__.html#scipy.interpolate.Rbf.__call__',
'scipy.interpolate.RectBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.__call__.html#scipy.interpolate.RectBivariateSpline.__call__',
'scipy.interpolate.RectBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.ev.html#scipy.interpolate.RectBivariateSpline.ev',
'scipy.interpolate.RectBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.get_coeffs.html#scipy.interpolate.RectBivariateSpline.get_coeffs',
'scipy.interpolate.RectBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.get_knots.html#scipy.interpolate.RectBivariateSpline.get_knots',
'scipy.interpolate.RectBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.get_residual.html#scipy.interpolate.RectBivariateSpline.get_residual',
'scipy.interpolate.RectBivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.integral.html#scipy.interpolate.RectBivariateSpline.integral',
'scipy.interpolate.RectSphereBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.__call__.html#scipy.interpolate.RectSphereBivariateSpline.__call__',
'scipy.interpolate.RectSphereBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.ev.html#scipy.interpolate.RectSphereBivariateSpline.ev',
'scipy.interpolate.RectSphereBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.get_coeffs.html#scipy.interpolate.RectSphereBivariateSpline.get_coeffs',
'scipy.interpolate.RectSphereBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.get_knots.html#scipy.interpolate.RectSphereBivariateSpline.get_knots',
'scipy.interpolate.RectSphereBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectSphereBivariateSpline.get_residual.html#scipy.interpolate.RectSphereBivariateSpline.get_residual',
'scipy.interpolate.RegularGridInterpolator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.__call__.html#scipy.interpolate.RegularGridInterpolator.__call__',
'scipy.interpolate.SmoothBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.__call__.html#scipy.interpolate.SmoothBivariateSpline.__call__',
'scipy.interpolate.SmoothBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.ev.html#scipy.interpolate.SmoothBivariateSpline.ev',
'scipy.interpolate.SmoothBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.get_coeffs.html#scipy.interpolate.SmoothBivariateSpline.get_coeffs',
'scipy.interpolate.SmoothBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.get_knots.html#scipy.interpolate.SmoothBivariateSpline.get_knots',
'scipy.interpolate.SmoothBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.get_residual.html#scipy.interpolate.SmoothBivariateSpline.get_residual',
'scipy.interpolate.SmoothBivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothBivariateSpline.integral.html#scipy.interpolate.SmoothBivariateSpline.integral',
'scipy.interpolate.SmoothSphereBivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.__call__.html#scipy.interpolate.SmoothSphereBivariateSpline.__call__',
'scipy.interpolate.SmoothSphereBivariateSpline.ev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.ev.html#scipy.interpolate.SmoothSphereBivariateSpline.ev',
'scipy.interpolate.SmoothSphereBivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.get_coeffs.html#scipy.interpolate.SmoothSphereBivariateSpline.get_coeffs',
'scipy.interpolate.SmoothSphereBivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.get_knots.html#scipy.interpolate.SmoothSphereBivariateSpline.get_knots',
'scipy.interpolate.SmoothSphereBivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.SmoothSphereBivariateSpline.get_residual.html#scipy.interpolate.SmoothSphereBivariateSpline.get_residual',
'scipy.interpolate.UnivariateSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.__call__.html#scipy.interpolate.UnivariateSpline.__call__',
'scipy.interpolate.UnivariateSpline.antiderivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.antiderivative.html#scipy.interpolate.UnivariateSpline.antiderivative',
'scipy.interpolate.UnivariateSpline.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.derivative.html#scipy.interpolate.UnivariateSpline.derivative',
'scipy.interpolate.UnivariateSpline.derivatives': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.derivatives.html#scipy.interpolate.UnivariateSpline.derivatives',
'scipy.interpolate.UnivariateSpline.get_coeffs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.get_coeffs.html#scipy.interpolate.UnivariateSpline.get_coeffs',
'scipy.interpolate.UnivariateSpline.get_knots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.get_knots.html#scipy.interpolate.UnivariateSpline.get_knots',
'scipy.interpolate.UnivariateSpline.get_residual': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.get_residual.html#scipy.interpolate.UnivariateSpline.get_residual',
'scipy.interpolate.UnivariateSpline.integral': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.integral.html#scipy.interpolate.UnivariateSpline.integral',
'scipy.interpolate.UnivariateSpline.roots': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.roots.html#scipy.interpolate.UnivariateSpline.roots',
'scipy.interpolate.UnivariateSpline.set_smoothing_factor': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.set_smoothing_factor.html#scipy.interpolate.UnivariateSpline.set_smoothing_factor',
'scipy.interpolate.UnivariateSpline.validate_input': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.UnivariateSpline.validate_input.html#scipy.interpolate.UnivariateSpline.validate_input',
'scipy.interpolate.interp1d.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.__call__.html#scipy.interpolate.interp1d.__call__',
'scipy.interpolate.interp1d.fill_value': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.fill_value.html#scipy.interpolate.interp1d.fill_value',
'scipy.interpolate.interp2d.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp2d.__call__.html#scipy.interpolate.interp2d.__call__',
'scipy.interpolate.approximate_taylor_polynomial': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.approximate_taylor_polynomial.html#scipy.interpolate.approximate_taylor_polynomial',
'scipy.interpolate.barycentric_interpolate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.barycentric_interpolate.html#scipy.interpolate.barycentric_interpolate',
'scipy.interpolate.bisplev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.bisplev.html#scipy.interpolate.bisplev',
'scipy.interpolate.bisplrep': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.bisplrep.html#scipy.interpolate.bisplrep',
'scipy.interpolate.griddata': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html#scipy.interpolate.griddata',
'scipy.interpolate.insert': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.insert.html#scipy.interpolate.insert',
'scipy.interpolate.interpn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interpn.html#scipy.interpolate.interpn',
'scipy.interpolate.krogh_interpolate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.krogh_interpolate.html#scipy.interpolate.krogh_interpolate',
'scipy.interpolate.lagrange': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.lagrange.html#scipy.interpolate.lagrange',
'scipy.interpolate.make_interp_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.make_interp_spline.html#scipy.interpolate.make_interp_spline',
'scipy.interpolate.make_lsq_spline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.make_lsq_spline.html#scipy.interpolate.make_lsq_spline',
'scipy.interpolate.pade': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.pade.html#scipy.interpolate.pade',
'scipy.interpolate.pchip_interpolate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.pchip_interpolate.html#scipy.interpolate.pchip_interpolate',
'scipy.interpolate.spalde': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.spalde.html#scipy.interpolate.spalde',
'scipy.interpolate.splantider': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splantider.html#scipy.interpolate.splantider',
'scipy.interpolate.splder': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splder.html#scipy.interpolate.splder',
'scipy.interpolate.splev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splev.html#scipy.interpolate.splev',
'scipy.interpolate.splint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splint.html#scipy.interpolate.splint',
'scipy.interpolate.splprep': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splprep.html#scipy.interpolate.splprep',
'scipy.interpolate.splrep': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.splrep.html#scipy.interpolate.splrep',
'scipy.interpolate.sproot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.sproot.html#scipy.interpolate.sproot'},
'scipy.io.arff': { 'scipy.io.arff.MetaData': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.MetaData.html#scipy.io.arff.MetaData',
'scipy.io.arff.ArffError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.ArffError.html#scipy.io.arff.ArffError.with_traceback',
'scipy.io.arff.MetaData.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.MetaData.__getitem__.html#scipy.io.arff.MetaData.__getitem__',
'scipy.io.arff.MetaData.names': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.MetaData.names.html#scipy.io.arff.MetaData.names',
'scipy.io.arff.MetaData.types': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.MetaData.types.html#scipy.io.arff.MetaData.types',
'scipy.io.arff.ParseArffError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.ParseArffError.html#scipy.io.arff.ParseArffError.with_traceback',
'scipy.io.arff.loadarff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.loadarff.html#scipy.io.arff.loadarff'},
'scipy.odr': { 'scipy.odr.Data': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Data.html#scipy.odr.Data',
'scipy.odr.Model': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Model.html#scipy.odr.Model',
'scipy.odr.ODR': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.ODR.html#scipy.odr.ODR',
'scipy.odr.Output': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Output.html#scipy.odr.Output',
'scipy.odr.RealData': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.RealData.html#scipy.odr.RealData',
'scipy.odr.Data.set_meta': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Data.set_meta.html#scipy.odr.Data.set_meta',
'scipy.odr.Model.set_meta': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Model.set_meta.html#scipy.odr.Model.set_meta',
'scipy.odr.ODR.restart': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.ODR.restart.html#scipy.odr.ODR.restart',
'scipy.odr.ODR.run': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.ODR.run.html#scipy.odr.ODR.run',
'scipy.odr.ODR.set_iprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.ODR.set_iprint.html#scipy.odr.ODR.set_iprint',
'scipy.odr.ODR.set_job': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.ODR.set_job.html#scipy.odr.ODR.set_job',
'scipy.odr.OdrError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.OdrError.html#scipy.odr.OdrError.with_traceback',
'scipy.odr.OdrStop.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.OdrStop.html#scipy.odr.OdrStop.with_traceback',
'scipy.odr.OdrWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.OdrWarning.html#scipy.odr.OdrWarning.with_traceback',
'scipy.odr.Output.pprint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.Output.pprint.html#scipy.odr.Output.pprint',
'scipy.odr.RealData.set_meta': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.RealData.set_meta.html#scipy.odr.RealData.set_meta',
'scipy.odr.odr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.odr.html#scipy.odr.odr',
'scipy.odr.polynomial': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.odr.polynomial.html#scipy.odr.polynomial'},
'scipy.sparse.linalg': { 'scipy.sparse.linalg.LinearOperator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.html#scipy.sparse.linalg.LinearOperator',
'scipy.sparse.linalg.SuperLU': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.SuperLU.html#scipy.sparse.linalg.SuperLU',
'scipy.sparse.linalg.ArpackError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.ArpackError.html#scipy.sparse.linalg.ArpackError.with_traceback',
'scipy.sparse.linalg.ArpackNoConvergence.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.ArpackNoConvergence.html#scipy.sparse.linalg.ArpackNoConvergence.with_traceback',
'scipy.sparse.linalg.LinearOperator.H': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.H.html#scipy.sparse.linalg.LinearOperator.H',
'scipy.sparse.linalg.LinearOperator.T': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.T.html#scipy.sparse.linalg.LinearOperator.T',
'scipy.sparse.linalg.LinearOperator.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.__call__.html#scipy.sparse.linalg.LinearOperator.__call__',
'scipy.sparse.linalg.LinearOperator.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.__mul__.html#scipy.sparse.linalg.LinearOperator.__mul__',
'scipy.sparse.linalg.LinearOperator.adjoint': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.adjoint.html#scipy.sparse.linalg.LinearOperator.adjoint',
'scipy.sparse.linalg.LinearOperator.dot': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.dot.html#scipy.sparse.linalg.LinearOperator.dot',
'scipy.sparse.linalg.LinearOperator.matmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.matmat.html#scipy.sparse.linalg.LinearOperator.matmat',
'scipy.sparse.linalg.LinearOperator.matvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.matvec.html#scipy.sparse.linalg.LinearOperator.matvec',
'scipy.sparse.linalg.LinearOperator.rmatmat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.rmatmat.html#scipy.sparse.linalg.LinearOperator.rmatmat',
'scipy.sparse.linalg.LinearOperator.rmatvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.rmatvec.html#scipy.sparse.linalg.LinearOperator.rmatvec',
'scipy.sparse.linalg.LinearOperator.transpose': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.LinearOperator.transpose.html#scipy.sparse.linalg.LinearOperator.transpose',
'scipy.sparse.linalg.MatrixRankWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.MatrixRankWarning.html#scipy.sparse.linalg.MatrixRankWarning.with_traceback',
'scipy.sparse.linalg.SuperLU.solve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.SuperLU.solve.html#scipy.sparse.linalg.SuperLU.solve',
'scipy.sparse.linalg.aslinearoperator': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.aslinearoperator.html#scipy.sparse.linalg.aslinearoperator',
'scipy.sparse.linalg.bicg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicg.html#scipy.sparse.linalg.bicg',
'scipy.sparse.linalg.bicgstab': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html#scipy.sparse.linalg.bicgstab',
'scipy.sparse.linalg.cg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.cg.html#scipy.sparse.linalg.cg',
'scipy.sparse.linalg.cgs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.cgs.html#scipy.sparse.linalg.cgs',
'scipy.sparse.linalg.eigs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs',
'scipy.sparse.linalg.eigsh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.eigsh.html#scipy.sparse.linalg.eigsh',
'scipy.sparse.linalg.expm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.expm.html#scipy.sparse.linalg.expm',
'scipy.sparse.linalg.expm_multiply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.expm_multiply.html#scipy.sparse.linalg.expm_multiply',
'scipy.sparse.linalg.factorized': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.factorized.html#scipy.sparse.linalg.factorized',
'scipy.sparse.linalg.gcrotmk': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.gcrotmk.html#scipy.sparse.linalg.gcrotmk',
'scipy.sparse.linalg.gmres': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.gmres.html#scipy.sparse.linalg.gmres',
'scipy.sparse.linalg.inv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.inv.html#scipy.sparse.linalg.inv',
'scipy.sparse.linalg.lgmres': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lgmres.html#scipy.sparse.linalg.lgmres',
'scipy.sparse.linalg.lobpcg': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html#scipy.sparse.linalg.lobpcg',
'scipy.sparse.linalg.lsmr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsmr.html#scipy.sparse.linalg.lsmr',
'scipy.sparse.linalg.lsqr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html#scipy.sparse.linalg.lsqr',
'scipy.sparse.linalg.minres': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html#scipy.sparse.linalg.minres',
'scipy.sparse.linalg.norm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.norm.html#scipy.sparse.linalg.norm',
'scipy.sparse.linalg.onenormest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.onenormest.html#scipy.sparse.linalg.onenormest',
'scipy.sparse.linalg.qmr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.qmr.html#scipy.sparse.linalg.qmr',
'scipy.sparse.linalg.spilu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.spilu.html#scipy.sparse.linalg.spilu',
'scipy.sparse.linalg.splu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.splu.html#scipy.sparse.linalg.splu',
'scipy.sparse.linalg.spsolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.spsolve.html#scipy.sparse.linalg.spsolve',
'scipy.sparse.linalg.spsolve_triangular': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.spsolve_triangular.html#scipy.sparse.linalg.spsolve_triangular',
'scipy.sparse.linalg.svds': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.svds.html#scipy.sparse.linalg.svds',
'scipy.sparse.linalg.use_solver': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.use_solver.html#scipy.sparse.linalg.use_solver'},
'scipy.spatial.transform': { 'scipy.spatial.transform.Rotation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html#scipy.spatial.transform.Rotation',
'scipy.spatial.transform.RotationSpline': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.RotationSpline.html#scipy.spatial.transform.RotationSpline',
'scipy.spatial.transform.Slerp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Slerp.html#scipy.spatial.transform.Slerp',
'scipy.spatial.transform.Rotation.__getitem__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.__getitem__.html#scipy.spatial.transform.Rotation.__getitem__',
'scipy.spatial.transform.Rotation.__len__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.__len__.html#scipy.spatial.transform.Rotation.__len__',
'scipy.spatial.transform.Rotation.__mul__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.__mul__.html#scipy.spatial.transform.Rotation.__mul__',
'scipy.spatial.transform.Rotation.align_vectors': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.align_vectors.html#scipy.spatial.transform.Rotation.align_vectors',
'scipy.spatial.transform.Rotation.apply': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.apply.html#scipy.spatial.transform.Rotation.apply',
'scipy.spatial.transform.Rotation.as_dcm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_dcm.html#scipy.spatial.transform.Rotation.as_dcm',
'scipy.spatial.transform.Rotation.as_euler': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_euler.html#scipy.spatial.transform.Rotation.as_euler',
'scipy.spatial.transform.Rotation.as_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_matrix.html#scipy.spatial.transform.Rotation.as_matrix',
'scipy.spatial.transform.Rotation.as_quat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_quat.html#scipy.spatial.transform.Rotation.as_quat',
'scipy.spatial.transform.Rotation.as_rotvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_rotvec.html#scipy.spatial.transform.Rotation.as_rotvec',
'scipy.spatial.transform.Rotation.create_group': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.create_group.html#scipy.spatial.transform.Rotation.create_group',
'scipy.spatial.transform.Rotation.from_dcm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_dcm.html#scipy.spatial.transform.Rotation.from_dcm',
'scipy.spatial.transform.Rotation.from_euler': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_euler.html#scipy.spatial.transform.Rotation.from_euler',
'scipy.spatial.transform.Rotation.from_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_matrix.html#scipy.spatial.transform.Rotation.from_matrix',
'scipy.spatial.transform.Rotation.from_quat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_quat.html#scipy.spatial.transform.Rotation.from_quat',
'scipy.spatial.transform.Rotation.from_rotvec': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_rotvec.html#scipy.spatial.transform.Rotation.from_rotvec',
'scipy.spatial.transform.Rotation.identity': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.identity.html#scipy.spatial.transform.Rotation.identity',
'scipy.spatial.transform.Rotation.inv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.inv.html#scipy.spatial.transform.Rotation.inv',
'scipy.spatial.transform.Rotation.magnitude': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.magnitude.html#scipy.spatial.transform.Rotation.magnitude',
'scipy.spatial.transform.Rotation.match_vectors': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.match_vectors.html#scipy.spatial.transform.Rotation.match_vectors',
'scipy.spatial.transform.Rotation.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.mean.html#scipy.spatial.transform.Rotation.mean',
'scipy.spatial.transform.Rotation.random': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html#scipy.spatial.transform.Rotation.random',
'scipy.spatial.transform.Rotation.reduce': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.reduce.html#scipy.spatial.transform.Rotation.reduce',
'scipy.spatial.transform.RotationSpline.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.RotationSpline.__call__.html#scipy.spatial.transform.RotationSpline.__call__',
'scipy.spatial.transform.Slerp.__call__': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Slerp.__call__.html#scipy.spatial.transform.Slerp.__call__'},
'scipy.constants': { 'scipy.constants.ConstantWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.ConstantWarning.html#scipy.constants.ConstantWarning.with_traceback',
'scipy.constants.convert_temperature': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.convert_temperature.html#scipy.constants.convert_temperature',
'scipy.constants.find': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.find.html#scipy.constants.find',
'scipy.constants.lambda2nu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.lambda2nu.html#scipy.constants.lambda2nu',
'scipy.constants.nu2lambda': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.nu2lambda.html#scipy.constants.nu2lambda',
'scipy.constants.precision': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.precision.html#scipy.constants.precision',
'scipy.constants.unit': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.unit.html#scipy.constants.unit',
'scipy.constants.value': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.constants.value.html#scipy.constants.value'},
'scipy.io.wavfile': { 'scipy.io.wavfile.WavFileWarning.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.WavFileWarning.html#scipy.io.wavfile.WavFileWarning.with_traceback',
'scipy.io.wavfile.read': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.read.html#scipy.io.wavfile.read',
'scipy.io.wavfile.write': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.write.html#scipy.io.wavfile.write'},
'scipy.sparse.csgraph': { 'scipy.sparse.csgraph.NegativeCycleError.with_traceback': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.NegativeCycleError.html#scipy.sparse.csgraph.NegativeCycleError.with_traceback',
'scipy.sparse.csgraph.bellman_ford': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.bellman_ford.html#scipy.sparse.csgraph.bellman_ford',
'scipy.sparse.csgraph.breadth_first_order': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.breadth_first_order.html#scipy.sparse.csgraph.breadth_first_order',
'scipy.sparse.csgraph.breadth_first_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.breadth_first_tree.html#scipy.sparse.csgraph.breadth_first_tree',
'scipy.sparse.csgraph.connected_components': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.connected_components.html#scipy.sparse.csgraph.connected_components',
'scipy.sparse.csgraph.construct_dist_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.construct_dist_matrix.html#scipy.sparse.csgraph.construct_dist_matrix',
'scipy.sparse.csgraph.csgraph_from_dense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.csgraph_from_dense.html#scipy.sparse.csgraph.csgraph_from_dense',
'scipy.sparse.csgraph.csgraph_from_masked': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.csgraph_from_masked.html#scipy.sparse.csgraph.csgraph_from_masked',
'scipy.sparse.csgraph.csgraph_masked_from_dense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.csgraph_masked_from_dense.html#scipy.sparse.csgraph.csgraph_masked_from_dense',
'scipy.sparse.csgraph.csgraph_to_dense': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.csgraph_to_dense.html#scipy.sparse.csgraph.csgraph_to_dense',
'scipy.sparse.csgraph.csgraph_to_masked': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.csgraph_to_masked.html#scipy.sparse.csgraph.csgraph_to_masked',
'scipy.sparse.csgraph.depth_first_order': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.depth_first_order.html#scipy.sparse.csgraph.depth_first_order',
'scipy.sparse.csgraph.depth_first_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.depth_first_tree.html#scipy.sparse.csgraph.depth_first_tree',
'scipy.sparse.csgraph.dijkstra': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.dijkstra.html#scipy.sparse.csgraph.dijkstra',
'scipy.sparse.csgraph.floyd_warshall': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.floyd_warshall.html#scipy.sparse.csgraph.floyd_warshall',
'scipy.sparse.csgraph.johnson': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.johnson.html#scipy.sparse.csgraph.johnson',
'scipy.sparse.csgraph.laplacian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html#scipy.sparse.csgraph.laplacian',
'scipy.sparse.csgraph.maximum_bipartite_matching': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.maximum_bipartite_matching.html#scipy.sparse.csgraph.maximum_bipartite_matching',
'scipy.sparse.csgraph.maximum_flow': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.maximum_flow.html#scipy.sparse.csgraph.maximum_flow',
'scipy.sparse.csgraph.minimum_spanning_tree': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.minimum_spanning_tree.html#scipy.sparse.csgraph.minimum_spanning_tree',
'scipy.sparse.csgraph.reconstruct_path': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.reconstruct_path.html#scipy.sparse.csgraph.reconstruct_path',
'scipy.sparse.csgraph.reverse_cuthill_mckee': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.reverse_cuthill_mckee.html#scipy.sparse.csgraph.reverse_cuthill_mckee',
'scipy.sparse.csgraph.shortest_path': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.shortest_path.html#scipy.sparse.csgraph.shortest_path',
'scipy.sparse.csgraph.structural_rank': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.structural_rank.html#scipy.sparse.csgraph.structural_rank'},
'scipy.cluster.vq': { 'scipy.cluster.vq.kmeans': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.kmeans.html#scipy.cluster.vq.kmeans',
'scipy.cluster.vq.kmeans2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.kmeans2.html#scipy.cluster.vq.kmeans2',
'scipy.cluster.vq.vq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.vq.html#scipy.cluster.vq.vq',
'scipy.cluster.vq.whiten': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.whiten.html#scipy.cluster.vq.whiten'},
'scipy.fft': { 'scipy.fft.dct': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dct.html#scipy.fft.dct',
'scipy.fft.dctn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dctn.html#scipy.fft.dctn',
'scipy.fft.dst': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dst.html#scipy.fft.dst',
'scipy.fft.dstn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.dstn.html#scipy.fft.dstn',
'scipy.fft.fft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fft.html#scipy.fft.fft',
'scipy.fft.fft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fft2.html#scipy.fft.fft2',
'scipy.fft.fftfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fftfreq.html#scipy.fft.fftfreq',
'scipy.fft.fftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fftn.html#scipy.fft.fftn',
'scipy.fft.fftshift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.fftshift.html#scipy.fft.fftshift',
'scipy.fft.get_workers': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.get_workers.html#scipy.fft.get_workers',
'scipy.fft.hfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.hfft.html#scipy.fft.hfft',
'scipy.fft.hfft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.hfft2.html#scipy.fft.hfft2',
'scipy.fft.hfftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.hfftn.html#scipy.fft.hfftn',
'scipy.fft.idct': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.idct.html#scipy.fft.idct',
'scipy.fft.idctn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.idctn.html#scipy.fft.idctn',
'scipy.fft.idst': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.idst.html#scipy.fft.idst',
'scipy.fft.idstn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.idstn.html#scipy.fft.idstn',
'scipy.fft.ifft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ifft.html#scipy.fft.ifft',
'scipy.fft.ifft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ifft2.html#scipy.fft.ifft2',
'scipy.fft.ifftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ifftn.html#scipy.fft.ifftn',
'scipy.fft.ifftshift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ifftshift.html#scipy.fft.ifftshift',
'scipy.fft.ihfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ihfft.html#scipy.fft.ihfft',
'scipy.fft.ihfft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ihfft2.html#scipy.fft.ihfft2',
'scipy.fft.ihfftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.ihfftn.html#scipy.fft.ihfftn',
'scipy.fft.irfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.irfft.html#scipy.fft.irfft',
'scipy.fft.irfft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.irfft2.html#scipy.fft.irfft2',
'scipy.fft.irfftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.irfftn.html#scipy.fft.irfftn',
'scipy.fft.next_fast_len': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.next_fast_len.html#scipy.fft.next_fast_len',
'scipy.fft.register_backend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.register_backend.html#scipy.fft.register_backend',
'scipy.fft.rfft': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfft.html#scipy.fft.rfft',
'scipy.fft.rfft2': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfft2.html#scipy.fft.rfft2',
'scipy.fft.rfftfreq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfftfreq.html#scipy.fft.rfftfreq',
'scipy.fft.rfftn': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.rfftn.html#scipy.fft.rfftn',
'scipy.fft.set_backend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.set_backend.html#scipy.fft.set_backend',
'scipy.fft.set_global_backend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.set_global_backend.html#scipy.fft.set_global_backend',
'scipy.fft.set_workers': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.set_workers.html#scipy.fft.set_workers',
'scipy.fft.skip_backend': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fft.skip_backend.html#scipy.fft.skip_backend'},
'scipy.fftpack.convolve': { 'scipy.fftpack.convolve.convolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.convolve.convolve.html#scipy.fftpack.convolve.convolve',
'scipy.fftpack.convolve.convolve_z': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.convolve.convolve_z.html#scipy.fftpack.convolve.convolve_z',
'scipy.fftpack.convolve.destroy_convolve_cache': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.convolve.destroy_convolve_cache.html#scipy.fftpack.convolve.destroy_convolve_cache',
'scipy.fftpack.convolve.init_convolution_kernel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.fftpack.convolve.init_convolution_kernel.html#scipy.fftpack.convolve.init_convolution_kernel'},
'scipy.linalg.blas': { 'scipy.linalg.blas.find_best_blas_type': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.blas.find_best_blas_type.html#scipy.linalg.blas.find_best_blas_type',
'scipy.linalg.blas.get_blas_funcs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.blas.get_blas_funcs.html#scipy.linalg.blas.get_blas_funcs'},
'scipy.linalg.interpolative': { 'scipy.linalg.interpolative.estimate_rank': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.estimate_rank.html#scipy.linalg.interpolative.estimate_rank',
'scipy.linalg.interpolative.estimate_spectral_norm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.estimate_spectral_norm.html#scipy.linalg.interpolative.estimate_spectral_norm',
'scipy.linalg.interpolative.estimate_spectral_norm_diff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.estimate_spectral_norm_diff.html#scipy.linalg.interpolative.estimate_spectral_norm_diff',
'scipy.linalg.interpolative.id_to_svd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.id_to_svd.html#scipy.linalg.interpolative.id_to_svd',
'scipy.linalg.interpolative.interp_decomp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.interp_decomp.html#scipy.linalg.interpolative.interp_decomp',
'scipy.linalg.interpolative.rand': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.rand.html#scipy.linalg.interpolative.rand',
'scipy.linalg.interpolative.reconstruct_interp_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.reconstruct_interp_matrix.html#scipy.linalg.interpolative.reconstruct_interp_matrix',
'scipy.linalg.interpolative.reconstruct_matrix_from_id': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.reconstruct_matrix_from_id.html#scipy.linalg.interpolative.reconstruct_matrix_from_id',
'scipy.linalg.interpolative.reconstruct_skel_matrix': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.reconstruct_skel_matrix.html#scipy.linalg.interpolative.reconstruct_skel_matrix',
'scipy.linalg.interpolative.seed': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.seed.html#scipy.linalg.interpolative.seed',
'scipy.linalg.interpolative.svd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.interpolative.svd.html#scipy.linalg.interpolative.svd'},
'scipy.linalg.lapack': { 'scipy.linalg.lapack.cgegv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lapack.cgegv.html#scipy.linalg.lapack.cgegv',
'scipy.linalg.lapack.dgegv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lapack.dgegv.html#scipy.linalg.lapack.dgegv',
'scipy.linalg.lapack.get_lapack_funcs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lapack.get_lapack_funcs.html#scipy.linalg.lapack.get_lapack_funcs',
'scipy.linalg.lapack.sgegv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lapack.sgegv.html#scipy.linalg.lapack.sgegv',
'scipy.linalg.lapack.zgegv': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lapack.zgegv.html#scipy.linalg.lapack.zgegv'},
'scipy.misc': { 'scipy.misc.ascent': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.ascent.html#scipy.misc.ascent',
'scipy.misc.central_diff_weights': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.central_diff_weights.html#scipy.misc.central_diff_weights',
'scipy.misc.derivative': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.derivative.html#scipy.misc.derivative',
'scipy.misc.electrocardiogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.electrocardiogram.html#scipy.misc.electrocardiogram',
'scipy.misc.face': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.face.html#scipy.misc.face'},
'scipy.ndimage': { 'scipy.ndimage.affine_transform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html#scipy.ndimage.affine_transform',
'scipy.ndimage.binary_closing': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_closing.html#scipy.ndimage.binary_closing',
'scipy.ndimage.binary_dilation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_dilation.html#scipy.ndimage.binary_dilation',
'scipy.ndimage.binary_erosion': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_erosion.html#scipy.ndimage.binary_erosion',
'scipy.ndimage.binary_fill_holes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_fill_holes.html#scipy.ndimage.binary_fill_holes',
'scipy.ndimage.binary_hit_or_miss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_hit_or_miss.html#scipy.ndimage.binary_hit_or_miss',
'scipy.ndimage.binary_opening': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_opening.html#scipy.ndimage.binary_opening',
'scipy.ndimage.binary_propagation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_propagation.html#scipy.ndimage.binary_propagation',
'scipy.ndimage.black_tophat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.black_tophat.html#scipy.ndimage.black_tophat',
'scipy.ndimage.center_of_mass': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.center_of_mass.html#scipy.ndimage.center_of_mass',
'scipy.ndimage.convolve': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.convolve.html#scipy.ndimage.convolve',
'scipy.ndimage.convolve1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.convolve1d.html#scipy.ndimage.convolve1d',
'scipy.ndimage.correlate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.correlate.html#scipy.ndimage.correlate',
'scipy.ndimage.correlate1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.correlate1d.html#scipy.ndimage.correlate1d',
'scipy.ndimage.distance_transform_bf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.distance_transform_bf.html#scipy.ndimage.distance_transform_bf',
'scipy.ndimage.distance_transform_cdt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.distance_transform_cdt.html#scipy.ndimage.distance_transform_cdt',
'scipy.ndimage.distance_transform_edt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.distance_transform_edt.html#scipy.ndimage.distance_transform_edt',
'scipy.ndimage.extrema': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.extrema.html#scipy.ndimage.extrema',
'scipy.ndimage.find_objects': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.find_objects.html#scipy.ndimage.find_objects',
'scipy.ndimage.fourier_ellipsoid': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.fourier_ellipsoid.html#scipy.ndimage.fourier_ellipsoid',
'scipy.ndimage.fourier_gaussian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.fourier_gaussian.html#scipy.ndimage.fourier_gaussian',
'scipy.ndimage.fourier_shift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.fourier_shift.html#scipy.ndimage.fourier_shift',
'scipy.ndimage.fourier_uniform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.fourier_uniform.html#scipy.ndimage.fourier_uniform',
'scipy.ndimage.gaussian_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_filter.html#scipy.ndimage.gaussian_filter',
'scipy.ndimage.gaussian_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_filter1d.html#scipy.ndimage.gaussian_filter1d',
'scipy.ndimage.gaussian_gradient_magnitude': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_gradient_magnitude.html#scipy.ndimage.gaussian_gradient_magnitude',
'scipy.ndimage.gaussian_laplace': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.gaussian_laplace.html#scipy.ndimage.gaussian_laplace',
'scipy.ndimage.generate_binary_structure': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.generate_binary_structure.html#scipy.ndimage.generate_binary_structure',
'scipy.ndimage.generic_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.generic_filter.html#scipy.ndimage.generic_filter',
'scipy.ndimage.generic_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.generic_filter1d.html#scipy.ndimage.generic_filter1d',
'scipy.ndimage.generic_gradient_magnitude': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.generic_gradient_magnitude.html#scipy.ndimage.generic_gradient_magnitude',
'scipy.ndimage.generic_laplace': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.generic_laplace.html#scipy.ndimage.generic_laplace',
'scipy.ndimage.geometric_transform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.geometric_transform.html#scipy.ndimage.geometric_transform',
'scipy.ndimage.grey_closing': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.grey_closing.html#scipy.ndimage.grey_closing',
'scipy.ndimage.grey_dilation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.grey_dilation.html#scipy.ndimage.grey_dilation',
'scipy.ndimage.grey_erosion': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.grey_erosion.html#scipy.ndimage.grey_erosion',
'scipy.ndimage.grey_opening': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.grey_opening.html#scipy.ndimage.grey_opening',
'scipy.ndimage.histogram': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.histogram.html#scipy.ndimage.histogram',
'scipy.ndimage.iterate_structure': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.iterate_structure.html#scipy.ndimage.iterate_structure',
'scipy.ndimage.label': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.label.html#scipy.ndimage.label',
'scipy.ndimage.labeled_comprehension': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.labeled_comprehension.html#scipy.ndimage.labeled_comprehension',
'scipy.ndimage.laplace': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.laplace.html#scipy.ndimage.laplace',
'scipy.ndimage.map_coordinates': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage.map_coordinates',
'scipy.ndimage.maximum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.maximum.html#scipy.ndimage.maximum',
'scipy.ndimage.maximum_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.maximum_filter.html#scipy.ndimage.maximum_filter',
'scipy.ndimage.maximum_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.maximum_filter1d.html#scipy.ndimage.maximum_filter1d',
'scipy.ndimage.maximum_position': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.maximum_position.html#scipy.ndimage.maximum_position',
'scipy.ndimage.mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.mean.html#scipy.ndimage.mean',
'scipy.ndimage.median': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.median.html#scipy.ndimage.median',
'scipy.ndimage.median_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.median_filter.html#scipy.ndimage.median_filter',
'scipy.ndimage.minimum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.minimum.html#scipy.ndimage.minimum',
'scipy.ndimage.minimum_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.minimum_filter.html#scipy.ndimage.minimum_filter',
'scipy.ndimage.minimum_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.minimum_filter1d.html#scipy.ndimage.minimum_filter1d',
'scipy.ndimage.minimum_position': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.minimum_position.html#scipy.ndimage.minimum_position',
'scipy.ndimage.morphological_gradient': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphological_gradient.html#scipy.ndimage.morphological_gradient',
'scipy.ndimage.morphological_laplace': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphological_laplace.html#scipy.ndimage.morphological_laplace',
'scipy.ndimage.percentile_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.percentile_filter.html#scipy.ndimage.percentile_filter',
'scipy.ndimage.prewitt': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.prewitt.html#scipy.ndimage.prewitt',
'scipy.ndimage.rank_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.rank_filter.html#scipy.ndimage.rank_filter',
'scipy.ndimage.rotate': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.rotate.html#scipy.ndimage.rotate',
'scipy.ndimage.shift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.shift.html#scipy.ndimage.shift',
'scipy.ndimage.sobel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.sobel.html#scipy.ndimage.sobel',
'scipy.ndimage.spline_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.spline_filter.html#scipy.ndimage.spline_filter',
'scipy.ndimage.spline_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.spline_filter1d.html#scipy.ndimage.spline_filter1d',
'scipy.ndimage.standard_deviation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.standard_deviation.html#scipy.ndimage.standard_deviation',
'scipy.ndimage.sum': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.sum.html#scipy.ndimage.sum',
'scipy.ndimage.uniform_filter': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.uniform_filter.html#scipy.ndimage.uniform_filter',
'scipy.ndimage.uniform_filter1d': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.uniform_filter1d.html#scipy.ndimage.uniform_filter1d',
'scipy.ndimage.variance': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.variance.html#scipy.ndimage.variance',
'scipy.ndimage.watershed_ift': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.watershed_ift.html#scipy.ndimage.watershed_ift',
'scipy.ndimage.white_tophat': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.white_tophat.html#scipy.ndimage.white_tophat',
'scipy.ndimage.zoom': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html#scipy.ndimage.zoom'},
'scipy.signal.windows': { 'scipy.signal.windows.barthann': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.barthann.html#scipy.signal.windows.barthann',
'scipy.signal.windows.bartlett': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.bartlett.html#scipy.signal.windows.bartlett',
'scipy.signal.windows.blackman': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.blackman.html#scipy.signal.windows.blackman',
'scipy.signal.windows.blackmanharris': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.blackmanharris.html#scipy.signal.windows.blackmanharris',
'scipy.signal.windows.bohman': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.bohman.html#scipy.signal.windows.bohman',
'scipy.signal.windows.boxcar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.boxcar.html#scipy.signal.windows.boxcar',
'scipy.signal.windows.chebwin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.chebwin.html#scipy.signal.windows.chebwin',
'scipy.signal.windows.cosine': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.cosine.html#scipy.signal.windows.cosine',
'scipy.signal.windows.dpss': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.dpss.html#scipy.signal.windows.dpss',
'scipy.signal.windows.exponential': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.exponential.html#scipy.signal.windows.exponential',
'scipy.signal.windows.flattop': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.flattop.html#scipy.signal.windows.flattop',
'scipy.signal.windows.gaussian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.gaussian.html#scipy.signal.windows.gaussian',
'scipy.signal.windows.general_cosine': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.general_cosine.html#scipy.signal.windows.general_cosine',
'scipy.signal.windows.general_gaussian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.general_gaussian.html#scipy.signal.windows.general_gaussian',
'scipy.signal.windows.general_hamming': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.general_hamming.html#scipy.signal.windows.general_hamming',
'scipy.signal.windows.get_window': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.get_window.html#scipy.signal.windows.get_window',
'scipy.signal.windows.hamming': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.hamming.html#scipy.signal.windows.hamming',
'scipy.signal.windows.hann': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.hann.html#scipy.signal.windows.hann',
'scipy.signal.windows.hanning': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.hanning.html#scipy.signal.windows.hanning',
'scipy.signal.windows.kaiser': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.kaiser.html#scipy.signal.windows.kaiser',
'scipy.signal.windows.nuttall': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.nuttall.html#scipy.signal.windows.nuttall',
'scipy.signal.windows.parzen': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.parzen.html#scipy.signal.windows.parzen',
'scipy.signal.windows.slepian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.slepian.html#scipy.signal.windows.slepian',
'scipy.signal.windows.triang': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.triang.html#scipy.signal.windows.triang',
'scipy.signal.windows.tukey': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.windows.tukey.html#scipy.signal.windows.tukey'},
'scipy.spatial.distance': { 'scipy.spatial.distance.braycurtis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.braycurtis.html#scipy.spatial.distance.braycurtis',
'scipy.spatial.distance.canberra': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.canberra.html#scipy.spatial.distance.canberra',
'scipy.spatial.distance.cdist': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html#scipy.spatial.distance.cdist',
'scipy.spatial.distance.chebyshev': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.chebyshev.html#scipy.spatial.distance.chebyshev',
'scipy.spatial.distance.cityblock': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cityblock.html#scipy.spatial.distance.cityblock',
'scipy.spatial.distance.correlation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.correlation.html#scipy.spatial.distance.correlation',
'scipy.spatial.distance.cosine': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cosine.html#scipy.spatial.distance.cosine',
'scipy.spatial.distance.dice': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.dice.html#scipy.spatial.distance.dice',
'scipy.spatial.distance.directed_hausdorff': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.directed_hausdorff.html#scipy.spatial.distance.directed_hausdorff',
'scipy.spatial.distance.euclidean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.euclidean.html#scipy.spatial.distance.euclidean',
'scipy.spatial.distance.hamming': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.hamming.html#scipy.spatial.distance.hamming',
'scipy.spatial.distance.is_valid_dm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.is_valid_dm.html#scipy.spatial.distance.is_valid_dm',
'scipy.spatial.distance.is_valid_y': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.is_valid_y.html#scipy.spatial.distance.is_valid_y',
'scipy.spatial.distance.jaccard': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jaccard.html#scipy.spatial.distance.jaccard',
'scipy.spatial.distance.jensenshannon': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jensenshannon.html#scipy.spatial.distance.jensenshannon',
'scipy.spatial.distance.kulsinski': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.kulsinski.html#scipy.spatial.distance.kulsinski',
'scipy.spatial.distance.mahalanobis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.mahalanobis.html#scipy.spatial.distance.mahalanobis',
'scipy.spatial.distance.minkowski': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.minkowski.html#scipy.spatial.distance.minkowski',
'scipy.spatial.distance.num_obs_dm': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.num_obs_dm.html#scipy.spatial.distance.num_obs_dm',
'scipy.spatial.distance.num_obs_y': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.num_obs_y.html#scipy.spatial.distance.num_obs_y',
'scipy.spatial.distance.pdist': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html#scipy.spatial.distance.pdist',
'scipy.spatial.distance.rogerstanimoto': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.rogerstanimoto.html#scipy.spatial.distance.rogerstanimoto',
'scipy.spatial.distance.russellrao': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.russellrao.html#scipy.spatial.distance.russellrao',
'scipy.spatial.distance.seuclidean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.seuclidean.html#scipy.spatial.distance.seuclidean',
'scipy.spatial.distance.sokalmichener': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.sokalmichener.html#scipy.spatial.distance.sokalmichener',
'scipy.spatial.distance.sokalsneath': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.sokalsneath.html#scipy.spatial.distance.sokalsneath',
'scipy.spatial.distance.sqeuclidean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.sqeuclidean.html#scipy.spatial.distance.sqeuclidean',
'scipy.spatial.distance.squareform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html#scipy.spatial.distance.squareform',
'scipy.spatial.distance.wminkowski': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.wminkowski.html#scipy.spatial.distance.wminkowski',
'scipy.spatial.distance.yule': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.yule.html#scipy.spatial.distance.yule'},
'scipy.stats.contingency': { 'scipy.stats.contingency.expected_freq': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.contingency.expected_freq.html#scipy.stats.contingency.expected_freq',
'scipy.stats.contingency.margins': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.contingency.margins.html#scipy.stats.contingency.margins'},
'scipy.stats.mstats': { 'scipy.stats.mstats.argstoarray': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.argstoarray.html#scipy.stats.mstats.argstoarray',
'scipy.stats.mstats.brunnermunzel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.brunnermunzel.html#scipy.stats.mstats.brunnermunzel',
'scipy.stats.mstats.chisquare': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.chisquare.html#scipy.stats.mstats.chisquare',
'scipy.stats.mstats.compare_medians_ms': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.compare_medians_ms.html#scipy.stats.mstats.compare_medians_ms',
'scipy.stats.mstats.count_tied_groups': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.count_tied_groups.html#scipy.stats.mstats.count_tied_groups',
'scipy.stats.mstats.describe': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.describe.html#scipy.stats.mstats.describe',
'scipy.stats.mstats.f_oneway': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.f_oneway.html#scipy.stats.mstats.f_oneway',
'scipy.stats.mstats.find_repeats': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.find_repeats.html#scipy.stats.mstats.find_repeats',
'scipy.stats.mstats.friedmanchisquare': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.friedmanchisquare.html#scipy.stats.mstats.friedmanchisquare',
'scipy.stats.mstats.gmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.gmean.html#scipy.stats.mstats.gmean',
'scipy.stats.mstats.hdmedian': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.hdmedian.html#scipy.stats.mstats.hdmedian',
'scipy.stats.mstats.hdquantiles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.hdquantiles.html#scipy.stats.mstats.hdquantiles',
'scipy.stats.mstats.hdquantiles_sd': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.hdquantiles_sd.html#scipy.stats.mstats.hdquantiles_sd',
'scipy.stats.mstats.hmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.hmean.html#scipy.stats.mstats.hmean',
'scipy.stats.mstats.idealfourths': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.idealfourths.html#scipy.stats.mstats.idealfourths',
'scipy.stats.mstats.kendalltau': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kendalltau.html#scipy.stats.mstats.kendalltau',
'scipy.stats.mstats.kendalltau_seasonal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kendalltau_seasonal.html#scipy.stats.mstats.kendalltau_seasonal',
'scipy.stats.mstats.kruskal': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kruskal.html#scipy.stats.mstats.kruskal',
'scipy.stats.mstats.kruskalwallis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kruskalwallis.html#scipy.stats.mstats.kruskalwallis',
'scipy.stats.mstats.ks_1samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ks_1samp.html#scipy.stats.mstats.ks_1samp',
'scipy.stats.mstats.ks_2samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ks_2samp.html#scipy.stats.mstats.ks_2samp',
'scipy.stats.mstats.ks_twosamp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ks_twosamp.html#scipy.stats.mstats.ks_twosamp',
'scipy.stats.mstats.kstest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kstest.html#scipy.stats.mstats.kstest',
'scipy.stats.mstats.kurtosis': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kurtosis.html#scipy.stats.mstats.kurtosis',
'scipy.stats.mstats.kurtosistest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.kurtosistest.html#scipy.stats.mstats.kurtosistest',
'scipy.stats.mstats.linregress': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.linregress.html#scipy.stats.mstats.linregress',
'scipy.stats.mstats.mannwhitneyu': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mannwhitneyu.html#scipy.stats.mstats.mannwhitneyu',
'scipy.stats.mstats.median_cihs': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.median_cihs.html#scipy.stats.mstats.median_cihs',
'scipy.stats.mstats.meppf': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.meppf.html#scipy.stats.mstats.meppf',
'scipy.stats.mstats.mjci': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mjci.html#scipy.stats.mstats.mjci',
'scipy.stats.mstats.mode': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mode.html#scipy.stats.mstats.mode',
'scipy.stats.mstats.moment': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.moment.html#scipy.stats.mstats.moment',
'scipy.stats.mstats.mquantiles': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mquantiles.html#scipy.stats.mstats.mquantiles',
'scipy.stats.mstats.mquantiles_cimj': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mquantiles_cimj.html#scipy.stats.mstats.mquantiles_cimj',
'scipy.stats.mstats.msign': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.msign.html#scipy.stats.mstats.msign',
'scipy.stats.mstats.normaltest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.normaltest.html#scipy.stats.mstats.normaltest',
'scipy.stats.mstats.obrientransform': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.obrientransform.html#scipy.stats.mstats.obrientransform',
'scipy.stats.mstats.pearsonr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.pearsonr.html#scipy.stats.mstats.pearsonr',
'scipy.stats.mstats.plotting_positions': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.plotting_positions.html#scipy.stats.mstats.plotting_positions',
'scipy.stats.mstats.pointbiserialr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.pointbiserialr.html#scipy.stats.mstats.pointbiserialr',
'scipy.stats.mstats.rankdata': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.rankdata.html#scipy.stats.mstats.rankdata',
'scipy.stats.mstats.rsh': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.rsh.html#scipy.stats.mstats.rsh',
'scipy.stats.mstats.scoreatpercentile': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.scoreatpercentile.html#scipy.stats.mstats.scoreatpercentile',
'scipy.stats.mstats.sem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.sem.html#scipy.stats.mstats.sem',
'scipy.stats.mstats.sen_seasonal_slopes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.sen_seasonal_slopes.html#scipy.stats.mstats.sen_seasonal_slopes',
'scipy.stats.mstats.siegelslopes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.siegelslopes.html#scipy.stats.mstats.siegelslopes',
'scipy.stats.mstats.skew': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.skew.html#scipy.stats.mstats.skew',
'scipy.stats.mstats.skewtest': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.skewtest.html#scipy.stats.mstats.skewtest',
'scipy.stats.mstats.spearmanr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.spearmanr.html#scipy.stats.mstats.spearmanr',
'scipy.stats.mstats.theilslopes': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.theilslopes.html#scipy.stats.mstats.theilslopes',
'scipy.stats.mstats.tmax': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.tmax.html#scipy.stats.mstats.tmax',
'scipy.stats.mstats.tmean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.tmean.html#scipy.stats.mstats.tmean',
'scipy.stats.mstats.tmin': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.tmin.html#scipy.stats.mstats.tmin',
'scipy.stats.mstats.trim': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trim.html#scipy.stats.mstats.trim',
'scipy.stats.mstats.trima': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trima.html#scipy.stats.mstats.trima',
'scipy.stats.mstats.trimboth': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimboth.html#scipy.stats.mstats.trimboth',
'scipy.stats.mstats.trimmed_mean': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimmed_mean.html#scipy.stats.mstats.trimmed_mean',
'scipy.stats.mstats.trimmed_mean_ci': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimmed_mean_ci.html#scipy.stats.mstats.trimmed_mean_ci',
'scipy.stats.mstats.trimmed_std': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimmed_std.html#scipy.stats.mstats.trimmed_std',
'scipy.stats.mstats.trimmed_stde': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimmed_stde.html#scipy.stats.mstats.trimmed_stde',
'scipy.stats.mstats.trimmed_var': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimmed_var.html#scipy.stats.mstats.trimmed_var',
'scipy.stats.mstats.trimr': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimr.html#scipy.stats.mstats.trimr',
'scipy.stats.mstats.trimtail': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.trimtail.html#scipy.stats.mstats.trimtail',
'scipy.stats.mstats.tsem': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.tsem.html#scipy.stats.mstats.tsem',
'scipy.stats.mstats.ttest_1samp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ttest_1samp.html#scipy.stats.mstats.ttest_1samp',
'scipy.stats.mstats.ttest_ind': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ttest_ind.html#scipy.stats.mstats.ttest_ind',
'scipy.stats.mstats.ttest_onesamp': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ttest_onesamp.html#scipy.stats.mstats.ttest_onesamp',
'scipy.stats.mstats.ttest_rel': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.ttest_rel.html#scipy.stats.mstats.ttest_rel',
'scipy.stats.mstats.tvar': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.tvar.html#scipy.stats.mstats.tvar',
'scipy.stats.mstats.variation': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.variation.html#scipy.stats.mstats.variation',
'scipy.stats.mstats.winsorize': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.winsorize.html#scipy.stats.mstats.winsorize',
'scipy.stats.mstats.zmap': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.zmap.html#scipy.stats.mstats.zmap',
'scipy.stats.mstats.zscore': 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.zscore.html#scipy.stats.mstats.zscore'}},
'settings': {'lib_path': 'nbdev_scipy'}}
| 185.514462 | 302 | 0.681224 |
71c55f66f473fe60da7f556a02120b7fdc8316c0 | 6,286 | py | Python | cmake/templates/_setup_util.py | po1/catkin | 85556d6776337e9cb1970f0f1733687dbbcb8a42 | [
"BSD-3-Clause"
] | null | null | null | cmake/templates/_setup_util.py | po1/catkin | 85556d6776337e9cb1970f0f1733687dbbcb8a42 | [
"BSD-3-Clause"
] | null | null | null | cmake/templates/_setup_util.py | po1/catkin | 85556d6776337e9cb1970f0f1733687dbbcb8a42 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file only exists for backward compatibility with workspaces built with catkin 0.5.58 or older.'''
from __future__ import print_function
import argparse
import os
import sys
CATKIN_MARKER_FILE = '.catkin'
def get_workspaces(include_fuerte=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces
    :param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
paths = [path for path in os.environ.get(env_name, '').split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte'))]
return workspaces
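# Example (hypothetical paths): with
#   CMAKE_PREFIX_PATH=/home/user/ws/devel:/opt/ros/fuerte
# and a .catkin marker file in /home/user/ws/devel, get_workspaces()
# returns ['/home/user/ws/devel']; get_workspaces(include_fuerte=True)
# additionally keeps '/opt/ros/fuerte'.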
def get_reversed_workspaces(exclude=None):
    '''Return a newline-separated list of workspaces in CMAKE_PREFIX_PATH in reverse order, removing any occurrence of EXCLUDE.'''
paths = [p for p in reversed(get_workspaces()) if p != exclude]
return '\n'.join(paths)
def prefix_env(name, new_paths_str):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
environ_paths = [i for i in os.environ.get(name, '').split(os.pathsep) if i]
checked_paths = []
new_paths = [v for v in new_paths_str.split(os.pathsep) if v != '']
for path in new_paths:
# exclude any path already in env and any path we already added
if path not in environ_paths and path not in checked_paths:
checked_paths.append(path)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
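# Example (assuming os.pathsep == ':' and PATH is '/usr/bin:/bin'):
#   prefix_env('PATH', '/opt/tool/bin:/usr/bin') -> '/opt/tool/bin:'
# '/usr/bin' is skipped because it is already in PATH, and the trailing
# separator is appended only because PATH is non-empty.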
def remove_from_env(name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str, '' or a subfolder name that may start with '/'
:returns: the updated value of the environment variable.
'''
env_paths = [path for path in os.environ.get(name, '').split(os.pathsep) if path]
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in get_workspaces(include_fuerte=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
return os.pathsep.join(env_paths)
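# Example (hypothetical): with workspace /home/user/ws/devel on
# CMAKE_PREFIX_PATH, remove_from_env('PYTHONPATH', '/lib/python') drops the
# first '/home/user/ws/devel/lib/python' entry from PYTHONPATH and returns
# the remaining entries joined with os.pathsep.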
def _parse_arguments():
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--get-reversed-workspaces', action='store_true', help='Get workspaces based on CMAKE_PREFIX_PATH in reverse order')
group.add_argument('--prefix', action='store_true', help='Prepend a unique value to an environment variable')
group.add_argument('--remove', action='store_true', help='Remove the prefix for each workspace in CMAKE_PREFIX_PATH from the environment variable')
parser.add_argument('--name', nargs='?', help='The name of the environment variable')
parser.add_argument('--value', help='The value')
args = parser.parse_args()
# verify correct argument combination
if (args.prefix or args.remove) and args.name is None:
raise RuntimeError('Argument "--name" must be passed for "%s"' % ('--prefix' if args.prefix else '--remove'))
if args.get_reversed_workspaces and args.name is not None:
raise RuntimeError('Argument "--name" must not be passed for "--get-reversed-workspaces"')
return args
if __name__ == '__main__':
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
exit(1)
# dispatch to requested operation
if args.get_reversed_workspaces:
print(get_reversed_workspaces(args.value))
elif args.prefix:
print(prefix_env(args.name, args.value))
elif args.remove:
print(remove_from_env(args.name, args.value))
| 44.58156 | 158 | 0.719695 |
c4b84cb644212b613cbded9e818199120821c78c | 6,140 | py | Python | Test/MakingText.py | shpkc/DailyBigdata | 66fe0fa7b73d054e8b14975a655fdb40b11be014 | [
"Apache-2.0"
] | null | null | null | Test/MakingText.py | shpkc/DailyBigdata | 66fe0fa7b73d054e8b14975a655fdb40b11be014 | [
"Apache-2.0"
] | 16 | 2020-01-28T22:54:04.000Z | 2022-03-11T23:53:56.000Z | Test/MakingText.py | shpkc/DailyBigdata | 66fe0fa7b73d054e8b14975a655fdb40b11be014 | [
"Apache-2.0"
] | null | null | null | def Text():
import pandas as pd
from datetime import timedelta,date
import os
today = int(date.today().strftime('%Y%m%d'))
yesterday = date.today() - timedelta(1)
yesterday = int(yesterday.strftime('%Y%m%d'))
month = int(date.today().strftime('%m'))
day = int(date.today().strftime('%d'))
os.mkdir('./Crawled Data/{}/text'.format(today))
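    # Build the daily report: load today's KTR (interest) / KTS (sentiment)
    # dataframe, keep the 20 keywords with the highest Total_KTR, and pull
    # out the top-10 keywords with their day-over-day changes and related
    # keywords for the report texts below.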
dataframe = pd.read_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(today, today))
dataframe = dataframe.drop_duplicates(subset = "Keyword")
dataframe = dataframe.sort_values(by='Total_KTR', ascending=False).iloc[0:20]
keywords = dataframe.iloc[0:10]['Keyword'].values.tolist()
KTR = dataframe.iloc[0:10]['Total_KTR_change'].values.tolist()
KTS = dataframe.iloc[0:10]['KTS_change'].values.tolist()
related_keywords = dataframe['Related_Keywords'].iloc[0:10].values.tolist()
main_text = "[📰Daily Bigdata , {}/{}]\n\n[🌏오늘의 키워드🌏]\n\n\
1. {}\n\
2. {}\n\
3. {}\n\
4. {}\n\
5. {}\n\
6. {}\n\
7. {}\n\
8. {}\n\
9. {}\n\
10. {}\n\n\
[💵오늘의 증시💵]\n\
코스피 2,230.50(+1.84)\n\
코스닥 743.38(-3.95)\n\
환 율 1,125.0(-1.0)\n\
\n\
📌영상으로 보고 싶으시다면?\n\
데일리 빅데이터 유튜브!!📌\n\
\n\n\
🚨 데일리 빅데이터를 이용한 투자 피해에는 책임을 지지 않으며 🚨\n\
분석 자료의 해석은 개인마다 다를 수 있습니다".format(month, day,\
keywords[0],keywords[1],keywords[2],keywords[3],\
keywords[4],keywords[5],keywords[6],keywords[7],\
keywords[8],keywords[9])
file = open("./Crawled Data/{}/text/{}_main_text.txt".format(today,today), "w")
file.write(main_text)
file.close()
top1_5_text = "🔎 종합 키워드 분석 TOP1~5\n\
\n\
1. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
2. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
3. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
4. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
5. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
📌영상으로 보고 싶으시다면?\n\
데일리 빅데이터 유튜브!!📌".format(keywords[0], KTR[0], KTS[0], related_keywords[0],\
keywords[1], KTR[1], KTS[1], related_keywords[1],\
keywords[2], KTR[2], KTS[2], related_keywords[2],\
keywords[3], KTR[3], KTS[3], related_keywords[3],\
keywords[4], KTR[4], KTS[4], related_keywords[4])
file = open("./Crawled Data/{}/text/{}_top1_5_text.txt".format(today,today), "w")
file.write(top1_5_text)
file.close()
url_dataframe = pd.read_csv('./Crawled Data/{}/{}_max_url'.format(today, today))
max_url = ''
for keyword in keywords:
for i in range(80):
if keyword == url_dataframe.iloc[i]['Keyword']:
max_url = max_url+keyword+' '+url_dataframe.iloc[i]['Max_Url']+'\n'
break
file = open("./Crawled Data/{}/text/{}_max_url.txt".format(today,today), "w")
file.write(max_url)
file.close()
    top6_10_text = "🔎 종합 키워드 분석 TOP6~10\n\
\n\
6. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
7. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
8. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
9. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
10. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
📌영상으로 보고 싶으시다면?\n\
데일리 빅데이터 유튜브!!📌".format(keywords[5], KTR[5], KTS[5], related_keywords[5],\
keywords[6], KTR[6], KTS[6], related_keywords[6],\
keywords[7], KTR[7], KTS[7], related_keywords[7],\
keywords[8], KTR[8], KTS[8], related_keywords[8],\
keywords[9], KTR[9], KTS[9], related_keywords[9])
file = open("./Crawled Data/{}/text/{}_top6_10_text.txt".format(today,today), "w")
file.write(top6_10_text)
file.close()
today_KTR_KTS = pd.read_csv('./Crawled Data/{}/{}_KTR_KTS_dataframe'.format(today, today))
topics = ['society', 'politics', 'economic', 'foreign', 'culture',
'entertain', 'sports', 'digital']
topics_emoji = ['🌉','⚖','💲','🌏','🎼','🎤','⚽','💻']
topics_kr = ['사회', '정치', '경제', '국제', '문화', '연예', '스포츠', 'IT']
kr_index = 0
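    # For each news topic, build the same style of top-10 keyword report
    # (keyword list plus per-keyword interest/sentiment changes) and write
    # it to its own topic-specific text file.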
for topic in topics:
dataframe = today_KTR_KTS[today_KTR_KTS['Topic'] == topic].sort_values(by='Total_KTR', ascending=False)
keywords = dataframe.iloc[0:10]['Keyword'].values.tolist()
KTR = dataframe.iloc[0:10]['Total_KTR_change'].values.tolist()
KTS = dataframe.iloc[0:10]['KTS_change'].values.tolist()
related_keywords = dataframe['Related_Keywords'].iloc[0:10].values.tolist()
text = "{} {} 키워드\n\n\
1. {}\n\
2. {}\n\
3. {}\n\
4. {}\n\
5. {}\n\
6. {}\n\
7. {}\n\
8. {}\n\
9. {}\n\
10. {}\n\
\n\
[🔎키워드 분석🔍]\n\n\
1. {}\n\
관심도 * : {}\n\
감정도 : {}\n\
{}\n\
\n\
2. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
3. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
4. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
5. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
6. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
7. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
8. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
9. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
10. {}\n\
관심도 : {}\n\
감정도 : {}\n\
{}\n\
\n\
* = 당일 평균 키워드 대비 백분율\n\
\n\
📌영상으로 보고 싶으시다면?\n\
데일리 빅데이터 유튜브!!📌".format(topics_emoji[kr_index], topics_kr[kr_index],\
keywords[0],keywords[1],keywords[2],keywords[3],\
keywords[4],keywords[5],keywords[6],keywords[7],\
keywords[8],keywords[9],\
keywords[0],KTR[0],KTS[0],related_keywords[0],\
keywords[1],KTR[1],KTS[1],related_keywords[1],\
keywords[2],KTR[2],KTS[2],related_keywords[2],\
keywords[3],KTR[3],KTS[3],related_keywords[3],\
keywords[4],KTR[4],KTS[4],related_keywords[4],\
keywords[5],KTR[5],KTS[5],related_keywords[5],\
keywords[6],KTR[6],KTS[6],related_keywords[6],\
keywords[7],KTR[7],KTS[7],related_keywords[7],\
keywords[8],KTR[8],KTS[8],related_keywords[8],\
keywords[9],KTR[9],KTS[9],related_keywords[9])
kr_index+=1
file = open("./Crawled Data/{}/text/{}_{}_text.txt".format(today,today, topic), "w")
file.write(text)
file.close()
print("모든 작업 완료") | 25.061224 | 111 | 0.516938 |
1557cd0d2497af89771497ea109c453a92cf22db | 11,532 | py | Python | privex/helpers/setuppy/bump.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 12 | 2019-06-18T11:17:41.000Z | 2021-09-13T23:00:21.000Z | privex/helpers/setuppy/bump.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 1 | 2019-10-13T07:34:44.000Z | 2019-10-13T07:34:44.000Z | privex/helpers/setuppy/bump.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 4 | 2019-10-10T10:15:09.000Z | 2021-05-16T01:55:48.000Z | """
Automated Python package version bumping
Summary
-------
Included is a standalone function :py:func:`.bump_version` - which when called, loads the file
:py:attr:`.settings.VERSION_FILE` , extracts the current version, bumps the requested part of the version,
then replaces the version line inside of the file so that it contains the newly bumped version.
There's also a setup.py command class :py:class:`.BumpCommand` which allows you to use :py:func:`.bump_version`
as a ``setup.py`` command, making it simple to increment your package's version without having to manually edit
the file.
If the package :py:mod:`semver` is detected, the version bumping helper :py:func:`.bump_version` becomes available.
If the module :py:mod:`distutils` is detected, the setup.py command class :py:class:`.BumpCommand` also becomes
available.
How to version your package for best compatibility
--------------------------------------------------
To avoid having to write your own version detection/replacement functions, we recommend placing your package
version inside a Python module file, such as ``__init__.py``, as the string variable ``VERSION``.
Example: ``mypackage/__init__.py``
.. code-block:: python
from mypackage.somemodule import x
from mypackage.othermodule import y
VERSION = '1.2.3'
If you cannot store your package version this way for some reason, then you can write a custom detection/replacement
function and register it as the default.
See the docs for :py:func:`.bump_version` to learn how to write a custom version detection/replacement function.
Using the BumpCommand distutils command in your setup.py
--------------------------------------------------------
For :class:`.BumpCommand` to function, you must at least set :py:attr:`privex.helpers.settings.VERSION_FILE`
to an absolute path to the file which contains your package version attribute.
If your package version isn't defined as shown in the previous section on how to version your package,
then you'll **also need to set up custom version detection/replacement functions**.
(see docs at :py:func:`.bump_version`)
Below is an example ``setup.py`` file, which configures the VERSION_FILE setting to point to ``mypackage/__init__.py``
relative to the folder setup.py is in, then calls :py:func:`setuptools.setup` with the package version and command
dictionary.
.. code-block:: python
from os import join, dirname, abspath
from setuptools import setup, find_packages
from privex.helpers import settings, BumpCommand
# If you placed your version in your package __init__ then you can import it for use in setup.py building
from mypackage import VERSION
# This results in an absolute path to the folder where this setup.py file is contained
BASE_DIR = dirname(abspath(__file__))
# The file which contains "VERSION = '1.2.3'"
settings.VERSION_FILE = join(BASE_DIR, 'mypackage', '__init__.py')
# Register BumpCommand as a command in your setup() function.
setup(
version=VERSION,
cmdclass={
'bump': BumpCommand
},
);
Basic usage of the bump command with setup.py
---------------------------------------------
Once you've configured :py:attr:`privex.helpers.settings.VERSION_FILE` and registered the command class in the
``setup()`` function, you can now use the ``bump`` command from your ``setup.py`` and it will automatically bump
your version.
Below is an example of basic usage. If you need more help on usage, type ``./setup.py bump --help`` - or for
detailed documentation on the command, see the class documentation :class:`.BumpCommand`
.. code-block:: bash
./setup.py bump --patch
# Bumping 'patch' version part
# Updating version stored in file @ /tmp/helpers/privex/helpers/__init__.py
# Package version has been bumped from 2.0.0 to 2.0.1 and written to the file
# /tmp/helpers/privex/helpers/__init__.py
./setup.py bump --minor
# ... version has been bumped from 2.0.0 to 2.1.0 and written to the file ...
"""
import re
import logging
import semver
from privex.helpers import settings
try:
from privex.helpers.setuppy.commands import BumpCommand
except ImportError:
pass
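# BumpCommand needs distutils/setuptools to be importable; when they are
# missing, only the plain bump_version() helper below remains available.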
log = logging.getLogger(__name__)
_find_ver = re.compile(r'VERSION = \'?\"?([0-9a-zA-Z-._]+)\'?\"?')
"""Regex used for finding/replacing version line"""
def version_replace(data: str, old_version: str, new_version: str) -> str:
"""
Replace the version line in ``data`` containing ``old_version`` with a version line containing ``new_version``
Example::
>>> data = "# Example\\nVERSION = '1.2.3' # Some comment"
>>> version_replace(data=data, old_version='1.2.3', new_version='1.3.0')
"# Example\\nVERSION = '1.3.0' # Some comment"
As shown in the above example, it shouldn't affect surrounding lines, or even in-line comments.
Note: ``old_version`` isn't really used by this function. It exists for compatibility with user drop-in
replacement functions that may need to know the old version to replace it.
:param str data: The string contents containing ``VERSION = 'x.y.z'``
:param str old_version: The existing version number, e.g. ``'1.2.3'``
:param str new_version: The new version to replace it with, e.g. ``'1.3.0'``
:return str replaced: ``data`` with the VERSION line updated.
"""
return _find_ver.sub(f"VERSION = '{new_version}'", data)
def get_current_ver(data: str) -> str:
return str(_find_ver.search(data).group(1))
default_replace_func = version_replace
"""If no version replacement function is passed to :py:func:`.bump_version`, then this function will be used."""
default_current_ver = get_current_ver
"""If no version retrieval function is passed to :py:func:`.bump_version`, then this function will be used."""
def bump_version(part='patch', dry=False, **kwargs):
"""
Bump semver version and replace version line inside of :py:attr:`.settings.VERSION_FILE`
* Obtains the current package version using ``version_func``
* Uses :py:mod:`semver` to increment the ``part`` portion and resets any lower portions to zero
* Reads the file :py:attr:`.settings.VERSION_FILE` and passes it to ``replace_func`` along with the original
version and new bumped version to obtain the modified file contents.
* Writes the file contents containing the updated version number back to :py:attr:`.settings.VERSION_FILE`
Basic usage:
>>> from privex.helpers import settings, setuppy
>>> bump_version('minor')
If you want to use this function outside of privex-helpers, for your own package/project, ensure you
adjust the settings and version functions as required.
To change the file which contains your package version, as well as the function used to get the
current version::
>>> import mypackage
>>> from privex.helpers import settings
>>> settings.VERSION_FILE = '/home/john/mypackage/mypackage/__init__.py'
If you use the same version line format at privex-helpers, like this::
VERSION = '1.2.3' # In-line comments are fine, as are double quotes instead of single quotes.
Then you don't need to make a custom version retrieval or replacement function.
Otherwise... this is how you write and register a custom version retrieval and replacement function:
.. code-block:: python
:force:
import re
from privex.helpers.setuppy import bump
# Regex to find the string: version='x.y.z'
# and extract the version x.y.z on it's own.
my_regex = re.compile(r'version=\'?\"?([0-9a-zA-Z-._]+)\'?\"?')
def my_version_finder(data: str):
return str(my_regex.search(data).group(1))
# Set your function `my_version_finder` as the default used to obtain the current package version
bump.default_current_ver = my_version_finder
def my_version_replacer(data: str, old_version: str, new_version: str):
# This is an example of a version replacer if you just put your version straight into setup.py
return data.replace(f"version='{old_version}'", f"version='{new_version}'")
# Alternatively use regex substitution
return my_regex.sub(f"version='{new_version}'", data)
# Set your function `my_version_replacer` as the default used to replace the version in a file.
bump.default_replace_func = my_version_replacer
:param bool dry: If set to ``True``, will only return the modified file contents instead of overwriting
the file :py:attr:`.settings.VERSION_FILE`
:param str part: The part of the version to bump: ``patch``, ``minor``, ``major``, ``build`` or ``prerelease``
:key callable replace_func: Custom version replacement function. Should take the arguments
(data, old_version, new_version) and return ``data`` with the version line replaced.
:key callable version_func: Custom version retrieval function. Takes no args, returns curr version as a string.
:key str token: If using part ``build`` or ``prerelease``, this overrides the version token
:return:
"""
replace_func = kwargs.get('replace_func', default_replace_func)
get_version = kwargs.get('version_func', default_current_ver)
ver_path = settings.VERSION_FILE
log.debug('Reading file %s to replace version line', ver_path)
with open(ver_path) as fp:
ver_file = str(fp.read())
curr_ver = get_version(ver_file)
new_ver = _bump_version(version=curr_ver, part=part, **kwargs)
log.debug('Current version: %s ||| Bumped version: %s', curr_ver, new_ver)
new_ver_file = replace_func(data=ver_file, old_version=curr_ver, new_version=new_ver)
if dry:
log.debug('Dry kwarg was True. Returning modified file instead of outputting it.')
return new_ver_file
log.debug('Attempting to write updated contents back to %s', ver_path)
with open(ver_path, 'w') as fp:
fp.write(new_ver_file)
return new_ver, curr_ver
def _bump_version(version: str, part: str, **kwargs) -> str:
"""
Bumps the semver part ``part`` in the version ``version`` and returns it as a string.
Used internally by :py:func:`.bump_version`
>>> _bump_version('1.2.3', 'minor')
'1.3.0'
:param version: The version to bump as a string e.g. ``'1.2.3'```
:param part: The part of the version to bump, e.g. ``minor`` or ``major``
:key str token: If using part ``build`` or ``prerelease``, this overrides the version token
:raises AttributeError: If ``part`` isn't a supported version part.
:return str new_ver: The bumped version number
"""
bumps = dict(
minor=semver.bump_minor,
major=semver.bump_major,
patch=semver.bump_patch,
build=semver.bump_build,
prerelease=semver.bump_prerelease,
pre=semver.bump_prerelease,
)
if part not in bumps:
raise AttributeError(f'The "part" argument must be one of the following: {",".join(bumps.keys())}')
ver_args = dict(version=version)
if 'token' in kwargs:
ver_args['token'] = kwargs['token']
        log.debug('_bump_version token: %s', ver_args['token'])
new_ver = bumps[part](**ver_args)
return new_ver
| 40.749117 | 118 | 0.681495 |
9ffa53f322962c61cd4104b4de552e87188b6a42 | 2,907 | py | Python | pyzoo/zoo/examples/tensorflow/tfpark/tf_optimizer/train_lenet.py | respecteverything/analytics-zoo | a8843c73c24b5026d93d46fa9268eb41a958cf6d | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/examples/tensorflow/tfpark/tf_optimizer/train_lenet.py | respecteverything/analytics-zoo | a8843c73c24b5026d93d46fa9268eb41a958cf6d | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/examples/tensorflow/tfpark/tf_optimizer/train_lenet.py | respecteverything/analytics-zoo | a8843c73c24b5026d93d46fa9268eb41a958cf6d | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from zoo import init_nncontext
from zoo.tfpark import TFOptimizer, TFDataset
from bigdl.optim.optimizer import *
import numpy as np
import sys
from bigdl.dataset import mnist
from bigdl.dataset.transformer import *
sys.path.append("/tmp/models/slim") # add the slim library
from nets import lenet
slim = tf.contrib.slim
def main(max_epoch, data_num):
sc = init_nncontext()
# get data, pre-process and create TFDataset
def get_data_rdd(dataset):
(images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", dataset)
image_rdd = sc.parallelize(images_data[:data_num])
labels_rdd = sc.parallelize(labels_data[:data_num])
rdd = image_rdd.zip(labels_rdd) \
.map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
np.array(rec_tuple[1])])
return rdd
training_rdd = get_data_rdd("train")
testing_rdd = get_data_rdd("test")
dataset = TFDataset.from_rdd(training_rdd,
names=["features", "labels"],
shapes=[[28, 28, 1], []],
types=[tf.float32, tf.int32],
batch_size=280,
val_rdd=testing_rdd
)
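    # Each dataset element is a (28x28x1 float image, int label) pair;
    # batch_size is the total batch size across the cluster, which
    # Analytics Zoo splits over the RDD partitions.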
# construct the model from TFDataset
images, labels = dataset.tensors
with slim.arg_scope(lenet.lenet_arg_scope()):
logits, end_points = lenet.lenet(images, num_classes=10, is_training=True)
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
# create a optimizer
optimizer = TFOptimizer.from_loss(loss, Adam(1e-3),
val_outputs=[logits],
val_labels=[labels],
val_method=Top1Accuracy(), model_dir="/tmp/lenet/")
# kick off training
optimizer.optimize(end_trigger=MaxEpoch(max_epoch))
saver = tf.train.Saver()
saver.save(optimizer.sess, "/tmp/lenet/model")
if __name__ == '__main__':
max_epoch = 5
data_num = 60000
if len(sys.argv) > 1:
max_epoch = int(sys.argv[1])
data_num = int(sys.argv[2])
main(max_epoch, data_num)
| 35.024096 | 96 | 0.632267 |
c0c3218d0657d99331263bbd770e36ce83c68438 | 201 | py | Python | siteforms/__init__.py | idlesign/django-siteforms | 86661d6f899e842c32802511ac0a13db1eed671d | [
"BSD-3-Clause"
] | 16 | 2020-06-23T19:41:35.000Z | 2020-08-15T14:34:21.000Z | siteforms/__init__.py | idlesign/django-siteforms | 86661d6f899e842c32802511ac0a13db1eed671d | [
"BSD-3-Clause"
] | null | null | null | siteforms/__init__.py | idlesign/django-siteforms | 86661d6f899e842c32802511ac0a13db1eed671d | [
"BSD-3-Clause"
] | null | null | null |
VERSION = (0, 9, 1)
"""Application version number tuple."""
VERSION_STR = '.'.join(map(str, VERSION))
"""Application version number string."""
default_app_config = 'siteforms.apps.SiteformsConfig' | 20.1 | 53 | 0.711443 |
c2367d086225c9deb31a3de5b85f100c466e3e59 | 1,709 | py | Python | server/gists.py | macobo/grader-webapp | 2d5c5511472792d21f864de47267e33ddbd17c90 | [
"MIT"
] | 1 | 2021-11-02T16:38:29.000Z | 2021-11-02T16:38:29.000Z | server/gists.py | macobo/grader-webapp | 2d5c5511472792d21f864de47267e33ddbd17c90 | [
"MIT"
] | null | null | null | server/gists.py | macobo/grader-webapp | 2d5c5511472792d21f864de47267e33ddbd17c90 | [
"MIT"
] | null | null | null | from server import app, db
from flask import Blueprint, request, jsonify, abort
from .utils import dump_json
mod = Blueprint('gists', __name__)
import string
import random
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def free_name(collection):
while True:
_id = id_generator()
if not collection.find_one({ "name": _id }):
return _id
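# Example (hypothetical): free_name(db.db.gists) keeps drawing random
# 8-character names until one is unused as a gist name, e.g. 'A1B2C3D4'.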
@mod.route('/api/gists', methods=['GET'])
@dump_json
def list_gists():
gists = db.db.gists
return {"results" : list(gists.find({"post.public": True}))}
@app.route('/api/gists/<name>', methods=['GET'])
@dump_json
def get_gist(name):
app.logger.info(name)
post = db.db.gists.find_one({ "name": name }, sort=[("version", -1)])
if post is None:
abort(404)
return post
@app.route('/api/gists', methods=['POST'])
@dump_json
def post_gist():
post_data, name = request.json['post'], request.json.get('name', None)
app.logger.debug(request.json)
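    # If the gist name already exists, bump its version and append the new
    # revision to its history; otherwise store it as version 1 (generating
    # a random name when none was supplied).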
data = {"name": name, "post": post_data, "versions": [post_data], "version": 1}
if not name:
data['name'] = free_name(db.db.gists)
else:
post = db.db.gists.find_one({"name": name}, sort=[("version", -1)])
if post:
data = post
data['version'] += 1
data['post'] = post_data
data['versions'].append(post_data)
db.db.gists.save(data)
return data
@app.route('/api/gists/<name>/update_name', methods=['POST'])
@dump_json
def update_gist(name):
post = db.db.gists.find_one({"name": name})
post['name'] = request.json['new_name']
db.db.gists.save(post)
return post | 29.982456 | 83 | 0.627267 |
beb09787ec93c789daf795e01d19d0edd395647b | 3,893 | py | Python | tests/unit/s3/test_lifecycle.py | ChimeraCoder/boto | fa886a95e0f2c09b15e0f10c7244a9b02df9007d | [
"MIT"
] | 1 | 2021-08-13T09:07:07.000Z | 2021-08-13T09:07:07.000Z | tests/unit/s3/test_lifecycle.py | ChimeraCoder/boto | fa886a95e0f2c09b15e0f10c7244a9b02df9007d | [
"MIT"
] | null | null | null | tests/unit/s3/test_lifecycle.py | ChimeraCoder/boto | fa886a95e0f2c09b15e0f10c7244a9b02df9007d | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import AWSMockServiceTestCase
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.lifecycle import Rule, Lifecycle, Transition
class TestS3LifeCycle(AWSMockServiceTestCase):
connection_class = S3Connection
def default_body(self):
return """
<LifecycleConfiguration>
<Rule>
<ID>rule-1</ID>
<Prefix>prefix/foo</Prefix>
<Status>Enabled</Status>
<Transition>
<Days>30</Days>
<StorageClass>GLACIER</StorageClass>
</Transition>
<Expiration>
<Days>365</Days>
</Expiration>
</Rule>
<Rule>
<ID>rule-2</ID>
<Prefix>prefix/bar</Prefix>
<Status>Disabled</Status>
<Transition>
<Date>2012-12-31T00:00:000Z</Date>
<StorageClass>GLACIER</StorageClass>
</Transition>
</Rule>
</LifecycleConfiguration>
"""
def test_parse_lifecycle_response(self):
self.set_http_response(status_code=200)
bucket = Bucket(self.service_connection, 'mybucket')
response = bucket.get_lifecycle_config()
self.assertEqual(len(response), 2)
rule = response[0]
self.assertEqual(rule.id, 'rule-1')
self.assertEqual(rule.prefix, 'prefix/foo')
self.assertEqual(rule.status, 'Enabled')
self.assertEqual(rule.expiration, 365)
transition = rule.transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(response[1].transition.date, '2012-12-31T00:00:000Z')
def test_expiration_with_no_transition(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', 'prefix', 'Enabled', 30)
xml = lifecycle.to_xml()
self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
def test_expiration_is_optional(self):
t = Transition(days=30, storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=None,
transition=t)
xml = r.to_xml()
self.assertIn(
'<Transition><StorageClass>GLACIER</StorageClass><Days>30</Days>',
xml)
def test_expiration_with_expiration_and_transition(self):
t = Transition(date='2012-11-30T00:00:000Z', storage_class='GLACIER')
r = Rule('myid', 'prefix', 'Enabled', expiration=30, transition=t)
xml = r.to_xml()
self.assertIn(
'<Transition><StorageClass>GLACIER</StorageClass>'
'<Date>2012-11-30T00:00:000Z</Date>', xml)
self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
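    # Putting the pieces together against a live bucket (sketch using only
    # calls exercised above; the `bucket` object is assumed to exist):
    #
    #     lifecycle = Lifecycle()
    #     lifecycle.add_rule('archive', 'logs/', 'Enabled', 365)
    #     bucket.configure_lifecycle(lifecycle)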
| 40.134021 | 78 | 0.653994 |
85b54e94e86ccf7d9c76c326e191c3bc4de9fa8c | 3,505 | py | Python | sly/apps/shrink/tests/test_forms.py | ppolle/sly | 3458b5c58a75c65fdc72b91430e56ec37d2060e0 | [
"MIT"
] | null | null | null | sly/apps/shrink/tests/test_forms.py | ppolle/sly | 3458b5c58a75c65fdc72b91430e56ec37d2060e0 | [
"MIT"
] | 3 | 2021-03-19T23:38:19.000Z | 2021-06-10T23:02:29.000Z | sly/apps/shrink/tests/test_forms.py | ppolle/sly | 3458b5c58a75c65fdc72b91430e56ec37d2060e0 | [
"MIT"
] | null | null | null | from django.test import TestCase
from sly.apps.shrink.forms import UrlForm, RegisterUserForm, UserAuthForm
# Create your tests here.
class UrlFormTests(TestCase):
def test_valid_form(self):
data = {
'url': 'https://www.nation.co.ke/lifestyle/1190-1190-5p56avz/index.html',
'short_code':'weifbwie'
}
form = UrlForm(data=data)
self.assertTrue(form.is_valid())
def test_form_valid_without_short_code_field(self):
data = {
'url':'https://www.nation.co.ke/lifestyle/1190-1190-5p56avz/index.html'
}
form = UrlForm(data=data)
self.assertTrue(form.is_valid())
def test_form_invalid(self):
data = {
'short_code':'test'
}
form = UrlForm(data=data)
if form.errors.items():
error = dict(form.errors.items())
self.assertTrue(error.keys(), error.values())
self.assertFalse(form.is_valid())
def test_invalid_url(self):
data = {
'url':'abc'
}
form = UrlForm(data=data)
if form.errors.items():
error = dict(form.errors.items())
self.assertEqual(error['url'][0], "This field has to be a proper URL")
self.assertFalse(form.is_valid())
def test_ommit_own_url(self):
data = {
'url':'http://example.com'
}
form = UrlForm(data=data)
if form.errors.items():
error = dict(form.errors.items())
self.assertEqual(error['url'][0], "You cannot shorten a URL from this site")
self.assertFalse(form.is_valid())
def test_a_short_code_cant_be_used_twice(self):
data = {
'url': 'https://www.nation.co.ke/lifestyle/1190-1190-5p56avz/index.html',
'short_code':'weifbwie'
}
form = UrlForm(data=data)
if form.errors.items():
			error = dict(form.errors.items())
self.assertTrue(error['short_code'][0], "A short url with that shortcode already exists, please try another code")
self.assertTrue(form.is_valid())
class UserAuthFormTests(TestCase):
def test_valid_form(self):
data = {
'username':'ppolle',
'password1':'password'
}
form = UserAuthForm(data=data)
if form.errors.items():
field, error = form.errors.items()[0]
self.assertTrue(field, error)
self.assertTrue(form.is_valid())
def test_invalid_form(self):
data = {
'username':'ppolle'}
form = UserAuthForm(data=data)
if form.errors.items():
error = dict(form.errors.items())
self.assertTrue(error.keys(), error.values())
self.assertFalse(form.is_valid())
class RegisterUserFormTests(TestCase):
def test_form_valid(self):
data = {'first_name':'Peter',
'last_name':'Polle',
'email':'peterpolle@gmail.com',
'password1':'iamTHEBIGBOSS1234',
'password2':'iamTHEBIGBOSS1234',
'username':'ppolle'
}
form = RegisterUserForm(data=data)
if form.errors.items():
field, error = form.errors.items()[0]
self.assertEqual(field, error)
self.assertTrue(form.is_valid())
def test_form_invalid(self):
data = {'first_name':'Peter',
'last_name':'Polle',
'email':'peter@polle@gmail.com',
'password1':'iamTHEBIGBOSS1234',
'password2':'iamTHEBIGBOSS1234',
}
form = RegisterUserForm(data=data)
self.assertFalse(form.is_valid())
def test_password1_must_be_equal_to_password2(self):
data = {'first_name':'Peter',
'last_name':'Polle',
'email':'peterpolle@gmail.com',
'password1':'iamTHEBIGBOSS1234',
'password2':'iamTHEBIGBOSS123',
'username':'ppolle'
}
form = RegisterUserForm(data=data)
if form.errors.items():
error = dict(form.errors.items())
self.assertEqual(error['password2'][0], "The two password fields didn't match.")
self.assertFalse(form.is_valid())
| 26.353383 | 117 | 0.692725 |
8132157b345baf58142efc8ec4f98b590c18473f | 1,625 | py | Python | backintime/candles_providers/timeframe_dump/timeframe_dump.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | backintime/candles_providers/timeframe_dump/timeframe_dump.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | backintime/candles_providers/timeframe_dump/timeframe_dump.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | [
"MIT"
] | null | null | null | from .timeframe_dump_scheme import TimeframeDumpScheme
from .utils import to_candle
from ..candles_provider import CandlesProvider
from ...timeframes import Timeframes
import pandas as pd
import datetime
class TimeframeDump(CandlesProvider):
def __init__(
self,
filename: str,
timeframe_tag: Timeframes,
scheme: TimeframeDumpScheme = TimeframeDumpScheme()
):
        # scheme specifies indexes to use for fetching candle's open time and OHLC info
self._scheme = scheme
self._data = pd.read_csv(
filename,
sep=';',
parse_dates=[scheme.open_time_idx, scheme.close_time_idx]
)
self._gen = None
super().__init__(timeframe_tag)
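    # Usage sketch (the CSV path and the timeframe member are assumptions
    # about the surrounding project):
    #
    #     provider = TimeframeDump('h1_dump.csv', Timeframes.H1)
    #     provider.next()                  # advance to the first candle
    #     print(provider.current_date())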
def current_date(self):
if not self._start_date:
return None
ticks = self.get_ticks()
time_passed = datetime.timedelta(
seconds=ticks*self.candle_duration())
return self._start_date + time_passed
def next(self) -> None:
if not self._gen:
self._gen = iter(self._data.iterrows())
_, row = next(self._gen)
open_time = row[self._scheme.open_time_idx]
if self._start_date:
# skip rows until desired date
while open_time < self._start_date:
_, row = next(self._gen)
open_time = row[self._scheme.open_time_idx]
self._candle_buffer.open_time = open_time
to_candle(row, scheme, self._candle_buffer)
self._tick_counter.increment()
| 30.660377 | 87 | 0.606769 |
cf2610e897d3ed060d971132dfa54b8cc97e7eb2 | 23,401 | py | Python | bansync/bansync.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | bansync/bansync.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | bansync/bansync.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | from __future__ import annotations
import asyncio
import io
import json
import logging
from datetime import datetime
from typing import (
AsyncIterator,
Collection,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
import discord
from discord.ext.commands import Greedy
from redbot.core import checks, commands
from redbot.core.bot import Red
from redbot.core.config import Config
from redbot.core.data_manager import cog_data_path
from redbot.core.modlog import create_case
from redbot.core.utils.chat_formatting import box, pagify
from .converters import MentionOrID, ParserError, SyndicatedConverter
GuildList = List[discord.Guild]
GuildSet = Set[discord.Guild]
UserLike = Union[discord.Member, discord.User]
def mock_user(idx: int) -> UserLike:
return cast(discord.User, discord.Object(id=idx))
class AddOnceHandler(logging.FileHandler):
"""
Red's hot reload logic will break my logging if I don't do this.
"""
log = logging.getLogger("red.sinbadcogs.bansync")
for handler in log.handlers:
# Red hotreload shit.... can't use isinstance, need to check not already added.
if handler.__class__.__name__ == "AddOnceHandler":
break
else:
fp = cog_data_path(raw_name="BanSync") / "unhandled_exceptions.log"
handler = AddOnceHandler(fp)
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
style="%",
)
handler.setFormatter(formatter)
log.addHandler(handler)
class BanSync(commands.Cog):
"""
Synchronize your bans.
"""
__version__ = "340.0.0"
async def red_delete_data_for_user(self, **kwargs):
""" Nothing to delete """
return
def format_help_for_context(self, ctx):
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot: Red = bot
self.config = Config.get_conf(self, identifier=78631113035100160)
self.config.register_global(
excluded_from_automatic=[],
per_request_base_ratelimit=0.02, # Approximately accurate.
fractional_usage=0.5,
opted_into_automatic=[],
)
@commands.group()
async def bansyncset(self, ctx: commands.Context):
"""
Options for bansync
"""
pass
@checks.guildowner_or_permissions(administrator=True)
@commands.guild_only()
@bansyncset.command()
async def automaticoptout(self, ctx: commands.GuildContext):
"""
This allows you to opt a server out of being selected for some actions
The current things it will prevent:
mjolnir|globalban
bansync with automatic destinations
syndicatebans with automatic destinations
Things it will not prevent:
bansync with an explicit choice to include the server.
syndicatebans with explicit destinations
The default (as of April 29, 2020) is being opted out.
No settings for the prior version were re-purposed, and will require a new opt-in.
This is due to a specific question in the application process for verified bots
and ensuring that this cog can be used on those bots.
"""
async with self.config.opted_into_automatic() as opts:
if ctx.guild.id not in opts:
return await ctx.send(
"This server is not currently opted into automatic actions."
)
opts.remove(ctx.guild.id)
await ctx.tick()
@checks.guildowner_or_permissions(administrator=True)
@commands.guild_only()
@bansyncset.command()
async def automaticoptin(self, ctx: commands.GuildContext):
"""
This allows you to opt into certain automatic actions.
The current things it will opt into:
mjolnir|globalban
bansync with automatic destinations
syndicatebans with automatic destinations
Things which do not require an opt-in:
bansync with an explicit choice to include the server.
syndicatebans with explicit destinations
The default (as of April 29, 2020) is being opted out.
No settings for the prior version were re-purposed, and will require a new opt-in.
This is due to a specific question in the application process for verified bots
and ensuring that this cog can be used on those bots.
"""
async with self.config.opted_into_automatic() as opts:
if ctx.guild.id in opts:
return await ctx.send(
"This server has already opted into automatic actions."
)
opts.append(ctx.guild.id)
await ctx.tick()
@commands.bot_has_permissions(ban_members=True, attach_files=True)
@checks.admin_or_permissions(ban_members=True)
@commands.guild_only()
@commands.command(name="exportbans")
async def exportbans(self, ctx: commands.GuildContext):
"""
        Exports the current server's bans to json
"""
bans = await ctx.guild.bans()
data = [b.user.id for b in bans]
to_file = json.dumps(data).encode()
fp = io.BytesIO(to_file)
fp.seek(0)
filename = f"{ctx.message.id}-bans.json"
try:
await ctx.send(
ctx.author.mention, files=[discord.File(fp, filename=filename)]
)
except discord.HTTPException:
await ctx.send(
(
"You have a very interesting ban list to be too large to send, open an issue."
)
)
@commands.bot_has_permissions(ban_members=True)
@checks.admin_or_permissions(ban_members=True)
@commands.guild_only()
@commands.command(name="importbans")
async def importbans(self, ctx: commands.GuildContext):
"""
Imports bans from json
"""
if not ctx.message.attachments:
return await ctx.send(
"You definitely need to supply me an exported ban list to be imported."
)
fp = io.BytesIO()
a = ctx.message.attachments[0]
await a.save(fp)
try:
data = json.load(fp)
if not isinstance(data, list) or not all(isinstance(x, int) for x in data):
raise TypeError() from None
except (json.JSONDecodeError, TypeError):
return await ctx.send("That wasn't an exported ban list")
current_bans = await ctx.guild.bans()
to_ban = set(data) - {b.user.id for b in current_bans}
if not to_ban:
return await ctx.send(
"That list doesn't contain anybody not already banned."
)
async with ctx.typing():
exit_codes = [
await self.ban_or_hackban(
ctx.guild,
idx,
mod=ctx.author,
reason=f"Imported ban by {ctx.author}({ctx.author.id})",
)
for idx in to_ban
]
if all(exit_codes):
await ctx.message.add_reaction("\N{HAMMER}")
elif not any(exit_codes):
await ctx.send("You are not worthy")
else:
await ctx.send(
"I got some of those, but other's couldn't be banned for some reason."
)
@commands.command(name="bulkban")
async def bulkban(self, ctx: commands.Context, *ids: int):
"""
bulk global bans by id
"""
rsn = f"Global ban authorized by {ctx.author}({ctx.author.id})"
async with ctx.typing():
results = {i: await self.targeted_global_ban(ctx, i, rsn) for i in set(ids)}
if all(results.values()):
await ctx.message.add_reaction("\N{HAMMER}")
elif not any(results.values()):
await ctx.send("You are not worthy")
else:
await ctx.send(
"I got some of those, but other's couldn't be banned for some reason."
)
async def can_sync(self, guild: discord.Guild, mod: UserLike) -> bool:
"""
Determines if the specified user should
be considered allowed to sync bans to the specified guild
Parameters
----------
guild: discord.Guild
mod: discord.User
Returns
-------
bool
Whether the user is considered to be allowed to sync bans to the specified guild
"""
if not guild.me.guild_permissions.ban_members:
return False
mod_member = guild.get_member(mod.id)
if mod_member:
if mod_member.guild_permissions.ban_members:
return True
if await self.bot.is_admin(mod_member):
return True
if await self.bot.is_owner(mod):
return True
return False
async def ban_filter(
self, guild: discord.Guild, mod: UserLike, target: discord.abc.Snowflake
) -> bool:
"""
Determines if the specified user can ban another specified user in a guild
Parameters
----------
guild: discord.Guild
mod: discord.User
target: discord.User
Returns
-------
bool
"""
        # TODO: rewrite more maintainably.
is_owner: bool = await self.bot.is_owner(mod)
mod_member = guild.get_member(mod.id)
if mod_member is None and not is_owner:
return False
        # noted lines below contain a superfluous condition, covered above, to help mypy
can_ban: bool = guild.me.guild_permissions.ban_members
if not is_owner and mod_member is not None: # note
can_ban &= mod_member.guild_permissions.ban_members
target_m = guild.get_member(target.id)
if target_m is not None and mod_member is not None: # note
can_ban &= guild.me.top_role > target_m.top_role or guild.me == guild.owner
can_ban &= target_m != guild.owner
if not is_owner:
can_ban &= (
mod_member.top_role > target_m.top_role or mod_member == guild.owner
)
return can_ban
async def ban_or_hackban(
self,
guild: discord.Guild,
_id: int,
*,
mod: UserLike,
reason: Optional[str] = None,
) -> bool:
"""
        Attempts to ban a user in a guild, suppressing errors and just returning success or failure
Parameters
----------
guild: discord.Guild
_id: int
mod: discord.User
reason: :obj:`str`, optional
Returns
-------
bool
Whether the ban was successful
"""
member: Optional[UserLike] = guild.get_member(_id)
reason = reason or "Ban synchronization"
if member is None:
member = mock_user(_id)
if not await self.ban_filter(guild, mod, member):
return False
try:
await guild.ban(member, reason=reason, delete_message_days=0)
except discord.HTTPException:
return False
else:
await create_case(
self.bot, guild, datetime.utcnow(), "ban", member, mod, reason
)
return True
async def guild_discovery(
self,
ctx: commands.Context,
*,
excluded: Optional[Collection[discord.Guild]] = None,
considered: Optional[Collection[discord.Guild]] = None,
) -> AsyncIterator[discord.Guild]:
"""
Fetches guilds which can be considered for synchronization in the current context (lazily)
Parameters
----------
ctx: commands.Context
excluded: Set[discord.Guild]
        considered: Set[discord.Guild]
Yields
-------
discord.Guild
The next guild for use
"""
considered, excluded = considered or (), excluded or ()
for g in sorted(considered, key=lambda s: s.name):
if g not in excluded and await self.can_sync(g, ctx.author):
yield g
async def interactive(self, ctx: commands.Context, excluded: GuildSet):
guilds = [
g
async for g in self.guild_discovery(
ctx, excluded=excluded, considered=self.bot.guilds
)
]
if not guilds:
return -1
output = "\n".join(
(
*(f"{i}: {guild.name}" for i, guild in enumerate(guilds, 1)),
(
"Select a server to add to the sync list by number, "
"or -1 to stop adding servers"
),
)
)
page_gen = cast(Generator[str, None, None], pagify(output, delims=["\n"]))
try:
for page in page_gen:
await ctx.send(box(page))
finally:
page_gen.close()
def pred(m):
return m.channel == ctx.channel and m.author == ctx.author
try:
message = await self.bot.wait_for("message", check=pred, timeout=60)
except asyncio.TimeoutError:
return -2
else:
try:
message = int(message.content.strip())
if message == -1:
return -1
return guilds[message - 1]
except (ValueError, IndexError):
await ctx.send("That wasn't a valid choice")
return None
async def process_sync(
self,
*,
usr: discord.User,
sources: GuildSet,
dests: GuildSet,
auto: bool = False,
shred_ratelimits: bool = False,
) -> None:
"""
Processes a synchronization of bans
Parameters
----------
usr: discord.User
The user who authorized the synchronization
sources: Set[discord.Guild]
The guilds to sync from
dests: Set[discord.Guild]
The guilds to sync to
auto: bool
            defaults to False; when True, the destination guilds are filtered
            down to those that have opted into automatic actions.
shred_ratelimits: bool
defaults false, allows for bypassing anti-choke measures.
"""
bans: Dict[int, Set[discord.User]] = {}
banlist: Set[discord.User] = set()
if auto:
opt_ins: List[int] = await self.config.opted_into_automatic()
dests = {g for g in dests if g.id in opt_ins}
guilds: GuildSet = sources | dests
for guild in guilds:
bans[guild.id] = set()
try:
g_bans = {x.user for x in await guild.bans()}
except discord.HTTPException as exc:
log.exception(
"Unhandled exception during ban synchronization", exc_info=exc
)
else:
bans[guild.id].update(g_bans)
if guild in sources:
banlist.update(g_bans)
tasks = []
if shred_ratelimits and await self.bot.is_owner(usr):
artificial_delay = 0.0
else:
            base_limit: float = await self.config.per_request_base_ratelimit()
            fractional_usage: float = await self.config.fractional_usage()
            artificial_delay = (base_limit / fractional_usage) * len(dests)
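            # Worked example with the config defaults: 0.02 / 0.5 = 0.04s
            # per destination, so a sync to 4 destination guilds paces
            # each ban by 0.04 * 4 = 0.16s.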
for guild in dests:
to_ban = banlist - bans[guild.id]
tasks.append(
self.do_bans_for_guild(
guild=guild,
mod=usr,
targets=to_ban,
artificial_delay=artificial_delay,
)
)
await asyncio.gather(*tasks)
async def do_bans_for_guild(
self,
*,
guild: discord.Guild,
mod: discord.User,
targets: Set[discord.User],
artificial_delay: float,
):
"""
This exists to speed up large syncs and consume ratelimits concurrently
"""
count = 0
for target in targets:
if await self.ban_filter(guild, mod, target):
await self.ban_or_hackban(
guild, target.id, mod=mod, reason="Ban synchronization"
)
if artificial_delay:
await asyncio.sleep(artificial_delay)
else:
count += 1
                if count % 10 == 0:
# This is a safety measure to run infrequently
# but only if no per ban delay is there already.
await asyncio.sleep(0.1)
@commands.command(name="bansync")
async def ban_sync(self, ctx, auto=False):
"""
syncs bans across servers
"""
guilds: GuildSet = set()
if not auto:
while True:
s = await self.interactive(ctx, guilds)
if s == -1:
break
if s == -2:
return await ctx.send("You took too long, try again later")
if s is not None:
guilds.add(s)
elif auto is True:
opt_ins = await self.config.opted_into_automatic()
guilds = {
g
for g in self.bot.guilds
if g.id in opt_ins and await self.can_sync(g, ctx.author)
}
if len(guilds) < 2:
return await ctx.send("I need at least two servers to sync")
async with ctx.typing():
await self.process_sync(usr=ctx.author, sources=guilds, dests=guilds)
await ctx.tick()
@checks.is_owner()
@commands.command(name="syndicatebans")
async def syndicated_bansync(self, ctx, *, query: SyndicatedConverter):
"""
Push bans from one or more servers to one or more others.
This is not bi-directional, use `[p]bansync` for that.
Usage:
`[p]syndicatebans --sources id(s) [--destinations id(s) | --auto-destinations]`
"""
async with ctx.typing():
kwargs = query.to_dict()
await self.process_sync(**kwargs)
await ctx.tick()
@syndicated_bansync.error
async def syndicated_converter_handler(
self, ctx, wrapped_error: commands.CommandError
):
"""
Parameters
----------
ctx: commands.Context
wrapped_error: commmands.CommandError
"""
error = getattr(wrapped_error, "original", wrapped_error)
if isinstance(error, ParserError):
if error.args:
await ctx.send(error.args[0])
else:
await ctx.bot.on_command_error(ctx, wrapped_error, unhandled_by_cog=True)
@commands.command(name="mjolnir", aliases=["globalban"])
async def mjolnir(self, ctx, users: Greedy[MentionOrID], *, reason: str = ""):
"""
Swing the heaviest of ban hammers
"""
async with ctx.typing():
banned = [
await self.targeted_global_ban(ctx, user.id, rsn=reason)
for user in users
]
if any(banned):
await ctx.message.add_reaction("\N{HAMMER}")
else:
await ctx.send("You are not worthy")
@commands.command()
async def unglobalban(self, ctx, users: Greedy[MentionOrID], *, reason: str = ""):
"""
To issue forgiveness.
Or to fix a fuckup.
"""
async def unban(
guild: discord.Guild, *user_ids: int, rsn: Optional[str] = None
) -> Tuple[discord.Guild, Set[int]]:
failures = set()
it = iter(user_ids)
for user_id in it:
try:
await guild.unban(discord.Object(id=user_id), reason=rsn)
except discord.NotFound:
pass # Not banned, don't count this as a failure
except discord.Forbidden:
failures.add(user_id)
break # This can happen due to 2FA or a permission cache issue
except discord.HTTPException as exc:
log.exception(
"Details of failed ban for user id %d in guild with id %d",
user_id,
guild.id,
exc_info=exc,
)
break
for skipped_by_break in it:
failures.add(skipped_by_break)
return guild, failures
to_consider: GuildSet = {
g
for g in self.bot.guilds
if g.id in await self.config.opted_into_automatic()
}
guilds = [
g
async for g in self.guild_discovery(
ctx, excluded=None, considered=to_consider
)
]
if not guilds:
return await ctx.send(
"No guilds are currently opted into automatic actions "
"(Manual unbans or opt-ins required)"
)
uids = [u.id for u in users]
tasks = [unban(guild, *uids, rsn=(reason or None)) for guild in guilds]
async with ctx.typing():
results = await asyncio.gather(*tasks)
body = "\n\n".join(
f"Unsuccessful unbans (by user ID) in guild: "
f"{guild.name}({guild.id}):\n{', '.join(map(str, fails))}"
for (guild, fails) in results
if fails
)
if not body:
return await ctx.tick()
message = (
f"Some unbans were unsuccesful, see below for a list of failures.\n\n{body}"
)
page_gen = cast(Generator[str, None, None], pagify(message))
try:
for page in page_gen:
await ctx.send(page)
finally:
page_gen.close()
async def targeted_global_ban(
self,
ctx: commands.Context,
user: Union[discord.Member, int],
rsn: Optional[str] = None,
) -> bool:
"""
Bans a user everywhere the current moderator is allowed to,
except the exclusions in config
Parameters
----------
ctx: commands.Context
context the ban was issued from.
user: Union[discord.Member, int]
the target of the ban
rsn: :obj:`str`, optional
the reason to pass to discord for the ban.
Returns
-------
bool
Whether the user was banned from at least 1 guild by this action.
"""
# passing an empty string reason to the gateway is bad.
rsn = rsn or None
_id: int = getattr(user, "id", user)
opt_ins: GuildSet = {
g
for g in self.bot.guilds
if g.id in await self.config.opted_into_automatic()
}
exit_codes = [
await self.ban_or_hackban(guild, _id, mod=ctx.author, reason=rsn)
async for guild in self.guild_discovery(ctx, considered=opt_ins)
]
return any(exit_codes)
| 31.665765 | 98 | 0.559421 |
dae43a42eb15816cf6cd611868e1746cbb365fae | 42 | py | Python | pdip/integrator/connection/types/sql/dialects/oracle/__init__.py | ahmetcagriakca/pdip | c4c16d5666a740154cabdc6762cd44d98b7bdde8 | [
"MIT"
] | 2 | 2021-12-09T21:07:46.000Z | 2021-12-11T22:18:01.000Z | pdip/integrator/connection/types/sql/dialects/oracle/__init__.py | PythonDataIntegrator/pdip | c4c16d5666a740154cabdc6762cd44d98b7bdde8 | [
"MIT"
] | null | null | null | pdip/integrator/connection/types/sql/dialects/oracle/__init__.py | PythonDataIntegrator/pdip | c4c16d5666a740154cabdc6762cd44d98b7bdde8 | [
"MIT"
] | 3 | 2021-11-15T00:47:00.000Z | 2021-12-17T11:35:45.000Z | from .oracle_dialect import OracleDialect
| 21 | 41 | 0.880952 |
381b50cf2a4cabf03c93428f21fb58bb95b5316e | 1,135 | py | Python | classgrade/gradapp/migrations/0004_auto_20160921_2150.py | classgrade/classgrade | 144dcfc9579e6858ff4aa79835c76b9611ed73b2 | [
"MIT"
] | 5 | 2016-11-15T17:46:27.000Z | 2022-01-10T08:06:17.000Z | classgrade/gradapp/migrations/0004_auto_20160921_2150.py | classgrade/classgrade | 144dcfc9579e6858ff4aa79835c76b9611ed73b2 | [
"MIT"
] | 21 | 2016-11-07T14:58:22.000Z | 2021-02-02T21:41:12.000Z | classgrade/gradapp/migrations/0004_auto_20160921_2150.py | classgrade/classgrade | 144dcfc9579e6858ff4aa79835c76b9611ed73b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-21 21:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gradapp', '0003_assignmentype_title'),
]
operations = [
migrations.RemoveField(
model_name='assignmentype',
name='list_student',
),
migrations.AddField(
model_name='assignmentype',
name='list_students',
field=models.FileField(blank=True, null=True, upload_to=''),
),
migrations.AlterField(
model_name='assignment',
name='document',
field=models.FileField(blank=True, null=True, upload_to=''),
),
migrations.AlterField(
model_name='assignmentype',
name='file_type',
field=models.CharField(default='ipynb', max_length=20),
),
migrations.AlterField(
model_name='assignmentype',
name='title',
field=models.CharField(default='', max_length=100),
),
]
| 28.375 | 72 | 0.577093 |
0fdadc5b5bfe8c596acdef7379a4f49c6522a956 | 4,441 | py | Python | dependencies/pystache/pystache/loader.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 21 | 2015-01-16T05:10:02.000Z | 2021-06-11T20:48:15.000Z | dependencies/pystache/pystache/loader.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 1 | 2019-09-09T12:10:27.000Z | 2020-05-22T10:12:14.000Z | dependencies/pystache/pystache/loader.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 2 | 2015-05-03T04:51:08.000Z | 2018-08-24T08:28:53.000Z | # coding: utf-8
"""
This module provides a Loader class for locating and reading templates.
"""
import os
import sys
from pystache import common
from pystache import defaults
from pystache.locator import Locator
# We make a function so that the current defaults take effect.
# TODO: revisit whether this is necessary.
def _make_to_unicode():
def to_unicode(s, encoding=None):
"""
Raises a TypeError exception if the given string is already unicode.
"""
if encoding is None:
encoding = defaults.STRING_ENCODING
return unicode(s, encoding, defaults.DECODE_ERRORS)
return to_unicode
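# Usage sketch (the template name and search directory are assumptions):
#
#     loader = Loader(search_dirs=['templates'], extension='mustache')
#     text = loader.load_name('greeting')  # reads templates/greeting.mustache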
class Loader(object):
"""
Loads the template associated to a name or user-defined object.
"""
def __init__(self, file_encoding=None, extension=None, to_unicode=None,
search_dirs=None):
"""
Construct a template loader instance.
Arguments:
extension: the template file extension. Pass False for no
extension (i.e. to use extensionless template files).
Defaults to the package default.
file_encoding: the name of the encoding to use when converting file
contents to unicode. Defaults to the package default.
search_dirs: the list of directories in which to search when loading
a template by name or file name. Defaults to the package default.
to_unicode: the function to use when converting strings of type
str to unicode. The function should have the signature:
to_unicode(s, encoding=None)
It should accept a string of type str and an optional encoding
name and return a string of type unicode. Defaults to calling
Python's built-in function unicode() using the package string
encoding and decode errors defaults.
"""
if extension is None:
extension = defaults.TEMPLATE_EXTENSION
if file_encoding is None:
file_encoding = defaults.FILE_ENCODING
if search_dirs is None:
search_dirs = defaults.SEARCH_DIRS
if to_unicode is None:
to_unicode = _make_to_unicode()
self.extension = extension
self.file_encoding = file_encoding
# TODO: unit test setting this attribute.
self.search_dirs = search_dirs
self.to_unicode = to_unicode
def _make_locator(self):
return Locator(extension=self.extension)
def unicode(self, s, encoding=None):
"""
Convert a string to unicode using the given encoding, and return it.
This function uses the underlying to_unicode attribute.
Arguments:
s: a basestring instance to convert to unicode. Unlike Python's
built-in unicode() function, it is okay to pass unicode strings
to this function. (Passing a unicode string to Python's unicode()
with the encoding argument throws the error, "TypeError: decoding
Unicode is not supported.")
encoding: the encoding to pass to the to_unicode attribute.
Defaults to None.
"""
if isinstance(s, unicode):
return unicode(s)
return self.to_unicode(s, encoding)
def read(self, path, encoding=None):
"""
Read the template at the given path, and return it as a unicode string.
"""
b = common.read(path)
if encoding is None:
encoding = self.file_encoding
return self.unicode(b, encoding)
# TODO: unit-test this method.
def load_name(self, name):
"""
Find and return the template with the given name.
Arguments:
name: the name of the template.
search_dirs: the list of directories in which to search.
"""
locator = self._make_locator()
path = locator.find_name(name, self.search_dirs)
return self.read(path)
# TODO: unit-test this method.
def load_object(self, obj):
"""
Find and return the template associated to the given object.
Arguments:
obj: an instance of a user-defined class.
search_dirs: the list of directories in which to search.
"""
locator = self._make_locator()
path = locator.find_object(obj, self.search_dirs)
return self.read(path)
| 28.286624 | 79 | 0.635217 |
958e682d3c5e0e5b185d17731f0bf798d4a38e12 | 682 | py | Python | kyu6/tests/test_autocomplete.py | juanshishido/codewars | d50d4ac07ddcc00fa2f7c367ec39cdb5e274d8da | [
"MIT"
] | null | null | null | kyu6/tests/test_autocomplete.py | juanshishido/codewars | d50d4ac07ddcc00fa2f7c367ec39cdb5e274d8da | [
"MIT"
] | null | null | null | kyu6/tests/test_autocomplete.py | juanshishido/codewars | d50d4ac07ddcc00fa2f7c367ec39cdb5e274d8da | [
"MIT"
] | null | null | null | import unittest
from kyu6.autocomplete import autocomplete
class TestAutocomplete(unittest.TestCase):
dictionary = ['abnormal', 'arm-wrestling', 'absolute', 'airplane',
'airport', 'amazing', 'apple', 'ball' ]
def test_ai(self):
self.assertEquals(['airplane', 'airport'],
autocomplete('ai', self.dictionary))
def test_a(self):
self.assertEquals(['abnormal', 'arm-wrestling',
'absolute', 'airplane', 'airport'],
autocomplete('a', self.dictionary))
def test_nonalpha(self):
self.assertEquals(['ball'], autocomplete('b$%^', self.dictionary))
| 31 | 74 | 0.582111 |
ad75c375d5ac63e18266769c3c87c3959c776c9b | 858 | py | Python | Server/Server/Captcha/Util.py | mythsman/CatpchaRecognition | 445985cd4df6147657a3b52ae38552016a771c7a | [
"MIT"
] | 7 | 2017-03-02T09:08:37.000Z | 2022-02-26T22:14:59.000Z | Server/Server/Captcha/Util.py | mythsman/CatpchaRecognition | 445985cd4df6147657a3b52ae38552016a771c7a | [
"MIT"
] | null | null | null | Server/Server/Captcha/Util.py | mythsman/CatpchaRecognition | 445985cd4df6147657a3b52ae38552016a771c7a | [
"MIT"
] | null | null | null | '''
Created on Jul 16, 2016
@author: myths
'''
import matplotlib.pyplot as plt
import cv2, numpy as np
def showImage(img):
plt.imshow(img)
plt.show()
def otsu(img):
_, im = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return im
def resize(img, width, height):
return cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
def bersen(img, blockSize=11, c=2):
return cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize, c)
def opening(img, kernelSize=2):
kernel = np.ones((kernelSize, kernelSize), np.uint8)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
return opening
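# A typical denoising pipeline built from these helpers (sketch; the file
# name and grayscale flag are assumptions):
#
#     img = cv2.imread('captcha.png', cv2.IMREAD_GRAYSCALE)
#     img = otsu(img)
#     img = opening(img)   # drop speckle noise
#     img = closing(img)   # reconnect broken strokes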
def closing(img, kernelSize=2):
kernel = np.ones((kernelSize, kernelSize), np.uint8)
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
return closing | 26.8125 | 107 | 0.710956 |
026bcf7f72396bfd75c236f0a18927b6f512b537 | 1,038 | py | Python | Dangerous/Golismero/tools/sqlmap/plugins/dbms/sybase/takeover.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Golismero/tools/sqlmap/plugins/dbms/sybase/takeover.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Golismero/tools/sqlmap/plugins/dbms/sybase/takeover.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | 1 | 2018-07-04T18:35:16.000Z | 2018-07-04T18:35:16.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.takeover import Takeover as GenericTakeover
class Takeover(GenericTakeover):
def __init__(self):
GenericTakeover.__init__(self)
def osCmd(self):
errMsg = "on Sybase it is not possible to execute commands"
raise SqlmapUnsupportedFeatureException(errMsg)
def osShell(self):
errMsg = "on Sybase it is not possible to execute commands"
raise SqlmapUnsupportedFeatureException(errMsg)
def osPwn(self):
errMsg = "on Sybase it is not possible to establish an "
errMsg += "out-of-band connection"
raise SqlmapUnsupportedFeatureException(errMsg)
def osSmb(self):
errMsg = "on Sybase it is not possible to establish an "
errMsg += "out-of-band connection"
raise SqlmapUnsupportedFeatureException(errMsg)
| 32.4375 | 67 | 0.712909 |
5daca6e3bd1c44a4f22e0d4b5ca5098e546032c5 | 798 | py | Python | backend/app.py | obdura/Connecting-React-Frontend-to-a-Flask-Backend | 9287fdf9a82b7238f65e46335b1adf2d05cfcd41 | [
"MIT"
] | 10 | 2021-09-03T06:34:10.000Z | 2022-03-16T05:30:40.000Z | backend/app.py | obdura/Connecting-React-Frontend-to-a-Flask-Backend | 9287fdf9a82b7238f65e46335b1adf2d05cfcd41 | [
"MIT"
] | null | null | null | backend/app.py | obdura/Connecting-React-Frontend-to-a-Flask-Backend | 9287fdf9a82b7238f65e46335b1adf2d05cfcd41 | [
"MIT"
] | 10 | 2021-09-03T06:34:19.000Z | 2022-03-31T12:56:20.000Z | # Import the required libraries
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_marshmallow import Marshmallow
from flask_cors import CORS
# Create various application instances
# Order matters: Initialize SQLAlchemy before Marshmallow
db = SQLAlchemy()
migrate = Migrate()
ma = Marshmallow()
cors = CORS()
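# Usage sketch of the factory (the entry-point module is an assumption --
# e.g. a run.py next to this package):
#
#     from backend.app import create_app, db
#     app = create_app()
#     with app.app_context():
#         db.create_all()
#     app.run()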
def create_app():
"""Application-factory pattern"""
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///database.db"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Initialize extensions
# To use the application instances above, instantiate with an application:
db.init_app(app)
migrate.init_app(app, db)
ma.init_app(app)
cors.init_app(app)
return app
| 25.741935 | 78 | 0.75188 |
bbeff265b8d713035b422ad7c5663caaa24e1a95 | 404 | py | Python | sdk/python/pulumi_google_native/storage/__init__.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/storage/__init__.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/storage/__init__.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_google_native.storage.v1 as __v1
v1 = __v1
else:
v1 = _utilities.lazy_import('pulumi_google_native.storage.v1')
| 26.933333 | 80 | 0.730198 |
66606be3dc63805a74d475a2db8d5e4a91c76863 | 4,360 | py | Python | lib/galaxy_test/api/test_groups.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 1 | 2021-05-18T02:20:43.000Z | 2021-05-18T02:20:43.000Z | lib/galaxy_test/api/test_groups.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy_test/api/test_groups.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | null | null | null | import json
from typing import Optional
from galaxy_test.base.populators import DatasetPopulator
from ._framework import ApiTestCase
class GroupsApiTestCase(ApiTestCase):
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
    def test_create_valid(self, group_name: Optional[str] = None):
payload = self._build_valid_group_payload(group_name)
response = self._post("groups", payload, admin=True, json=True)
self._assert_status_code_is(response, 200)
group = response.json()[0] # POST /api/groups returns a list
self._assert_valid_group(group)
return group
def test_create_only_admin(self):
response = self._post("groups", json=True)
self._assert_status_code_is(response, 403)
def test_create_invalid_params_raises_400(self):
payload = self._build_valid_group_payload()
payload["name"] = None
response = self._post("groups", payload, admin=True, json=True)
self._assert_status_code_is(response, 400)
def test_create_duplicated_name_raises_409(self):
payload = self._build_valid_group_payload()
response = self._post("groups", payload, admin=True, json=True)
self._assert_status_code_is(response, 200)
response = self._post("groups", payload, admin=True, json=True)
self._assert_status_code_is(response, 409)
def test_index(self):
self.test_create_valid()
response = self._get("groups", admin=True)
self._assert_status_code_is(response, 200)
groups = response.json()
assert isinstance(groups, list)
assert len(groups) > 0
for group in groups:
self._assert_valid_group(group)
def test_index_only_admin(self):
response = self._get("groups")
self._assert_status_code_is(response, 403)
def test_show(self):
group = self.test_create_valid()
group_id = group["id"]
response = self._get(f"groups/{group_id}", admin=True)
self._assert_status_code_is(response, 200)
response_group = response.json()
self._assert_valid_group(response_group)
self._assert_has_keys(response_group, "users_url", "roles_url")
def test_show_only_admin(self):
group = self.test_create_valid()
group_id = group["id"]
response = self._get(f"groups/{group_id}")
self._assert_status_code_is(response, 403)
def test_show_unknown_raises_400(self):
group_id = "invalid-group-id"
response = self._get(f"groups/{group_id}", admin=True)
self._assert_status_code_is(response, 400)
def test_update(self):
group = self.test_create_valid(group_name="group-test")
group_id = group["id"]
updated_name = "group-test-updated"
update_payload = json.dumps({
"name": updated_name,
})
update_response = self._put(f"groups/{group_id}", data=update_payload, admin=True)
self._assert_status_code_is_ok(update_response)
def test_update_only_admin(self):
group = self.test_create_valid()
group_id = group["id"]
response = self._put(f"groups/{group_id}")
self._assert_status_code_is(response, 403)
def test_update_duplicating_name_raises_409(self):
group_a = self.test_create_valid()
group_b = self.test_create_valid()
# Update group_b with the same name as group_a
group_b_id = group_b["id"]
updated_name = group_a["name"]
update_payload = json.dumps({
"name": updated_name,
})
update_response = self._put(f"groups/{group_b_id}", data=update_payload, admin=True)
self._assert_status_code_is(update_response, 409)
def _assert_valid_group(self, group, assert_id=None):
self._assert_has_keys(group, "id", "name", "model_class", "url")
if assert_id is not None:
assert group["id"] == assert_id
    def _build_valid_group_payload(self, name: Optional[str] = None):
name = name or self.dataset_populator.get_random_name()
user_id = self.dataset_populator.user_id()
role_id = self.dataset_populator.user_private_role_id()
payload = {
"name": name,
"user_ids": [user_id],
"role_ids": [role_id],
}
return payload
| 36.949153 | 92 | 0.662156 |
4c0a51b9f0547b54fef80a0add8076eec0045f8f | 5,061 | py | Python | untypy/impl/generator.py | CodeSteak/untypy | b24c7e1a6e41cc86663e2f546508b21f8cbd719a | [
"MIT"
] | 1 | 2021-09-14T15:06:24.000Z | 2021-09-14T15:06:24.000Z | untypy/impl/generator.py | CodeSteak/untypy | b24c7e1a6e41cc86663e2f546508b21f8cbd719a | [
"MIT"
] | null | null | null | untypy/impl/generator.py | CodeSteak/untypy | b24c7e1a6e41cc86663e2f546508b21f8cbd719a | [
"MIT"
] | 1 | 2021-09-14T15:06:29.000Z | 2021-09-14T15:06:29.000Z | import collections.abc
import inspect
import sys
from collections.abc import Generator
from types import GeneratorType
from typing import Any, Optional
from typing import Generator as OtherGenerator
from untypy.error import UntypyTypeError, UntypyAttributeError, Location
from untypy.interfaces import TypeChecker, TypeCheckerFactory, CreationContext, ExecutionContext
from untypy.util import CompoundTypeExecutionContext, NoResponsabilityWrapper
GeneratorTypeA = type(Generator[None, None, None])
GeneratorTypeB = type(OtherGenerator[None, None, None])
class GeneratorFactory(TypeCheckerFactory):
def create_from(self, annotation: Any, ctx: CreationContext) -> Optional[TypeChecker]:
if type(annotation) in [GeneratorTypeA, GeneratorTypeB] and annotation.__origin__ == collections.abc.Generator:
if len(annotation.__args__) != 3:
raise ctx.wrap(UntypyAttributeError(f"Expected 3 type arguments for Generator."))
(yield_checker, send_checker, return_checker) = list(
map(lambda a: ctx.find_checker(a), annotation.__args__))
if yield_checker is None:
raise ctx.wrap(UntypyAttributeError(f"The Yield Annotation of the Generator could not be resolved."))
if send_checker is None:
raise ctx.wrap(UntypyAttributeError(f"The Send Annotation of the Generator could not be resolved."))
if return_checker is None:
raise ctx.wrap(UntypyAttributeError(f"The Return Annotation of the Generator could not be resolved."))
return GeneratorChecker(yield_checker, send_checker, return_checker)
else:
return None
class GeneratorChecker(TypeChecker):
yield_checker: TypeChecker
send_checker: TypeChecker
return_checker: TypeChecker
def __init__(self, yield_checker: TypeChecker, send_checker: TypeChecker, return_checker: TypeChecker):
self.yield_checker = yield_checker
self.send_checker = send_checker
self.return_checker = return_checker
def may_be_wrapped(self) -> bool:
return True
def check_and_wrap(self, arg: Any, ctx: ExecutionContext) -> Any:
if not inspect.isgenerator(arg):
raise ctx.wrap(UntypyTypeError(arg, self.describe()))
me = self
yield_ctx = TypedGeneratorYieldReturnContext(arg, self, True, ctx)
return_ctx = TypedGeneratorYieldReturnContext(arg, self, False, ctx)
def wrapped():
try:
sent = None
while True:
value_yield = arg.send(sent)
# check value_yield (arg is responsable)
value_yield = me.yield_checker.check_and_wrap(value_yield, yield_ctx)
sent = yield value_yield
caller = sys._getframe(1)
# check sent value (caller is responsable)
sent = me.send_checker.check_and_wrap(sent, TypedGeneratorSendContext(caller, me, ctx))
except StopIteration as e:
# check value_returned (arg is responsable)
ret = me.return_checker.check_and_wrap(e.value, return_ctx)
return ret
return wrapped()
def describe(self) -> str:
return f"Generator[{self.yield_checker.describe()}, {self.send_checker.describe()}, {self.return_checker.describe()}]"
def base_type(self) -> Any:
return [GeneratorType]
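    # Illustrative wiring (untypy normally builds checkers through its
    # factories and decorators; the three element checkers and `exec_ctx`
    # below are stand-ins, not real objects from this module):
    #
    #     checker = GeneratorChecker(int_checker, none_checker, str_checker)
    #     gen = checker.check_and_wrap(my_generator(), exec_ctx)
    #     next(gen)   # yielded values are now checked against int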
class TypedGeneratorYieldReturnContext(CompoundTypeExecutionContext):
generator: Generator[Any, Any, Any]
def __init__(self, generator: Generator[Any, Any, Any], checker: GeneratorChecker, is_yield: bool,
upper: ExecutionContext):
self.generator = generator
# index in checkers list
if is_yield:
idx = 0
else:
idx = 2
super().__init__(upper, [checker.yield_checker, checker.send_checker, checker.return_checker], idx)
def name(self) -> str:
return "Generator"
def responsable(self) -> Optional[Location]:
try:
if hasattr(self.generator, 'gi_frame'):
return Location(
file=inspect.getfile(self.generator.gi_frame),
line_no=inspect.getsourcelines(self.generator.gi_frame)[1],
source_line="\n".join(inspect.getsourcelines(self.generator.gi_frame)[0]),
)
except OSError: # this call does not work all the time
pass
except TypeError:
pass
return None
class TypedGeneratorSendContext(CompoundTypeExecutionContext):
def __init__(self, stack: inspect.FrameInfo, checker: GeneratorChecker, upper: ExecutionContext):
self.stack = stack
super().__init__(NoResponsabilityWrapper(upper),
[checker.yield_checker, checker.send_checker, checker.return_checker], 1)
def name(self) -> str:
return "Generator"
def responsable(self) -> Optional[Location]:
return Location.from_stack(self.stack)
| 39.232558 | 126 | 0.659751 |
8077eab42f3064c246e80589bf687e989553355d | 28,620 | py | Python | app/weixin_utils/client.py | w940853815/weixin-robot | 73b7b447241c1a74a14b21c6c11fc652b30f7ebb | [
"Apache-2.0"
] | 12 | 2017-06-24T02:13:22.000Z | 2021-03-16T02:43:40.000Z | app/weixin_utils/client.py | w940853815/weixin-robot | 73b7b447241c1a74a14b21c6c11fc652b30f7ebb | [
"Apache-2.0"
] | 7 | 2017-04-08T07:45:19.000Z | 2020-01-06T05:50:30.000Z | app/weixin_utils/client.py | w940853815/weixin-robot | 73b7b447241c1a74a14b21c6c11fc652b30f7ebb | [
"Apache-2.0"
] | 1 | 2017-07-31T01:09:27.000Z | 2017-07-31T01:09:27.000Z | # -*- coding: utf-8 -*-
# coding=utf-8
__author__ = 'ruidong.wang@tsingdata.com'
import time
import six
import requests
from requests.compat import json as _json
from config_web import WEI_XIN_APP_ID, WEI_XIN_APP_SECRET
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class ClientException(Exception):
pass
def check_error(json):
"""
    Check whether the JSON returned by the WeChat platform contains an error code.
    If the return code signals an error, raise a :class:`ClientException`; otherwise return the JSON data.
"""
if "errcode" in json and json["errcode"] != 0:
raise ClientException("{}: {}".format(json["errcode"], json["errmsg"]))
return json
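# For example, a typical error payload and its effect (values illustrative):
#
#     check_error({"errcode": 40013, "errmsg": "invalid appid"})
#     # -> raises ClientException("40013: invalid appid")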
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
class Client(object):
"""
    Wrapper class for the WeChat API.
    It provides convenient access to a range of WeChat API operations, such as actively pushing messages or creating custom menus.
"""
def __init__(self):
self._token = None
self.token_expires_at = None
@property
def appid(self):
return WEI_XIN_APP_ID
@property
def appsecret(self):
return WEI_XIN_APP_SECRET
def request(self, method, url, **kwargs):
if "params" not in kwargs:
kwargs["params"] = {"access_token": self.token}
if isinstance(kwargs.get("data", ""), dict):
body = _json.dumps(kwargs["data"], ensure_ascii=False)
body = body.encode('utf8')
kwargs["data"] = body
r = requests.request(
method=method,
url=url,
**kwargs
)
r.raise_for_status()
json = r.json()
if check_error(json):
return json
def get(self, url, **kwargs):
return self.request(
method="get",
url=url,
**kwargs
)
def post(self, url, **kwargs):
return self.request(
method="post",
url=url,
**kwargs
)
def grant_token(self):
"""
        Fetch an Access Token.
        :return: the returned JSON data
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/token",
params={
"grant_type": "client_credential",
"appid": self.appid,
"secret": self.appsecret
}
)
def get_access_token(self):
"""
        Return the cached token, refreshing it shortly before it expires.
        Users deploying across multiple processes or machines can override
        this method to customize how the token is stored and refreshed.
        :return: the token
"""
if self._token:
now = time.time()
if self.token_expires_at - now > 60:
return self._token
json = self.grant_token()
self._token = json["access_token"]
self.token_expires_at = int(time.time()) + json["expires_in"]
return self._token
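    # Sketch of the override suggested above: keep the token in Redis so
    # several workers share one token (the redis connection and key name
    # are assumptions):
    #
    #     class RedisTokenClient(Client):
    #         def get_access_token(self):
    #             token = redis_conn.get("wx:access_token")
    #             if token:
    #                 return token.decode()
    #             data = self.grant_token()
    #             redis_conn.setex("wx:access_token",
    #                              data["expires_in"] - 60,
    #                              data["access_token"])
    #             return data["access_token"]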
@property
def token(self):
return self.get_access_token()
def get_ip_list(self):
"""
        Get the list of WeChat server IP addresses.
        :return: the returned JSON data
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/getcallbackip"
)
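    # Basic end-to-end use (appid/appsecret are read from config_web):
    #
    #     client = Client()
    #     print(client.get_ip_list())   # e.g. {"ip_list": [...]}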
def create_menu(self, menu_data):
"""
        Create a custom menu::
client.create_menu({
"button":[
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"type":"click",
"name":"歌手简介",
"key":"V1001_TODAY_SINGER"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}
]
}
]})
        :param menu_data: a Python dict as shown above
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/create",
data=menu_data
)
def get_menu(self):
"""
        Query the current custom menu.
        :return: the returned JSON data
"""
return self.get("https://api.weixin.qq.com/cgi-bin/menu/get")
def delete_menu(self):
"""
        Delete the custom menu.
        :return: the returned JSON data
"""
return self.get("https://api.weixin.qq.com/cgi-bin/menu/delete")
def create_custom_menu(self, menu_data, matchrule):
"""
        Create a conditional (personalized) menu::
button = [
{
"type":"click",
"name":"今日歌曲",
"key":"V1001_TODAY_MUSIC"
},
{
"name":"菜单",
"sub_button":[
{
"type":"view",
"name":"搜索",
"url":"http://www.soso.com/"
},
{
"type":"view",
"name":"视频",
"url":"http://v.qq.com/"
},
{
"type":"click",
"name":"赞一下我们",
"key":"V1001_GOOD"
}]
}]
matchrule = {
"group_id":"2",
"sex":"1",
"country":"中国",
"province":"广东",
"city":"广州",
"client_platform_type":"2",
"language":"zh_CN"
}
client.create_custom_menu(button, matchrule)
        :param menu_data: a Python dict as shown above
        :param matchrule: a match rule as shown above
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/addconditional",
data={
"button": menu_data,
"matchrule": matchrule
}
)
def delete_custom_menu(self, menu_id):
"""
        Delete a conditional (personalized) menu.
        :param menu_id: the ID of the menu
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/delconditional",
data={
"menuid": menu_id
}
)
def match_custom_menu(self, user_id):
"""
        Test which conditional menu a user would match.
        :param user_id: the ID of the user to test
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/menu/trymatch",
data={
"user_id": user_id
}
)
def get_custom_menu_config(self):
"""
        Get the current custom menu configuration.
        :return: the returned JSON data
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info"
)
def add_custom_service_account(self, account, nickname, password):
"""
        Add a customer service account.
        :param account: the username of the customer service account
        :param nickname: the nickname of the customer service account
        :param password: the password of the customer service account
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/add",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
)
def update_custom_service_account(self, account, nickname, password):
"""
        Update a customer service account.
        :param account: the username of the customer service account
        :param nickname: the nickname of the customer service account
        :param password: the password of the customer service account
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/update",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
)
def delete_custom_service_account(self, account, nickname, password):
"""
        Delete a customer service account.
        :param account: the username of the customer service account
        :param nickname: the nickname of the customer service account
        :param password: the password of the customer service account
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/customservice/kfaccount/del",
data={
"kf_account": account,
"nickname": nickname,
"password": password
}
)
def upload_custom_service_account_avatar(self, account, avatar):
"""
        Set the avatar of a customer service account.
        :param account: the username of the customer service account
        :param avatar: the avatar file, which must be a jpg
        :return: the returned JSON data
"""
return self.post(
url="http://api.weixin.qq.com/customservice/kfaccount/uploadheadimg",
params={
"access_token": self.token,
"kf_account": account
},
files={
"media": avatar
}
)
def get_custom_service_account_list(self):
"""
        Get all customer service accounts.
        :return: the returned JSON data
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/customservice/getkflist"
)
def upload_media(self, media_type, media_file):
"""
        Upload a temporary media file.
        :param media_type: the media type: image, voice, video or thumb
        :param media_file: the file to upload, a File-object
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/media/upload",
params={
"access_token": self.token,
"type": media_type
},
files={
"media": media_file
}
)
def download_media(self, media_id):
"""
        Download a temporary media file.
        :param media_id: the ID of the media file
        :return: the requests Response instance
"""
return requests.get(
url="https://api.weixin.qq.com/cgi-bin/media/get",
params={
"access_token": self.token,
"media_id": media_id
}
)
def add_news(self, articles):
"""
        Add a permanent news (image-and-text) item::
articles = [{
"title": TITLE,
"thumb_media_id": THUMB_MEDIA_ID,
"author": AUTHOR,
"digest": DIGEST,
"show_cover_pic": SHOW_COVER_PIC(0 / 1),
"content": CONTENT,
"content_source_url": CONTENT_SOURCE_URL
}
            # To add a multi-article news item, repeat the structure above, up to 8 entries
]
client.add_news(articles)
        :param articles: a list as shown in the example
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/material/add_news",
data={
"articles": articles
}
)
def upload_news_picture(self, file):
"""
        Upload an image used inside a news item.
        :param file: the file to upload, a File-object
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/media/uploadimg",
params={
"access_token": self.token
},
files={
"media": file
}
)
def upload_permanent_media(self, media_type, media_file):
"""
        Upload a permanent media file of another type.
        :param media_type: the media type: image, voice or thumb
        :param media_file: the file to upload, a File-object
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/material/add_material",
params={
"access_token": self.token,
"type": media_type
},
files={
"media": media_file
}
)
def upload_permanent_video(self, title, introduction, video):
"""
        Upload a permanent video.
        :param title: the title of the video
        :param introduction: the description of the video
        :param video: the video to upload, a File-object
        :return: the requests Response instance
"""
return requests.post(
url="https://api.weixin.qq.com/cgi-bin/material/add_material",
params={
"access_token": self.token,
"type": "video"
},
data={
"description": _json.dumps({
"title": title,
"introduction": introduction
}, ensure_ascii=False).encode("utf-8")
},
files={
"media": video
}
)
def download_permanent_media(self, media_id):
"""
        Fetch a permanent media file.
        :param media_id: the ID of the media file
        :return: the requests Response instance
"""
return requests.post(
url="https://api.weixin.qq.com/cgi-bin/material/get_material",
params={
"access_token": self.token
},
data=_json.dumps({
"media_id": media_id
}, ensure_ascii=False).encode("utf-8")
)
def delete_permanent_media(self, media_id):
"""
        Delete a permanent media file.
        :param media_id: the ID of the media file
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/material/del_material",
data={
"media_id": media_id
}
)
def update_news(self, update_data):
"""
        Update a permanent news item::
update_data = {
"media_id":MEDIA_ID,
"index":INDEX,
"articles": {
"title": TITLE,
"thumb_media_id": THUMB_MEDIA_ID,
"author": AUTHOR,
"digest": DIGEST,
"show_cover_pic": SHOW_COVER_PIC(0 / 1),
"content": CONTENT,
"content_source_url": CONTENT_SOURCE_URL
}
}
client.update_news(update_data)
        :param update_data: the update payload; it must contain media_id (the ID of the news item), index (the position within the news item of the article to update) and articles (the new article data)
        :return: the returned JSON data
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/material/update_news",
data=update_data
)
def get_media_count(self):
"""
获取素材总数。
:return: 返回的 JSON 数据包
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/material/get_materialcount"
)
def get_media_list(self, media_type, offset, count):
"""
获取素材列表。
:param media_type: 素材的类型,图片(image)、视频(video)、语音 (voice)、图文(news)
:param offset: 从全部素材的该偏移位置开始返回,0表示从第一个素材返回
:param count: 返回素材的数量,取值在1到20之间
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/material/batchget_material",
data={
"type": media_type,
"offset": offset,
"count": count
}
)
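    # A sketch of paging through permanent news material (editor's addition);
    # ``client`` is hypothetical and the "item" field name follows WeChat's
    # documented response shape, which this module does not itself guarantee:
    #
    #     offset = 0
    #     while True:
    #         page = client.get_media_list("news", offset, 20)
    #         if not page.get("item"):
    #             break
    #         offset += len(page["item"])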
def create_group(self, name):
"""
创建分组。
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
name = to_text(name)
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/create",
data={"group": {"name": name}}
)
def get_groups(self):
"""
查询所有分组。
:return: 返回的 JSON 数据包
"""
return self.get("https://api.weixin.qq.com/cgi-bin/groups/get")
def get_group_by_id(self, openid):
"""
查询用户所在分组。
:param openid: 用户的OpenID
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/getid",
data={"openid": openid}
)
def update_group(self, group_id, name):
"""
修改分组名。
:param group_id: 分组 ID,由微信分配
:param name: 分组名字(30个字符以内)
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/update",
data={"group": {
"id": int(group_id),
"name": to_text(name)
}}
)
def move_user(self, user_id, group_id):
"""
移动用户分组。
:param user_id: 用户 ID,即收到的 `Message` 的 source
:param group_id: 分组 ID
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/members/update",
data={
"openid": user_id,
"to_groupid": group_id
}
)
def move_users(self, user_id_list, group_id):
"""
批量移动用户分组。
:param user_id_list: 用户 ID 的列表(长度不能超过50)
:param group_id: 分组 ID
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/members/batchupdate",
data={
"openid_list": user_id_list,
"to_groupid": group_id
}
)
def delete_group(self, group_id):
"""
删除分组。
:param group_id: 要删除的分组的 ID
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/groups/delete",
data={
"group": {
"id": group_id
}
}
)
def remark_user(self, user_id, remark):
"""
设置备注名。
:param user_id: 设置备注名的用户 ID
:param remark: 新的备注名,长度必须小于30字符
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/user/info/updateremark",
data={
"openid": user_id,
"remark": remark
}
)
def get_user_info(self, user_id, lang="zh_CN"):
"""
获取用户基本信息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param lang: 返回国家地区语言版本,zh_CN 简体,zh_TW 繁体,en 英语
:return: 返回的 JSON 数据包
"""
return self.get(
url="https://api.weixin.qq.com/cgi-bin/user/info",
params={
"access_token": self.token,
"openid": user_id,
"lang": lang
}
)
def get_users_info(self, user_id_list, lang="zh_CN"):
"""
批量获取用户基本信息。
:param user_id_list: 用户 ID 的列表
:param lang: 返回国家地区语言版本,zh_CN 简体,zh_TW 繁体,en 英语
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/user/info/batchget",
data={
"user_list": [
{"openid": user_id,
"lang": lang} for user_id in user_id_list
]
}
)
def get_followers(self, first_user_id=None):
"""
获取关注者列表
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=获取关注者列表
:param first_user_id: 可选。第一个拉取的OPENID,不填默认从头开始拉取
:return: 返回的 JSON 数据包
"""
params = {
"access_token": self.token
}
if first_user_id:
params["next_openid"] = first_user_id
return self.get(
"https://api.weixin.qq.com/cgi-bin/user/get",
params=params
)
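    # Pagination sketch (editor's addition, ``client`` hypothetical): WeChat
    # is expected to return a "next_openid" cursor, which feeds back in as
    # first_user_id until it comes back empty:
    #
    #     result = client.get_followers()
    #     while result.get("next_openid"):
    #         result = client.get_followers(result["next_openid"])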
def send_text_message(self, user_id, content):
"""
发送文本消息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param content: 消息正文
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "text",
"text": {"content": content}
}
)
def send_image_message(self, user_id, media_id):
"""
发送图片消息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param media_id: 图片的媒体ID。 可以通过 :func:`upload_media` 上传。
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "image",
"image": {
"media_id": media_id
}
}
)
def send_voice_message(self, user_id, media_id):
"""
发送语音消息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param media_id: 发送的语音的媒体ID。 可以通过 :func:`upload_media` 上传。
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "voice",
"voice": {
"media_id": media_id
}
}
)
def send_video_message(self, user_id, media_id,
title=None, description=None):
"""
发送视频消息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param media_id: 发送的视频的媒体ID。 可以通过 :func:`upload_media` 上传。
:param title: 视频消息的标题
:param description: 视频消息的描述
:return: 返回的 JSON 数据包
"""
video_data = {
"media_id": media_id,
}
if title:
video_data["title"] = title
if description:
video_data["description"] = description
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "video",
"video": video_data
}
)
def send_music_message(self, user_id, url, hq_url, thumb_media_id,
title=None, description=None):
"""
发送音乐消息。
注意如果你遇到了缩略图不能正常显示的问题, 不要慌张; 目前来看是微信服务器端的问题。
对此我们也无能为力 ( `#197 <https://github.com/whtsky/WeRoBot/issues/197>`_ )
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param url: 音乐链接
:param hq_url: 高品质音乐链接,wifi环境优先使用该链接播放音乐
:param thumb_media_id: 缩略图的媒体ID。 可以通过 :func:`upload_media` 上传。
:param title: 音乐标题
:param description: 音乐描述
:return: 返回的 JSON 数据包
"""
music_data = {
"musicurl": url,
"hqmusicurl": hq_url,
"thumb_media_id": thumb_media_id
}
if title:
music_data["title"] = title
if description:
music_data["description"] = description
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "music",
"music": music_data
}
)
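    # Hedged example (editor's addition); the URLs, media ID and ``user_id``
    # are placeholders:
    #
    #     client.send_music_message(
    #         user_id, "http://example.com/a.mp3",
    #         "http://example.com/a_hq.mp3", "THUMB_MEDIA_ID",
    #         title="Song", description="Demo")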
# def send_article_message(self, user_id, articles):
# """
    # Send a news (article) message::
#
# articles = [
# {
# "title":"Happy Day",
# "description":"Is Really A Happy Day",
# "url":"URL",
# "picurl":"PIC_URL"
# },
# {
# "title":"Happy Day",
# "description":"Is Really A Happy Day",
# "url":"URL",
# "picurl":"PIC_URL"
# }
# ]
    #     client.send_article_message("user_id", articles)
#
    # :param user_id: the user ID, i.e. the source of the received `Message`
    # :param articles: a list of at most 8 article dicts or Article objects
    # :return: the parsed JSON response
# """
# if isinstance(articles[0], Article):
# formatted_articles = []
# for article in articles:
# result = article.args
# result["picurl"] = result.pop("img")
# formatted_articles.append(result)
# else:
# formatted_articles = articles
# return self.post(
# url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
# data={
# "touser": user_id,
# "msgtype": "news",
# "news": {
# "articles": formatted_articles
# }
# }
# )
def send_news_message(self, user_id, media_id):
"""
发送永久素材中的图文消息。
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param media_id: 媒体文件 ID
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "mpnews",
"mpnews": {
"media_id": media_id
}
}
)
def create_qrcode(self, data):
"""
创建二维码。
:param data: 你要发送的参数 dict
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/qrcode/create",
data=data
)
def show_qrcode(self, ticket):
"""
通过ticket换取二维码。
:param ticket: 二维码 ticket 。可以通过 :func:`create_qrcode` 获取到
:return: 返回的 Request 对象
"""
return requests.get(
url="https://mp.weixin.qq.com/cgi-bin/showqrcode",
params={
"ticket": ticket
}
)
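    # A sketch tying create_qrcode and show_qrcode together (editor's
    # addition); the scene payload follows WeChat's documented shape for a
    # permanent QR code but is an assumption here:
    #
    #     ticket = client.create_qrcode({
    #         "action_name": "QR_LIMIT_SCENE",
    #         "action_info": {"scene": {"scene_id": 123}}
    #     })["ticket"]
    #     with open("qrcode.jpg", "wb") as f:
    #         f.write(client.show_qrcode(ticket).content)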
def send_template_message(self, user_id, template_id, data, url=''):
"""
发送模板消息
详情请参考 http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html
:param user_id: 用户 ID 。 就是你收到的 `Message` 的 source
:param template_id: 模板 ID。
:param data: 用于渲染模板的数据。
:param url: 模板消息的可选链接。
:return: 返回的 JSON 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/template/send",
data={
"touser": user_id,
"template_id": template_id,
"url": url,
"data": data
}
)
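    # Template data sketch (editor's addition); the template ID and field
    # names are assumptions, since they depend entirely on the template
    # configured in the MP console:
    #
    #     client.send_template_message(
    #         user_id,
    #         "TEMPLATE_ID",
    #         {"first": {"value": "Hello", "color": "#173177"}},
    #         url="http://example.com")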
if __name__ == '__main__':
c = Client()
    print(c.get_access_token())
c.create_custom_menu({
"button":[
{
"type":"click",
"name":"机器人简介",
"key":"V1001_TODAY_SUMMARY"
},
{
"type":"click",
"name":"开发人员",
"key":"V1001_TODAY_DEVELOPER"
},
{
"name":"功能",
"sub_button":[
{
"type":"click",
"name":"聊天",
"url":"V1001_TODAY_TALK"
},
{
"type":"view",
"name":"教我学习",
"url":"http://123.207.139.130"
},
{
"type":"click",
"name":"问问百度百科",
"key":"V1001_TODAY_BAIKE"
},
{
"type": "click",
"name": "和图灵机器人聊天",
"key" : "V1001_TODAY_TULING"
},
{
"type": "click",
"name": "问问知乎",
"key" : "V1001_TODAY_ZHIHU"
},
]
}
]},
matchrule={
"group_id" : "2",
"sex" : "1",
"country" : "中国",
"province" : "山西",
"city" : "太原",
"client_platform_type": "2",
"language" : "zh_CN"
}
)
| 28.003914 | 98 | 0.4471 |
737e6b4782c9dde363de803b8f50217d86852815 | 5,111 | py | Python | gocd_tools/defaults.py | rasmunk/gocd-tools | 2567756d993d1b323bde2b2903457df598f19dc6 | [
"MIT"
] | null | null | null | gocd_tools/defaults.py | rasmunk/gocd-tools | 2567756d993d1b323bde2b2903457df598f19dc6 | [
"MIT"
] | null | null | null | gocd_tools/defaults.py | rasmunk/gocd-tools | 2567756d993d1b323bde2b2903457df598f19dc6 | [
"MIT"
] | null | null | null | import os
from gocd_tools.utils import is_env_set
PACKAGE_NAME = "gocd-tools"
# API Request Defaults
CONTENT_TYPE = "application/json"
JSON_HEADER = {"Content-Type": CONTENT_TYPE}
API_VERSION_1 = "application/vnd.go.cd.v1+json"
API_VERSION_2 = "application/vnd.go.cd.v2+json"
API_VERSION_3 = "application/vnd.go.cd.v3+json"
API_VERSION_4 = "application/vnd.go.cd.v4+json"
API_VERSION_7 = "application/vnd.go.cd.v7+json"
ACCEPT_HEADER_1 = {"Accept": API_VERSION_1, **JSON_HEADER}
ACCEPT_HEADER_2 = {"Accept": API_VERSION_2, **JSON_HEADER}
ACCEPT_HEADER_3 = {"Accept": API_VERSION_3, **JSON_HEADER}
ACCEPT_HEADER_4 = {"Accept": API_VERSION_4, **JSON_HEADER}
ACCEPT_HEADER_7 = {"Accept": API_VERSION_7, **JSON_HEADER}
GITHUB_API_VERSION = "application/vnd.github.v3+json"
GITHUB_ACCEPT_HEADER = {"Accept": GITHUB_API_VERSION}
# API default endpoints
if "GOCD_BASE_URL" in os.environ:
GOCD_BASE_URL = os.environ["GOCD_BASE_URL"]
if GOCD_BASE_URL == "":
print(
"The require environment variable GOCD_BASE_URL was empty: {}".format(
GOCD_BASE_URL
)
)
exit(1)
# Github URLs
GITHUB_AUTH_URL = "https://api.github.com/user"
GITHUB_GOCD_AUTH_URL = ""
# Public URLs
GO_URL = "{}/go".format(GOCD_BASE_URL)
API_URL = "{}/api".format(GO_URL)
AUTH_URL = "{}/current_user".format(API_URL)
ELASTIC_AGENT_URL = "{}/elastic/profiles".format(API_URL)
ADMIN_URL = "{}/admin".format(API_URL)
SECURITY_URL = "{}/security".format(ADMIN_URL)
ROLE_URL = "{}/roles".format(SECURITY_URL)
# Restricted URLs
AUTHORIZATION_CONFIG_URL = "{}/auth_configs".format(SECURITY_URL)
CLUSTER_PROFILES_URL = "{}/elastic/cluster_profiles".format(ADMIN_URL)
PIPELINE_GROUPS_URL = "{}/pipeline_groups".format(ADMIN_URL)
CONFIG_REPO_URL = "{}/config_repos".format(ADMIN_URL)
TEMPLATE_URL = "{}/templates".format(ADMIN_URL)
SECRET_CONFIG_URL = "{}/secret_configs".format(ADMIN_URL)
# Server config
CONFIG_SERVER = "{}/config/server".format(ADMIN_URL)
ARTIFACT_CONFIG = "{}/artifact_config".format(CONFIG_SERVER)
if "GOCD_AUTH_TOKEN" in os.environ:
GOCD_AUTH_TOKEN = os.environ["GOCD_AUTH_TOKEN"]
    # The GOCD_AUTH_TOKEN is the one generated within the GoCD server
    # (not GitHub)
else:
GOCD_AUTH_TOKEN = ""
if "GITHUB_AUTH_TOKEN" in os.environ:
GITHUB_AUTH_TOKEN = os.environ["GITHUB_AUTH_TOKEN"]
else:
GITHUB_AUTH_TOKEN = ""
AUTHORIZATION_HEADER = {
"Authorization": "bearer {}".format(GOCD_AUTH_TOKEN),
**ACCEPT_HEADER_1,
}
GITHUB_AUTHORIZATION_HEADER = {
"Authorization": "token {}".format(GITHUB_AUTH_TOKEN),
**GITHUB_ACCEPT_HEADER,
}
# Server defaults
GO_DATA_DIR = "/godata"
GO_SECRET_DIR = "/gosecret"
GO_SECRET_DB_FILE = "secrets.yml"
GO_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".{}".format(PACKAGE_NAME))
GO_PLUGIN_DIR = os.path.join(GO_DATA_DIR, "plugins")
# Other constants
GOCD_SECRET_PLUGIN = "gocd-file-based-secrets-plugin.jar"
BUNDLED_PLUGIN = "bundled"
EXTERNAL_PLUGIN = "external"
# Environment variables
ENV_GO_DATA_DIR = "GO_DATA_DIR"
ENV_GO_SECRET_DIR = "GO_SECRET_DIR"
ENV_GO_SECRET_DB_FILE = "GO_SECRET_DB_FILE"
ENV_GO_CONFIG_DIR = "GO_CONFIG_DIR"
ENV_GO_PLUGIN_DIR = "GO_PLUGIN_DIR"
# Default configuration input paths
default_base_path = GO_CONFIG_DIR
default_config_path = os.path.join(default_base_path, "config")
artifacts_config_path = os.path.join(default_config_path, "artifacts_config.yml")
authorization_config_path = os.path.join(
default_config_path, "authorization_configuration.yml"
)
cluster_profiles_path = os.path.join(default_config_path, "cluster_profiles.yml")
config_repositories_path = os.path.join(default_config_path, "config_repositories.yml")
elastic_agent_profile_path = os.path.join(
default_config_path, "elastic_agent_profiles.yml"
)
pipeline_group_configs_path = os.path.join(
default_config_path, "pipeline_group_configs.yml"
)
roles_path = os.path.join(default_config_path, "roles.yml")
secret_configs_path = os.path.join(default_config_path, "secret_configs.yml")
templates_path = os.path.join(default_config_path, "templates.yml")
# Datadir discover
def get_data_dir():
data_path, msg = is_env_set(ENV_GO_DATA_DIR)
if not data_path:
if GO_DATA_DIR:
return GO_DATA_DIR, ""
return False, msg
return data_path, ""
# Secrets discover
def get_secrets_dir_path():
env_dir_path, msg = is_env_set(ENV_GO_SECRET_DIR)
if not env_dir_path:
# Use the default as a backup
if GO_SECRET_DIR:
return GO_SECRET_DIR, ""
return False, msg
return env_dir_path, ""
def get_secrets_file_name():
secrets_name, msg = is_env_set(ENV_GO_SECRET_DB_FILE)
if not secrets_name:
# Use the default as a backup
if GO_SECRET_DB_FILE:
return GO_SECRET_DB_FILE, ""
return False, msg
return secrets_name, ""
def get_secrets_db_path():
dir_path, msg = get_secrets_dir_path()
if not dir_path:
return False, msg
file_name, msg = get_secrets_file_name()
if not file_name:
return False, msg
return os.path.join(dir_path, file_name), ""
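# A hedged usage sketch (editor's addition, not part of the original module):
# resolve the secrets database path, falling back to the bundled defaults
# when GO_SECRET_DIR / GO_SECRET_DB_FILE are unset.
#
#     db_path, msg = get_secrets_db_path()
#     if not db_path:
#         raise RuntimeError(msg)
#     print("Secrets DB lives at: {}".format(db_path))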
| 30.975758 | 87 | 0.741734 |