hexsha (stringlengths 40–40) | size (int64 2–1.05M) | ext (stringclasses 9 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4–193) | max_stars_repo_name (stringlengths 6–109) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1–36.6k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 4–193) | max_issues_repo_name (stringlengths 6–109) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1–29.8k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 4–193) | max_forks_repo_name (stringlengths 6–109) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1–11.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 2–1.05M) | avg_line_length (float64 1–404k) | max_line_length (int64 1–1.03M) | alphanum_fraction (float64 0–1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71e77d479a5c19a10183f4785ab075fdd327612 | 380 | py | Python | vfio_isolate/action/action.py | spheenik/vfio-isolate | 6d6a1f0d5e5d84a5ad9911c635a81b86710d12d5 | [
"MIT"
] | 44 | 2020-05-03T15:03:32.000Z | 2022-03-23T19:03:23.000Z | vfio_isolate/action/action.py | darkguy2008/vfio-isolate | 6c16cf363a627f02202586a17df58522e097ef10 | [
"MIT"
] | 7 | 2020-08-18T10:17:14.000Z | 2022-01-14T14:18:47.000Z | vfio_isolate/action/action.py | darkguy2008/vfio-isolate | 6c16cf363a627f02202586a17df58522e097ef10 | [
"MIT"
] | 6 | 2020-06-02T05:29:34.000Z | 2022-02-04T17:12:40.000Z | from dataclasses import dataclass
from typing import Generator
@dataclass
class Execution:
action: type
params: object
class Action:
@classmethod
def can_execute(cls, p):
return True
@classmethod
def execute(cls, p):
pass
@classmethod
    def record_undo(cls, p) -> Generator[Execution, None, None]:
        # The unreachable `yield` makes this an empty generator rather than a plain function returning None.
        return
        yield
| 15.2 | 64 | 0.644737 |
f71eacb0cebaf99c989c8497d1bdf211436cdebe | 774 | py | Python | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | python_zabbix/client.py | zhenghuaHe/stu_python | e0937070248269527661ccf32e5bea048170ac17 | [
"Apache-2.0"
] | null | null | null | # -*- coding=utf-8 -*-
import socket
import psutil
import json
# Create the connection
# Create a socket object
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 8888
# Request a connection to the server
sk.connect((host, port))
# Gather information
# Get the hostname
hostname = socket.getfqdn(socket.gethostname())
# Get the host IP address
host_ip = socket.gethostbyname(hostname)
# Get the memory usage percentage
host_memory = str(psutil.virtual_memory().percent)
# Get the CPU usage percentage
host_cpu = str(psutil.cpu_percent(0))
# Users logged in on this machine
host_user = str(psutil.users())
# Put everything into a dictionary
info = {"Hostname:": hostname, "Host IP address:": host_ip, "Memory usage:": host_memory, "CPU usage:": host_cpu, "Logged-in users:": host_user}
result = json.dumps(info)
# Send the data (sockets require bytes, hence the explicit encode)
# sk.send(bytes(dict))
sk.send(result.encode('utf8'))
# Receive the response
# Receive at most 1024 bytes of data
msg = sk.recv(1024)
print(msg.decode('utf-8'))
# Close the connection
sk.close() | 16.125 | 107 | 0.719638 |
f71eb4e7d27b7bafa25c7ecc98bfe686ddc35042 | 6,389 | py | Python | app/service/send_notification.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | app/service/send_notification.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | app/service/send_notification.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | from flask import current_app
from notifications_utils.s3 import S3ObjectNotFound
from notifications_utils.s3 import s3download as utils_s3download
from sqlalchemy.orm.exc import NoResultFound
from app import create_random_identifier
from app.dao.notifications_dao import _update_notification_status
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_sms_sender_dao import dao_get_service_sms_senders_by_id
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import (
dao_get_template_by_id_and_service_id,
get_precompiled_letter_template,
)
from app.dao.users_dao import get_user_by_id
from app.letters.utils import (
get_letter_pdf_filename,
get_page_count,
move_uploaded_pdf_to_letters_bucket,
)
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NOTIFICATION_DELIVERED,
SMS_TYPE,
UPLOAD_LETTERS,
)
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.notifications.validators import (
check_service_has_permission,
check_service_over_daily_message_limit,
validate_and_format_recipient,
validate_template,
)
from app.v2.errors import BadRequestError
def validate_created_by(service, created_by_id):
user = get_user_by_id(created_by_id)
if service not in user.services:
message = 'Can’t create notification - {} is not part of the "{}" service'.format(user.name, service.name)
raise BadRequestError(message=message)
def create_one_off_reference(template_type):
if template_type == LETTER_TYPE:
return create_random_identifier()
return None
def send_one_off_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id_and_service_id(template_id=post_data["template_id"], service_id=service_id)
personalisation = post_data.get("personalisation", None)
validate_template(template.id, personalisation, service, template.template_type)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_and_format_recipient(
send_to=post_data["to"],
key_type=KEY_TYPE_NORMAL,
service=service,
notification_type=template.template_type,
allow_safelisted_recipients=False,
)
validate_created_by(service, post_data["created_by"])
sender_id = post_data.get("sender_id", None)
reply_to = get_reply_to_text(
notification_type=template.template_type,
sender_id=sender_id,
service=service,
template=template,
)
notification = persist_notification(
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["to"],
service=service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_by_id=post_data["created_by"],
reply_to_text=reply_to,
reference=create_one_off_reference(template.template_type),
)
if template.template_type == LETTER_TYPE and service.research_mode:
_update_notification_status(
notification,
NOTIFICATION_DELIVERED,
)
else:
send_notification_to_queue(
notification=notification,
research_mode=service.research_mode,
queue=template.queue_to_use(),
)
return {"id": str(notification.id)}
def get_reply_to_text(notification_type, sender_id, service, template):
reply_to = None
if sender_id:
try:
if notification_type == EMAIL_TYPE:
message = "Reply to email address not found"
reply_to = dao_get_reply_to_by_id(service.id, sender_id).email_address
elif notification_type == SMS_TYPE:
message = "SMS sender not found"
reply_to = dao_get_service_sms_senders_by_id(service.id, sender_id).get_reply_to_text()
except NoResultFound:
raise BadRequestError(message=message)
else:
reply_to = template.get_reply_to_text()
return reply_to
def send_pdf_letter_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
check_service_has_permission(LETTER_TYPE, service.permissions)
check_service_has_permission(UPLOAD_LETTERS, service.permissions)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_created_by(service, post_data["created_by"])
template = get_precompiled_letter_template(service.id)
file_location = "service-{}/{}.pdf".format(service.id, post_data["file_id"])
try:
letter = utils_s3download(current_app.config["TRANSIENT_UPLOADED_LETTERS"], file_location)
except S3ObjectNotFound as e:
current_app.logger.exception(
"Letter {}.pdf not in transient {} bucket".format(
post_data["file_id"], current_app.config["TRANSIENT_UPLOADED_LETTERS"]
)
)
raise e
# Getting the page count won't raise an error since admin has already checked the PDF is valid
billable_units = get_page_count(letter.read())
personalisation = {"address_line_1": post_data["filename"]}
# TODO: stop hard-coding postage as 'second' once we get postage from the admin
notification = persist_notification(
notification_id=post_data["file_id"],
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["filename"],
service=service,
personalisation=personalisation,
notification_type=LETTER_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reference=create_one_off_reference(LETTER_TYPE),
client_reference=post_data["filename"],
created_by_id=post_data["created_by"],
billable_units=billable_units,
postage="second",
)
upload_filename = get_letter_pdf_filename(
notification.reference,
notification.service.crown,
is_scan_letter=False,
postage=notification.postage,
)
move_uploaded_pdf_to_letters_bucket(file_location, upload_filename)
return {"id": str(notification.id)}
| 34.722826 | 114 | 0.728283 |
f71ec8c0f4f79e7b5d54635479768ae036e669e3 | 251 | py | Python | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 2 | 2020-06-25T21:10:32.000Z | 2020-12-10T06:53:45.000Z | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | null | null | null | CodeChef/JUNE20/EOEO.py | mishrakeshav/Competitive-Programming | b25dcfeec0fb9a9c71bf3a05644b619f4ca83dd2 | [
"MIT"
] | 3 | 2020-05-15T14:17:09.000Z | 2021-07-25T13:18:20.000Z |
if __name__ == '__main__':
for t in range(int(input())):
ts = int(input())
while ts%2 == 0:
ts //= 2
if ts:
print(ts//2)
else:
print(0)
| 13.944444 | 33 | 0.346614 |
f71eca6b3f158fdbe0a1271729c857c9ffafb3c5 | 4,565 | py | Python | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 48 | 2016-04-13T10:22:53.000Z | 2022-03-21T16:31:41.000Z | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 14 | 2018-10-25T21:15:20.000Z | 2021-11-26T16:55:55.000Z | ValveAnnulusAnalysis/HeartValveLib/helpers.py | SlicerHeart/SlicerHeart | 5ead8d723f6dec67ea6065b847cb4f8dce5bef72 | [
"BSD-3-Clause"
] | 21 | 2017-09-12T08:20:36.000Z | 2021-10-30T02:22:11.000Z | """ collection of functions that are useful for several classes but non-specific to any """
import slicer
import logging
def getBinaryLabelmapRepresentation(segmentationNode, segmentID: str):
segmentLabelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentID, segmentLabelmap)
return segmentLabelmap
def getSpecificHeartValveModelNodes(phases: list):
heartValveModelNodes = []
for phase in phases:
try:
heartValveModelNodes.extend(list(getValveModelNodesMatchingPhase(phase)))
except ValueError as exc:
logging.warning(exc)
return heartValveModelNodes
def getSpecificHeartValveModelNodesMatchingPhaseAndType(phases: list, valveType: str, sort:bool=True):
valveModels = []
for valveModel in getAllHeartValveModelNodes():
if valveModel.getValveType() == valveType and getValvePhaseShortName(valveModel) in phases:
valveModels.append(valveModel)
if sort:
return sorted(valveModels, key=lambda valveModel: phases.index(getValvePhaseShortName(valveModel)))
return valveModels
def getSpecificHeartValveMeasurementNodes(identifier):
valveQuantificationLogic = slicer.modules.valvequantification.widgetRepresentation().self().logic
validMeasurementNodes = []
for measurementNode in getAllHeartValveMeasurementNodes():
measurementPreset = valveQuantificationLogic.getMeasurementPresetByMeasurementNode(measurementNode)
if not measurementPreset or measurementPreset.QUANTIFICATION_RESULTS_IDENTIFIER != identifier:
continue
validMeasurementNodes.append(measurementNode)
return validMeasurementNodes
def getFirstValveModelNodeMatchingPhase(phase='MS'):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
return valveModelNode
raise ValueError("Could not find valve for phase %s" % phase)
def getValveModelNodesMatchingPhase(phase):
for valveModelNode in getAllHeartValveModelNodes():
if getValvePhaseShortName(valveModelNode) == phase:
yield valveModelNode
def getFirstValveModelNodeMatchingPhaseAndType(phase, valveType):
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
return valveModel
raise ValueError(f"Could not find valve with type {valveType} for phase {phase}")
def getValveModelNodesMatchingPhaseAndType(phase, valveType):
valveModels = []
for valveModel in getValveModelNodesMatchingPhase(phase):
if valveModel.getValveType() == valveType:
valveModels.append(valveModel)
return valveModels
def getAllHeartValveModelNodes():
import HeartValves
return map(HeartValves.getValveModel, getAllHeartValveNodes())
def getAllHeartValveNodes():
return getAllModuleSpecificScriptableNodes('HeartValve')
def getAllHeartValveMeasurementNodes():
return getAllModuleSpecificScriptableNodes('HeartValveMeasurement')
def getAllModuleSpecificScriptableNodes(moduleName):
return filter(lambda node: node.GetAttribute('ModuleName') == moduleName,
slicer.util.getNodesByClass('vtkMRMLScriptedModuleNode'))
def getHeartValveMeasurementNode(phase):
for measurementNode in getAllHeartValveMeasurementNodes():
cardiacCyclePhaseNames = getMeasurementCardiacCyclePhaseShortNames(measurementNode)
if len(cardiacCyclePhaseNames) == 1 and cardiacCyclePhaseNames[0] == phase:
return measurementNode
def getMeasurementCardiacCyclePhaseShortNames(measurementNode):
import ValveQuantification
valveQuantificationLogic = ValveQuantification.ValveQuantificationLogic()
return valveQuantificationLogic.getMeasurementCardiacCyclePhaseShortNames(measurementNode)
def getAllFilesWithExtension(directory, extension, file_name_only=False):
import os
import fnmatch
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*{}'.format(extension)):
files.append(filename if file_name_only else os.path.join(root, filename))
return files
def isMRBFile(mrb_file):
import os
return os.path.isfile(mrb_file) and mrb_file.lower().endswith(".mrb")
def getValveModelForSegmentationNode(segmentationNode):
for valveModel in getAllHeartValveModelNodes():
if valveModel.getLeafletSegmentationNode() is segmentationNode:
return valveModel
return None
def getValvePhaseShortName(valveModel):
cardiacPhase = valveModel.getCardiacCyclePhase()
cardiacCyclePhasePreset = valveModel.cardiacCyclePhasePresets[cardiacPhase]
return cardiacCyclePhasePreset['shortname'] | 35.664063 | 103 | 0.806134 |
f71ecd5b355fa5a6dd0a436c34d7ff7e754d15d2 | 1,464 | py | Python | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | tests/test_response.py | Vlsarro/pystexchapi | 27618002165fc536798c46c486e78caeb85905bf | [
"MIT"
] | null | null | null | import unittest
import json
import requests
from pystexchapi.response import StockExchangeResponseParser, APIResponse
from pystexchapi.exc import APIDataException, APIResponseParsingException
from tests import TICKER_RESPONSE, GENERIC_ERROR_RESPONSE
def raise_value_error():
raise ValueError()
class TestStockExchangeResponseParser(unittest.TestCase):
@staticmethod
def _make_response(content='', status_code=200) -> requests.Response:
response = requests.Response()
_content = content
response._content = _content
response.status_code = status_code
response.encoding = 'utf-8'
response.json = lambda: json.loads(_content)
return response
def test_parse(self):
resp = StockExchangeResponseParser.parse(self._make_response(content=TICKER_RESPONSE))
self.assertTrue(resp)
self.assertIsInstance(resp, APIResponse)
data = resp.data
self.assertIsInstance(data, list)
self.assertEqual(len(data), 1)
def test_raise_on_error(self):
response = self._make_response(content=GENERIC_ERROR_RESPONSE)
with self.assertRaises(APIDataException) as cm:
StockExchangeResponseParser.parse(response)
self.assertEqual(cm.exception.msg, 'Invalid request')
response.json = raise_value_error
with self.assertRaises(APIResponseParsingException):
StockExchangeResponseParser.parse(response)
| 31.826087 | 94 | 0.729508 |
f71f26b91d28f2fac45042a51478207f81d2160f | 2,660 | py | Python | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | 2 | 2020-01-02T12:59:44.000Z | 2020-01-04T19:21:31.000Z | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | import re
import math
from collections import defaultdict
def parse(content):
return list(map(parse_line, content.strip().split("\n")))
def parse_line(row):
matches = re.findall(r"\s?(\d+) ([A-Z]+),? ", row.strip())
inputs = [(int(item[0]), item[1]) for item in matches]
output = re.match(r".+ => (\d+) ([A-Z]+)$", row.strip()).groups()
return inputs, (int(output[0]), output[1])
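# A quick sanity example (hypothetical reaction string, not from the puzzle input):
#   parse_line("7 A, 1 B => 1 C")  ->  ([(7, 'A'), (1, 'B')], (1, 'C'))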
def requirements_mapping(reactions):
# Verify that there is only one rule per Chemical
assert len(reactions) == len(set(row[-1][1] for row in reactions))
return {row[-1][1]: (row[-1][0], row[0]) for row in reactions}
def min_usage(reactions, C="FUEL", I="ORE", how_many=1, usage=None, leftovers=None):
if usage is None:
usage = defaultdict(int)
if leftovers is None:
leftovers = defaultdict(int)
usage[C] += how_many
# if C == I:
if C not in reactions: # Generalize for any (C, I) pair
return usage, leftovers
extra = min(how_many, leftovers[C])
how_many -= extra
leftovers[C] -= extra
quantity, inputs = reactions[C]
coef = math.ceil(how_many / quantity)
for qty, name in inputs:
usage, leftovers = min_usage(reactions, name, I, coef * qty, usage, leftovers)
leftovers[C] += coef * quantity - how_many
return usage, defaultdict(int, {k: v for k, v in leftovers.items() if v})
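# A worked example (hypothetical one-rule cookbook): with the single rule
# "10 ORE => 10 A", min_usage(reactions, "A", how_many=7) reports
# usage == {"A": 7, "ORE": 10} and leftovers == {"A": 3}.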
def binary_search(func, low, high, expected):
    # Largest x in [low, high] with func(x) < expected, for non-decreasing func.
    while low < high:
        mid = (low + high + 1) // 2  # round up so that `low = mid` always makes progress
        result = func(mid)
        if result < expected:
            low = mid
        else:
            high = mid - 1
    return low
def get_max_fuel(reactions, max_ore=1e12):
f = lambda x: min_usage(reactions, how_many=x)[0]["ORE"]
return binary_search(f, 0, 1000000, max_ore)
if __name__ == "__main__":
with open("../inputs/day14.input") as f:
reactions = parse(f.read())
mapping = requirements_mapping(reactions)
# Part I
necessary, waste = min_usage(mapping)
print(necessary["ORE"]) # 2486514
# Part II
value = get_max_fuel(mapping, 1e12)
print(value) # 998536
# Verify that we got the correct value
necessary, _ = min_usage(mapping, how_many=value)
assert necessary["ORE"] < 1e12
necessary, _ = min_usage(mapping, how_many=value + 1)
assert necessary["ORE"] > 1e12
# Actually, this could be solved linearly in constant time with 2 data points
x1, y1 = 1, 2486514
x2, y2 = 10000000, min_usage(mapping, how_many=10000000)[0]["ORE"]
# y = ax + b
slope = (y2 - y1) / (x2 - x1)
b = y1 - slope * x1
fuel = round((1e12 - b) / slope)
assert fuel == value
| 27.142857 | 86 | 0.616917 |
f71f38c1c62e7b6318d6a1664ac6ee8c0936729a | 8,714 | py | Python | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 43 | 2021-07-26T13:13:12.000Z | 2022-03-27T13:06:44.000Z | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 5 | 2021-08-08T03:06:44.000Z | 2022-02-15T06:34:57.000Z | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 7 | 2021-11-07T10:16:32.000Z | 2022-03-28T08:51:06.000Z | '''
Code taken from https://github.com/WilhelmT/ClassMix
Slightly modified
'''
import kornia
import torch
import random
import torch.nn as nn
def normalize_rgb(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (122.6789143, 116.66876762, 104.00698793) # rgb
elif dataset == 'cityscapes':
mean = (73.15835921, 82.90891754, 72.39239876) # rgb
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def normalize_bgr(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (104.00698793, 116.66876762, 122.6789143) # bgr
elif dataset == 'cityscapes':
mean = (72.39239876, 82.90891754, 73.15835921) # bgr
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def grayscale(grayscale, data = None, target = None, probs = None):
"""
Args:
grayscale: boolean whether to apply grayscale augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data is converted from rgb to grayscale if [grayscale] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if grayscale and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )
data = seq(data)
return data, target, probs
def colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None):
"""
Args:
colorJitter: boolean whether to apply colorJitter augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
s: brightness and contrast strength of the color jitter
Returns:
colorJitter is applied to data if [colorJitter] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if colorJitter and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.))
            data = seq(data / 255.) * 255.  # kornia's ColorJitter expects inputs in [0, 1]
return data, target, probs
def gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None):
"""
Args:
blur: boolean whether to apply blur
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
min_sigma: minimum sigma value for the gaussian blur
max_sigma: maximum sigma value for the gaussian blur
Returns:
gaussian blur is applied to data if [blur] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if blur and data.shape[1]==3:
seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma)))
data = seq(data)
return data, target, probs
def flip(flip, data = None, target = None, probs = None):
"""
Args:
flip: boolean whether to apply flip augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs are flipped if the boolean flip is True
"""
if flip:
if not (data is None): data = torch.flip(data,(3,))
if not (target is None):
target = torch.flip(target,(2,))
if not (probs is None):
probs = torch.flip(probs,(2,))
return data, target, probs
def solarize(solarize, data = None, target = None, probs = None):
"""
Args:
solarize: boolean whether to apply solarize augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target, probs, where
data is solarized if [solarize] is True
"""
if not (data is None):
if solarize and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1)))
data = seq(data.cpu()/255.).cuda()*255.
return data, target, probs
def mix(mask, data = None, target = None, probs = None):
"""
Applies classMix augmentation:
https://openaccess.thecvf.com/content/WACV2021/papers/Olsson_ClassMix_Segmentation-Based_Data_Augmentation_for_Semi-Supervised_Learning_WACV_2021_paper.pdf
Args:
mask: masks for applying ClassMix. A list of B elements of CxWxH tensors
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs augmented with classMix
"""
if not (data is None):
if mask.shape[0] == data.shape[0]:
data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])])
if not (target is None):
target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])])
if not (probs is None):
probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])])
return data, target, probs
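# A minimal ClassMix usage sketch. The `model` call and tensor shapes are
# assumptions for illustration, not part of this module: one binary mask is
# built per sample from half of its predicted classes, then each sample is
# mixed with the next one in the batch.
def _classmix_usage_example(model, images):
    preds = torch.argmax(model(images), dim=1)           # BxWxH hard predictions
    masks = []
    for pred in preds:
        classes = torch.unique(pred)
        keep = classes[torch.randperm(len(classes))[:len(classes) // 2]]
        masks.append((pred[None] == keep[:, None, None]).any(0).float())
    mask = torch.stack(masks)                            # BxWxH binary masks
    return mix(mask, data=images, target=preds)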
def random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None):
"""
Args:
scale: scale ratio. Float
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
        ignore_label: integer value that defines the ignore class in the dataset labels
    Returns:
        data, target and probs after applying a scaling operation; the output resolution is kept the same as the input resolution WxH
"""
if scale != 1:
init_size_w = data.shape[2]
init_size_h = data.shape[3]
# scale data, labels and probs
data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True)
if target is not None:
target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1)
if probs is not None:
probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1)
final_size_w = data.shape[2]
final_size_h = data.shape[3]
diff_h = init_size_h - final_size_h
diff_w = init_size_w - final_size_w
if scale < 1: # add padding if needed
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0)
data = pad(data)
if probs is not None:
probs = pad(probs)
# padding with ignore label to add to labels
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label)
if target is not None:
target = pad(target)
else: # crop if needed
w = random.randint(0, data.shape[2] - init_size_w)
h = random.randint(0, data.shape[3] - init_size_h)
data = data [:,:,h:h+init_size_h,w:w + init_size_w]
if probs is not None:
probs = probs [:,h:h+init_size_h,w:w + init_size_w]
if target is not None:
target = target [:,h:h+init_size_h,w:w + init_size_w]
return data, target, probs
| 34.442688 | 192 | 0.61843 |
f71f63419874a18aec03723ca69a1e11494c93fe | 27 | py | Python | btd6_memory_info/generated/NinjaKiwi/LiNK/Lobbies/LatencyMeasurements/StatsExtensions/stats_extensions.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/NinjaKiwi/LiNK/Lobbies/LatencyMeasurements/StatsExtensions/stats_extensions.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/NinjaKiwi/LiNK/Lobbies/LatencyMeasurements/StatsExtensions/stats_extensions.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class StatsExtensions: pass | 27 | 27 | 0.888889 |
f71f9baff849e1b3e85a4e00a676e11b093d2eb9 | 7,777 | py | Python | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 222 | 2019-02-13T07:56:28.000Z | 2022-03-28T07:07:54.000Z | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 107 | 2019-02-12T21:56:39.000Z | 2022-03-12T01:08:03.000Z | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 27 | 2017-11-14T17:58:15.000Z | 2019-01-14T01:36:09.000Z | """Example of an MLP in Myia.
Myia is still a work in progress, and this example may change in the future.
"""
import time
from dataclasses import dataclass
import numpy
import torch
from numpy.random import RandomState
from torchvision import datasets, transforms
import myia.public_api as pub
from myia import ArithmeticData, myia, value_and_grad
from myia.api import to_device
from myia.debug import traceback # noqa
from myia.operations import array_exp, array_pow, random_initialize
###########
# Options #
###########
dtype = "float32"
backend = "pytorch"
# backend = 'relay' # Uncomment to use relay backend
device_type = "cpu"
# device_type = 'cuda' # Uncomment to run on the gpu
backend_options_dict = {
"pytorch": {"device": device_type},
"relay": {"target": device_type, "device_id": 0},
}
backend_options = backend_options_dict[backend]
###############
# Hyperparams #
###############
lr = getattr(numpy, dtype)(0.01)
########
# Data #
########
# This just generates random data so we don't have to load a real dataset,
# but the model will work just as well on a real dataset.
def param(R, *size):
"""Generates a random array using the generator R."""
return numpy.array(R.rand(*size) * 2 - 1, dtype=dtype)
def generate_data(n, batch_size, input_size, target_size, *, seed=87):
"""Generate inputs and targets.
Generates n batches of samples of size input_size, matched with
a single target.
"""
R = RandomState(seed=seed)
return [
(param(R, batch_size, input_size), param(R, batch_size, target_size))
for i in range(n)
]
def mlp_parameters(*layer_sizes, seed=90909):
"""Generates parameters for a MLP given a list of layer sizes."""
R = RandomState(seed=seed)
parameters = []
for i, o in zip(layer_sizes[:-1], layer_sizes[1:]):
W = param(R, i, o)
b = param(R, 1, o)
parameters.append((W, b))
return parameters
#########
# Model #
#########
# We generate a MLP model with some arbitrary number of layers and tanh
# activations.
@dataclass(frozen=True)
class Linear(ArithmeticData):
"""Linear layer."""
W: "Weights array"
b: "Biases vector"
def apply(self, input):
"""Apply the layer."""
return input @ self.W + self.b
@dataclass(frozen=True)
class Tanh(ArithmeticData):
"""Tanh layer."""
def apply(self, input):
"""Apply the layer."""
return numpy.tanh(input)
@dataclass(frozen=True)
class Sequential(ArithmeticData):
"""Sequential layer, applies all sub-layers in order."""
layers: "Tuple of layers"
def apply(self, x):
"""Apply the layer."""
for layer in self.layers:
x = layer.apply(x)
return x
@dataclass(frozen=True)
class VAE(ArithmeticData):
"""Sequential layer, applies all sub-layers in order."""
fc1: "layer fc1"
fc21: "layer fc21"
fc22: "layer fc22"
fc3: "layer fc3"
fc4: "layer fc4"
def encode(self, x):
h1 = pub.relu(self.fc1.apply(x))
return self.fc21.apply(h1), self.fc22.apply(h1)
def reparameterize(self, mu, logvar, rstate):
std = array_exp(0.5 * logvar)
        eps, rstate = pub.uniform(rstate, (2, 20), -1.0, 1.0)  # uniform noise is used here in place of the usual Gaussian eps
return mu + eps * std, rstate
def decode(self, z):
h3 = pub.relu(self.fc3.apply(z))
return pub.sigmoid(self.fc4.apply(h3))
def forward(self, x, rstate):
mu, logvar = self.encode(pub.reshape(x, (-1, 784)))
z, rstate = self.reparameterize(mu, logvar, rstate)
return self.decode(z), mu, logvar, rstate
params = (
mlp_parameters(*(784, 400))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(20, 400))[0],
mlp_parameters(*(400, 784))[0],
)
model = VAE(
Linear(params[0][0], params[0][1]),
Linear(params[1][0], params[1][1]),
Linear(params[2][0], params[2][1]),
Linear(params[3][0], params[3][1]),
Linear(params[4][0], params[4][1]),
)
model = to_device(model, backend, backend_options, broaden=False)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = pub.binary_cross_entropy(
recon_x, pub.reshape(x, (-1, 784)), reduction="sum"
)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * pub._sum(1 + logvar - array_pow(mu, 2) - array_exp(logvar))
return BCE + KLD
def cost(model, data, rstate):
recon_batch, mu, logvar, _rstate = model.forward(data, rstate)
loss = loss_function(recon_batch, data, mu, logvar)
return loss.item(), _rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step(model, data, lr, rstate):
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
(_cost, rstate), dmodel = value_and_grad(cost, "model")(
model, data, rstate, dout=(1, 1)
)
return _cost, model - lr * dmodel, rstate
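# Note: the model dataclasses derive from ArithmeticData, so `model - lr * dmodel`
# above applies the SGD update elementwise to every weight and bias in the nested
# structure at once.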
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_eval(model, data, rstate):
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
return cost(model, data, rstate)
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_init_seed():
"""Returns the loss and parameter gradients.
value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
The 'model' argument can be omitted: by default the derivative wrt
the first argument is returned.
"""
return random_initialize(1)
lr = getattr(numpy, dtype)(0.01)
if __name__ == "__main__":
seed = 123
cuda = False
batch_size = 2
epochs = 1
torch.manual_seed(seed)
device = torch.device("cuda" if cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=True,
download=True,
transform=transforms.ToTensor(),
),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
rand_state = step_init_seed()
for _ in range(epochs):
costs = []
t0 = time.time()
for i, (data, _) in enumerate(train_loader):
print("i", i + 1, "/", len(train_loader))
_cost, model, rand_state = step(
model, data.reshape((batch_size, 784)).numpy(), lr, rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("../data", train=False, transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
costs = []
t0 = time.time()
for i, (data, _) in enumerate(test_loader):
_cost, rand_state = step_eval(
model, data.reshape((batch_size, 784)).numpy(), rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
| 26.542662 | 80 | 0.626077 |
f71fa3db4ff531443af2a92cd1b1a2d567ddaf8d | 188 | py | Python | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | import os
os.system("git clone https://github.com/justteen/BUZZ-USERBOT /root/userbot && mkdir /root/userbot/bin/ && cd /root/userbot/ && chmod +x /usr/local/bin/* && python3 -m userbot")
| 62.666667 | 177 | 0.702128 |
f71fcd8293089c972b431387d1197b53dd7b564d | 516 | py | Python | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | 2 | 2018-12-20T20:10:52.000Z | 2019-02-04T17:44:01.000Z | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | import numpy as np
def make_histograms(x, bins=40, interval=[1e-1, 1]):
intervals = np.linspace(interval[0], interval[1], bins)
flat_x = x.reshape((x.shape[0], -1))
hist_x = np.zeros((x.shape[0], bins))
for i in range(1, bins):
mask = flat_x <= intervals[i]
mask = np.logical_and(mask, flat_x > intervals[i-1])
hist_x[:, i] = mask.sum(1)
return hist_x
def make_net_count(x, **kwargs):
flat_x = x.reshape((x.shape[0], -1))
sum_x = flat_x.sum(1)
return sum_x
| 28.666667 | 60 | 0.604651 |
f71fd21c199e5a31cb8e95fea4d6ad447b4eb6cf | 2,082 | py | Python | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | null | null | null | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 36 | 2017-06-23T20:29:22.000Z | 2020-03-18T15:04:27.000Z | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 12 | 2017-06-21T18:10:57.000Z | 2021-11-01T19:13:32.000Z | # -*- coding: utf-8 -*-
import adsputils
import unittest
import os
import json
import time
from inspect import currentframe, getframeinfo
from adsputils.exceptions import UnicodeHandlerError
def _read_file(fpath):
with open(fpath, 'r') as fi:
return fi.read()
class TestInit(unittest.TestCase):
def test_logging(self):
logdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs'))
foo_log = logdir + '/foo.bar.log'
if os.path.exists(foo_log):
os.remove(foo_log)
logger = adsputils.setup_logging('foo.bar')
logger.warning('first')
frameinfo = getframeinfo(currentframe())
#print foo_log
self.assertTrue(os.path.exists(foo_log))
c = _read_file(foo_log)
j = json.loads(c)
self.assertEqual(j['message'], 'first')
self.assertTrue('hostname' in j)
# verify warning has filename and linenumber
self.assertEqual(os.path.basename(frameinfo.filename), j['filename'])
self.assertEqual(j['lineno'], frameinfo.lineno - 1)
time.sleep(0.01)
# now multiline message
logger.warning(u'second\nthird')
logger.warning('last')
c = _read_file(foo_log)
found = False
msecs = False
for x in c.strip().split('\n'):
j = json.loads(x)
self.assertTrue(j)
if j['message'] == u'second\nthird':
found = True
t = adsputils.get_date(j['asctime'])
if t.microsecond > 0:
msecs = True
self.assertTrue(found)
self.assertTrue(msecs)
def test_u2asc(self):
input1 = 'benìtez, n'
input2 = u'izzet, sakallı'
output1 = adsputils.u2asc(input1)
output2 = adsputils.u2asc(input2)
self.assertEqual(output1,'benitez, n')
self.assertEqual(output2,u'izzet, sakalli')
input3 = input2.encode('utf16')
self.assertRaises(UnicodeHandlerError, adsputils.u2asc, input3)
if __name__ == '__main__':
unittest.main()
| 28.135135 | 87 | 0.604707 |
f71fdbd179d815f56f9c409701685cd66a7005c3 | 23,154 | py | Python | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import hmac
import json
import logging
import os
import re
import struct
import tempfile
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
import m3u8
from Crypto.Cipher import AES
from tqdm import tqdm
def is_channel(url):
url = re.findall('(slot)', url)
if url:
return True
return False
yuu_log = logging.getLogger('yuu.abematv')
class AbemaTVDownloader:
def __init__(self, url, session):
self.key = None
self.iv = None
self.url = url
self.session = session
self.merge = True
if os.name == "nt":
self.yuu_folder = os.path.join(os.getenv('LOCALAPPDATA'), 'yuu_data')
sffx = '\\'
else:
self.yuu_folder = os.path.join(os.getenv('HOME'), '.yuu_data')
sffx = '/'
if not os.path.isdir(self.yuu_folder):
os.mkdir(self.yuu_folder)
self.temporary_folder = tempfile.mkdtemp(dir=self.yuu_folder)
self.temporary_folder = self.temporary_folder + sffx
self._aes = None
def setup_decryptor(self):
self.iv = unhexlify(self.iv)
self._aes = AES.new(self.key, AES.MODE_CBC, IV=self.iv)
def download_chunk(self, files, key, iv):
if iv.startswith('0x'):
self.iv = iv[2:]
else:
self.iv = iv
self.key = key
self.downloaded_files = []
self.setup_decryptor() # Initialize a new decryptor
try:
with tqdm(total=len(files), desc='Downloading', ascii=True, unit='file') as pbar:
for tsf in files:
outputtemp = self.temporary_folder + os.path.basename(tsf)
if outputtemp.find('?tver') != -1:
outputtemp = outputtemp[:outputtemp.find('?tver')]
with open(outputtemp, 'wb') as outf:
try:
vid = self.session.get(tsf)
vid = self._aes.decrypt(vid.content)
outf.write(vid)
except Exception as err:
yuu_log.error('Problem occured\nreason: {}'.format(err))
return None
pbar.update()
self.downloaded_files.append(outputtemp)
except KeyboardInterrupt:
yuu_log.warn('User pressed CTRL+C, cleaning up...')
return None
return self.downloaded_files
class AbemaTV:
def __init__(self, url, session):
self.session = session
self.type = 'AbemaTV'
self.yuu_logger = logging.getLogger('yuu.abematv.AbemaTV')
self.url = url
self.m3u8_url = None
self.resolution = None
self.resolution_o = None
self.device_id = None
self.is_m3u8 = False
self.est_filesize = None # In MiB
self.resolution_data = {
"1080p": ["4000kb/s", "AAC 192kb/s 2ch"],
"720p": ["2000kb/s", "AAC 160kb/s 2ch"],
"480p": ["900kb/s", "AAC 128kb/s 2ch"],
"360p": ["550kb/s", "AAC 128kb/s 2ch"],
"240p": ["240kb/s", "AAC 64kb/s 1ch"],
"180p": ["120kb/s", "AAC 64kb/s 1ch"]
}
self.bitrate_calculation = {
"1080p": 5175,
"720p": 2373,
"480p": 1367,
"360p": 878,
"240p": 292,
"180p": 179
}
self.authorization_required = False
self.authorized = False # Ignore for now
#self.authorize = True # Ignore for now
self.resumable = True
self._STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
self._HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
self._KEYPARAMS = {
"osName": "android",
"osVersion": "6.0.1",
"osLand": "ja_JP",
"osTimezone": "Asia/Tokyo",
"appId": "tv.abema",
"appVersion": "3.27.1"
}
self._MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
self._LICENSE_API = "https://license.abema.io/abematv-hls"
self._USERAPI = "https://api.abema.io/v1/users"
self._PROGRAMAPI = 'https://api.abema.io/v1/video/programs/'
self._CHANNELAPI = 'https://api.abema.io/v1/media/slots/'
self._SERIESAPI = "https://api.abema.io/v1/video/series/"
# Use Chrome UA
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
def __repr__(self):
return '<yuu.AbemaTV: URL={}, Resolution={}, Device ID={}, m3u8 URL={}>'.format(self.url, self.resolution, self.device_id, self.m3u8_url)
def get_downloader(self):
"""
Return a :class: of the Downloader
"""
return AbemaTVDownloader(self.url, self.session)
def resume_prepare(self):
"""
        Add support for resuming files; this function will prepare everything needed to resume a download.
"""
return None
def authorize(self, username, password):
if not self.device_id:
self.yuu_logger.info('{}: Fetching temporary token'.format(self.type))
res, reas = self.get_token() # Abema needs authorization header before authenticating
if not res:
return res, reas
_ENDPOINT_MAIL = 'https://api.abema.io/v1/auth/user/email'
_ENDPOINT_OTP = 'https://api.abema.io/v1/auth/oneTimePassword'
mail_regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
if re.search(mail_regex, username):
_ENDPOINT_USE = _ENDPOINT_MAIL
_USERNAME_METHOD = 'email'
else:
_ENDPOINT_USE = _ENDPOINT_OTP
_USERNAME_METHOD = 'userId'
auth_ = {
_USERNAME_METHOD: username,
"password": password
}
res = self.session.post(_ENDPOINT_USE, json=auth_)
if res.status_code > 299:
res_j = res.json()
self.yuu_logger.debug('Abema Response: {}'.format(res_j['message']))
return False, 'Wrong {} and password combination'.format(_USERNAME_METHOD)
res_j = res.json()
self.yuu_logger.debug('Authentication Token: {}'.format(res_j['token']))
self.session.headers.update({'Authorization': 'bearer ' + res_j['token']})
self.authorized = True
return True, 'Authorized'
def get_token(self):
def key_secret(devid):
SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
b"k9cD=3TxwWe86!x#Zyhe")
deviceid = devid.encode("utf-8")
ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
time_struct = time.gmtime(ts_1hour)
ts_1hour_str = str(ts_1hour).encode("utf-8")
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(SECRETKEY)
tmp = h.digest()
for _ in range(time_struct.tm_mon):
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
tmp = h.digest()
for _ in range(time_struct.tm_mday % 5):
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
tmp = h.digest()
for _ in range(time_struct.tm_hour % 5): # utc hour
h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
finalize = urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
self.yuu_logger.debug('Secret Key: {}'.format(finalize))
return finalize
if self.authorized: # Ignore this if already login
return True, 'Success'
deviceid = str(uuid.uuid4())
self.yuu_logger.debug('Generated Device UUID: {}'.format(deviceid))
json_data = {"deviceId": deviceid, "applicationKeySecret": key_secret(deviceid)}
self.yuu_logger.debug('Generated applicationKeySecret: {}'.format(json_data['applicationKeySecret']))
self.yuu_logger.debug('Sending json data')
res = self.session.post(self._USERAPI, json=json_data).json()
try:
self.yuu_logger.debug('Data sent, getting token')
token = res['token']
self.yuu_logger.debug('User token: {}'.format(token))
except:
return None, 'Failed to get user token.'
self.device_id = deviceid
self.session.headers.update({'Authorization': 'bearer ' + token})
return 'Success', 'Success'
def parse(self, resolution=None, check_only=False):
"""
Function to parse abema url
"""
res_list = [
'180p', '240p', '360p', '480p', '720p', '1080p', 'best', 'worst'
]
if resolution not in res_list:
if not check_only:
return None, 'Unknown resolution: {}. (Check it with `-R`)'.format(resolution)
if resolution == 'best':
resolution = '1080p'
self.resolution_o = 'best'
if resolution == 'worst':
resolution = '180p'
# https://abema.tv/video/title/26-55 (series/playlists)
# https://api.abema.io/v1/video/series/26-55
# https://api.abema.io/v1/video/series/26-55/programs?seriesVersion=1577436473958778090&seasonId=26-55_s1&offset=0&order=seq&limit=40
series = re.search(r"(?P<series>title)/(?P<video_id>.*[^-_])", self.url)
if series:
video_id = series.group(2)
self.yuu_logger.info('Series url format detected, fetching all links...')
self.yuu_logger.debug('Requesting data to Abema API.')
req = self.session.get(self._SERIESAPI + video_id)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
m3u8_url_list = []
output_list = []
jsdata = req.json()
to_be_requested = "{api}{vid}/programs?seriesVersion={sv}&seasonId={si}&offset=0&order={od}"
season_data = jsdata['seasons']
if not season_data:
season_data = [{'id': ''}] # Assume film or some shit
version = jsdata['version']
prog_order = jsdata['programOrder']
for ns, season in enumerate(season_data, 1):
self.yuu_logger.info('Processing season ' + str(ns))
self.yuu_logger.debug('Requesting data to Abema API.')
req_season = self.session.get(to_be_requested.format(api=self._SERIESAPI, vid=video_id, sv=version, si=season['id'], od=prog_order))
if req_season.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_season.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_season.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
season_jsdata = req_season.json()
self.yuu_logger.debug('Processing total of {ep} episode for season {se}'.format(ep=len(season_jsdata['programs']), se=ns))
for nep, episode in enumerate(season_jsdata['programs'], 1):
free_episode = False
if 'label' in episode:
if 'free' in episode['label']:
free_episode = True
elif 'freeEndAt' in episode:
free_episode = True
if 'episode' in episode:
try:
episode_name = episode['episode']['title']
if not episode_name:
episode_name = episode_name['title']['number']
except KeyError:
episode_name = episode_name['title']['number']
else:
episode_name = nep
if not free_episode and not self.authorized:
self.yuu_logger.warn('Skipping episode {} (Not authorized and premium video)'.format(episode_name))
continue
self.yuu_logger.info('Processing episode {}'.format(episode_name))
req_ep = self.session.get(self._PROGRAMAPI + episode['id'])
if req_ep.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_ep.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_ep.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
ep_json = req_ep.json()
title = ep_json['series']['title']
epnum = ep_json['episode']['title']
hls = ep_json['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
m3u8_url_list.append(m3u8_url)
output_list.append(output_name)
self.resolution = resolution
self.m3u8_url = m3u8_url_list
if not output_list:
err_msg = "All video are for premium only, please provide login details."
else:
err_msg = "Success"
return output_list, err_msg
if '.m3u8' in self.url[-5:]:
reg = re.compile(r'(program|slot)\/[\w+-]+')
            self.url = re.search(reg, self.url)[0]
self.is_m3u8 = True
ep_link = self.url[self.url.rfind('/')+1:]
self.yuu_logger.debug('Requesting data to Abema API')
if is_channel(self.url):
req = self.session.get(self._CHANNELAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
output_name = jsdata['slot']['title']
if 'playback' in jsdata['slot']:
hls = jsdata['slot']['playback']['hls']
else:
hls = jsdata['slot']['chasePlayback']['hls'] # Compat
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Title: {}'.format(output_name))
else:
req = self.session.get(self._PROGRAMAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
if jsdata['mediaStatus']:
if 'drm' in jsdata['mediaStatus']:
if jsdata['mediaStatus']['drm']:
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
title = jsdata['series']['title']
epnum = jsdata['episode']['title']
hls = jsdata['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
self.yuu_logger.debug('Episode number: {}'.format(epnum))
self.resolution = resolution
self.m3u8_url = m3u8_url
return output_name, 'Success'
def parse_m3u8(self, m3u8_url):
self.yuu_logger.debug('Requesting m3u8')
r = self.session.get(m3u8_url)
self.yuu_logger.debug('Data requested')
if 'timeshift forbidden' in r.text:
return None, None, None, 'This video can\'t be downloaded for now.'
if r.status_code == 403:
return None, None, None, 'This video is geo-locked for Japan only.'
self.yuu_logger.debug('Parsing m3u8')
x = m3u8.loads(r.text)
files = x.files[1:]
if not files[0]:
files = files[1:]
if 'tsda' in files[5]:
# Assume DRMed
return None, None, None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
resgex = re.findall(r'(\d*)(?:\/\w+.ts)', files[0])[0]
keys_data = x.keys[0]
iv = x.keys[0].iv
ticket = x.keys[0].uri[18:]
parsed_files = []
for f in files:
if f.startswith('/tsvpg') or f.startswith('/tspg'):
f = 'https://ds-vod-abematv.akamaized.net' + f
parsed_files.append(f)
if self.resolution[:-1] != resgex:
if not self.resolution_o:
self.yuu_logger.warn('Changing resolution, from {} to {}p'.format(self.resolution, resgex))
self.resolution = resgex + 'p'
self.yuu_logger.debug('Total files: {}'.format(len(files)))
self.yuu_logger.debug('IV: {}'.format(iv))
self.yuu_logger.debug('Ticket key: {}'.format(ticket))
n = 0.0
for seg in x.segments:
n += seg.duration
self.est_filesize = round((round(n) * self.bitrate_calculation[self.resolution]) / 1024 / 6, 2)
return parsed_files, iv[2:], ticket, 'Success'
def get_video_key(self, ticket):
self.yuu_logger.debug('Sending parameter to API')
restoken = self.session.get(self._MEDIATOKEN_API, params=self._KEYPARAMS).json()
mediatoken = restoken['token']
self.yuu_logger.debug('Media token: {}'.format(mediatoken))
self.yuu_logger.debug('Sending ticket and media token to License API')
rgl = self.session.post(self._LICENSE_API, params={"t": mediatoken}, json={"kv": "a", "lt": ticket})
if rgl.status_code == 403:
            return None, 'Access to this video is not allowed\nProbably a premium video or geo-locked.'
gl = rgl.json()
cid = gl['cid']
k = gl['k']
self.yuu_logger.debug('CID: {}'.format(cid))
self.yuu_logger.debug('K: {}'.format(k))
self.yuu_logger.debug('Summing up data with STRTABLE')
res = sum([self._STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i)) for i in range(len(k))])
self.yuu_logger.debug('Result: {}'.format(res))
self.yuu_logger.debug('Intepreting data')
encvk = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
self.yuu_logger.debug('Encoded video key: {}'.format(encvk))
self.yuu_logger.debug('Hashing data')
h = hmac.new(unhexlify(self._HKEY), (cid + self.device_id).encode("utf-8"), digestmod=hashlib.sha256)
enckey = h.digest()
self.yuu_logger.debug('Second Encoded video key: {}'.format(enckey))
self.yuu_logger.debug('Decrypting result')
aes = AES.new(enckey, AES.MODE_ECB)
vkey = aes.decrypt(encvk)
self.yuu_logger.debug('Decrypted, Result: {}'.format(vkey))
return vkey, 'Success getting video key'
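    # A minimal end-to-end sketch of how these pieces fit together (`url` and
    # `session` are assumed to come from the caller; this is illustration, not
    # part of the class):
    #
    #   abema = AbemaTV(url, session)
    #   abema.get_token()
    #   output_name, _ = abema.parse(resolution='720p')
    #   files, iv, ticket, _ = abema.parse_m3u8(abema.m3u8_url)
    #   key, _ = abema.get_video_key(ticket)
    #   chunks = abema.get_downloader().download_chunk(files, key, iv)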
def resolutions(self, m3u8_uri):
self.yuu_logger.debug('Requesting data to API')
m3u8_ = m3u8_uri[:m3u8_uri.rfind('/')]
base_url = m3u8_[:m3u8_.rfind('/')] + '/'
m3u8_1080 = m3u8_[:m3u8_.rfind('/')] + '/1080/playlist.m3u8'
m3u8_720 = m3u8_[:m3u8_.rfind('/')] + '/720/playlist.m3u8'
m3u8_480 = m3u8_[:m3u8_.rfind('/')] + '/480/playlist.m3u8'
m3u8_360 = m3u8_[:m3u8_.rfind('/')] + '/360/playlist.m3u8'
m3u8_240 = m3u8_[:m3u8_.rfind('/')] + '/240/playlist.m3u8'
m3u8_180 = m3u8_[:m3u8_.rfind('/')] + '/180/playlist.m3u8'
rr_all = self.session.get(base_url + 'playlist.m3u8')
if 'timeshift forbidden' in rr_all.text:
return None, 'This video can\'t be downloaded for now.'
r_all = m3u8.loads(rr_all.text)
play_res = []
for r_p in r_all.playlists:
temp = []
temp.append(r_p.stream_info.resolution)
temp.append(base_url + r_p.uri)
play_res.append(temp)
resgex = re.compile(r'(\d*)(?:\/\w+.ts)')
ava_reso = []
for resdata in play_res:
reswh, m3u8_uri = resdata
resw, resh = reswh
self.yuu_logger.debug('Validating {}p resolution'.format(resh))
rres = m3u8.loads(self.session.get(m3u8_uri).text)
m3f = rres.files[1:]
if not m3f:
return None, 'This video can\'t be downloaded for now.'
self.yuu_logger.debug('Sample link: ' + m3f[5])
            if 'tsda' in m3f[5]:
# Assume DRMed
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
if str(resh) in re.findall(resgex, m3f[5]):
ava_reso.append(
[
'{h}p'.format(h=resh),
'{w}x{h}'.format(w=resw, h=resh)
]
)
if ava_reso:
reso = [r[0] for r in ava_reso]
self.yuu_logger.debug('Resolution list: {}'.format(', '.join(reso)))
return ava_reso, 'Success'
def check_output(self, output=None, output_name=None):
if output:
fn_, ext_ = os.path.splitext(output)
            if ext_ != '.ts':
output = fn_ + '.ts'
else:
output = '{x} ({m} {r}).ts'.format(x=output_name, m=self.type, r=self.resolution)
return output
| 38.914286 | 170 | 0.556319 |
f71fed6c463f4fb9305f4215a3d3f237674e9c98 | 6,399 | py | Python | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 3 | 2020-12-29T04:07:58.000Z | 2022-01-11T14:47:16.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2021-01-02T10:28:07.000Z | 2021-01-04T18:01:42.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2022-01-09T12:55:41.000Z | 2022-01-09T12:55:41.000Z | #!/usr/bin/env python
# pylint: disable=W0201
import sys
import argparse
import yaml
import numpy as np
# torch
import torch
import torch.nn as nn
import torch.optim as optim
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
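# Initialize Conv1d/Conv2d weights from N(0, 0.02) with zeroed biases, and
# BatchNorm weights from N(1.0, 0.02), a common DCGAN-style scheme.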
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv1d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class REC_Processor(Processor):
"""
    Processor for Skeleton-based Action Recognition.
"""
def load_model(self):
# print("load model")
self.model = self.io.load_model(self.arg.model,
**(self.arg.model_args))
self.model.apply(weights_init)
self.loss = nn.CrossEntropyLoss()
# self.loss = nn.BCEWithLogitsLoss()
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
def adjust_lr(self):
if self.arg.optimizer == 'SGD' and self.arg.step:
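            # Step decay: multiply base_lr by 0.1 for each milestone in
            # self.arg.step already reached. For example (hypothetical
            # values), step=[20, 40] and epoch=25 give lr = base_lr * 0.1.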
lr = self.arg.base_lr * (
0.1**np.sum(self.meta_info['epoch'] >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
else:
self.lr = self.arg.base_lr
def show_topk(self, k):
rank = self.result.argsort()
hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
def train(self):
self.model.train()
self.adjust_lr()
loader = self.data_loader['train']
loss_value = []
result_frag = []
label_frag = []
# print("train")
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# forward
output = self.model(data)
result_frag.extend(
output.data.cpu().numpy().argmax(axis=1))
label_frag.extend(label.data.cpu().numpy())
# print(output)
loss = self.loss(output, label)
# print(label)
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
self.iter_info['loss'] = loss.data.item()
self.iter_info['lr'] = '{:.6f}'.format(self.lr)
loss_value.append(self.iter_info['loss'])
self.show_iter_info()
self.meta_info['iter'] += 1
ac = accuracy_score(label_frag, result_frag)
# print(result_frag)
# print(label_frag)
print("train acc: {}".format(ac))
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# self.io.print_timer()
def test(self, evaluation=True):
self.model.eval()
loader = self.data_loader['test']
loss_value = []
result_frag = []
label_frag = []
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# inference
with torch.no_grad():
output = self.model(data)
result_frag.append(output.data.cpu().numpy())
# get loss
if evaluation:
loss = self.loss(output, label)
loss_value.append(loss.item())
label_frag.append(label.data.cpu().numpy())
self.result = np.concatenate(result_frag)
# print(self.result)
if evaluation:
self.label = np.concatenate(label_frag)
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# show top-k accuracy
for k in self.arg.show_topk:
self.show_topk(k)
top = self.result.argmax(axis=1)
print(top)
print(self.label)
cm = confusion_matrix(self.label, top)
print(cm)
@staticmethod
def get_parser(add_help=False):
# parameter priority: command line > config > default
parent_parser = Processor.get_parser(add_help=False)
parser = argparse.ArgumentParser(
add_help=add_help,
parents=[parent_parser],
description='Spatial Temporal Graph Convolution Network')
# region arguments yapf: disable
# evaluation
parser.add_argument('--show_topk', type=int,
default=[1], nargs='+', help='which Top K accuracy will be shown')
# optim
parser.add_argument('--base_lr', type=float,
default=0.01, help='initial learning rate')
parser.add_argument('--step', type=int, default=[], nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument('--optimizer', default='SGD',
help='type of optimizer')
parser.add_argument('--nesterov', type=str2bool,
default=True, help='use nesterov or not')
parser.add_argument('--weight_decay', type=float,
default=0.0001, help='weight decay for optimizer')
# endregion yapf: enable
return parser
| 32.482234 | 94 | 0.556493 |
f72005233f11455f1e95662ff8e8514dc68a23af | 3,738 | py | Python | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | 1 | 2021-09-05T14:18:00.000Z | 2021-09-05T14:18:00.000Z | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | null | null | null | letsencrypt/configuration.py | meehow/letsencrypt | 64073b234a6b87a574d873599a8d4dbf11729d5c | [
"Apache-2.0"
] | null | null | null | """Let's Encrypt user-supplied configuration."""
import os
import urlparse
import zope.interface
from acme import challenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import interfaces
class NamespaceConfig(object):
"""Configuration wrapper around :class:`argparse.Namespace`.
For more documentation, including available attributes, please see
:class:`letsencrypt.interfaces.IConfig`. However, note that
the following attributes are dynamically resolved using
:attr:`~letsencrypt.interfaces.IConfig.work_dir` and relative
paths defined in :py:mod:`letsencrypt.constants`:
- `accounts_dir`
- `csr_dir`
- `in_progress_dir`
- `key_dir`
- `renewer_config_file`
- `temp_checkpoint_dir`
:ivar namespace: Namespace typically produced by
:meth:`argparse.ArgumentParser.parse_args`.
:type namespace: :class:`argparse.Namespace`
"""
zope.interface.implements(interfaces.IConfig)
def __init__(self, namespace):
self.namespace = namespace
if self.simple_http_port == self.dvsni_port:
raise errors.Error(
"Trying to run SimpleHTTP and DVSNI "
"on the same port ({0})".format(self.dvsni_port))
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def server_path(self):
"""File path based on ``server``."""
parsed = urlparse.urlparse(self.namespace.server)
return (parsed.netloc + parsed.path).replace('/', os.path.sep)
@property
def accounts_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.ACCOUNTS_DIR, self.server_path)
@property
def backup_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.BACKUP_DIR)
@property
def csr_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.CSR_DIR)
@property
def in_progress_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.work_dir, constants.IN_PROGRESS_DIR)
@property
def key_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.KEY_DIR)
@property
def temp_checkpoint_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.work_dir, constants.TEMP_CHECKPOINT_DIR)
@property
def simple_http_port(self): # pylint: disable=missing-docstring
if self.namespace.simple_http_port is not None:
return self.namespace.simple_http_port
else:
return challenges.SimpleHTTPResponse.PORT
class RenewerConfiguration(object):
"""Configuration wrapper for renewer."""
def __init__(self, namespace):
self.namespace = namespace
def __getattr__(self, name):
return getattr(self.namespace, name)
@property
def archive_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.ARCHIVE_DIR)
@property
def live_dir(self): # pylint: disable=missing-docstring
return os.path.join(self.namespace.config_dir, constants.LIVE_DIR)
@property
def renewal_configs_dir(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWAL_CONFIGS_DIR)
@property
def renewer_config_file(self): # pylint: disable=missing-docstring
return os.path.join(
self.namespace.config_dir, constants.RENEWER_CONFIG_FILENAME)
| 32.789474 | 80 | 0.697164 |
f720323103b02c71f2a9840e6439b99bbd9ea402 | 42,611 | py | Python | nidmresults/objects/inference.py | mih/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | 1 | 2018-12-04T16:53:45.000Z | 2018-12-04T16:53:45.000Z | nidmresults/objects/inference.py | mih/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | 2 | 2018-04-11T14:01:38.000Z | 2019-05-29T15:14:49.000Z | nidmresults/objects/inference.py | cmaumet/nidmresults | 438f7cce6abc4a4379b629bd76f4d427891e033f | [
"MIT"
] | null | null | null | """
Objects describing the Inference activity, its inputs and outputs as specified
in NIDM-Results.
Specification: http://nidm.nidash.org/specs/nidm-results.html
@author: Camille Maumet <c.m.j.maumet@warwick.ac.uk>
@copyright: University of Warwick 2013-2014
"""
from nidmresults.objects.constants import *
from nidmresults.objects.generic import *
import json
import uuid
from math import erf, sqrt
import rdflib
from prov.model import Literal
from prov.constants import XSD_FLOAT
from prov.model import Identifier
class Inference(object):
"""
    Object representing an Inference step, including an Inference activity, its
inputs and outputs.
"""
def __init__(
self, inference, height_thresh, extent_thresh,
peak_criteria, cluster_criteria, disp_mask, excursion_set,
clusters, search_space, software_id):
super(Inference, self).__init__()
self.excursion_set = excursion_set
self.inference_act = inference
self.height_thresh = height_thresh
self.extent_thresh = extent_thresh
self.clusters = clusters
self.software_id = software_id
self.peak_criteria = peak_criteria
self.cluster_criteria = cluster_criteria
self.disp_mask = disp_mask
self.search_space = search_space
class InferenceActivity(NIDMObject):
"""
Object representing an Inference activity.
"""
def __init__(self, oid=None, tail=None, label=None, contrast_name=None,
inference_type=None, partial_degree=None):
super(InferenceActivity, self).__init__(oid=oid)
if inference_type is None:
self.type = NIDM_INFERENCE
else:
self.type = inference_type
self.prov_type = PROV['Activity']
if tail is None:
tail = NIDM_ONE_TAILED_TEST
self.tail = tail
if label is None:
label = "Inference"
if contrast_name:
label += ": " + contrast_name
self.label = label
self.partial_degree = partial_degree
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_Inference: <http://purl.org/nidash/nidm#NIDM_0000049>
prefix nidm_ConjunctionInference: <http://purl.org/nidash/nidm#NIDM_0000011>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
prefix spm_PartialConjunctionInference: <http://purl.org/nidash/spm#SPM_000000\
5>
prefix spm_PartialConjunctionDegree: <http://purl.org/nidash/spm#SPM_0000015>
SELECT DISTINCT * WHERE {
{
""" + oid_var + """ a nidm_Inference: .
} UNION {
""" + oid_var + """ a nidm_ConjunctionInference: .
} UNION {
""" + oid_var + """ a spm_PartialConjunctionInference: .
}
""" + oid_var + """ rdfs:label ?label ;
a ?inference_type ;
nidm_hasAlternativeHypothesis: ?tail .
OPTIONAL {""" + oid_var + """ spm_PartialConjunctionDegree: ?partial_degree .} .
FILTER ( ?inference_type NOT IN (prov:Activity))
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
# In FSL we have a single thresholding (extent, height) applied to all
# contrasts
# FIXME: Deal with two-tailed inference?
atts = (
(PROV['type'], self.type),
(PROV['label'], self.label),
(NIDM_HAS_ALTERNATIVE_HYPOTHESIS, self.tail))
if self.partial_degree is not None:
atts += (
(SPM_PARTIAL_CONJUNCTION_DEGREE, self.partial_degree),)
self.add_attributes(atts)
class ExcursionSet(NIDMObject):
"""
Object representing a ExcursionSet entity.
"""
def __init__(self, location, coord_space, visu=None,
oid=None, fmt=None, label=None,
sha=None, filename=None, inference=None, suffix='',
clust_map=None, mip=None, num_clusters=None, p_value=None):
super(ExcursionSet, self).__init__(oid)
if not filename:
filename = 'ExcursionSet' + suffix + '.nii.gz'
self.filename = filename
self.file = NIDMFile(self.id, location, filename, sha)
self.type = NIDM_EXCURSION_SET_MAP
self.prov_type = PROV['Entity']
self.visu = visu
if label is None:
label = "Excursion Set Map"
self.label = label
self.coord_space = coord_space
self.clust_map = clust_map
self.mip = mip
# FIXME Not used for export yet (only for reading)
self.inference = inference
self.num_clusters = num_clusters
self.p_value = p_value
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ExcursionSetMap: <http://purl.org/nidash/nidm#NIDM_0000025>
prefix nidm_hasClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000098>
prefix nidm_hasMaximumIntensityProjection: <http://purl.org/nidash/nidm#NIDM_0\
000138>
prefix nidm_inCoordinateSpace: <http://purl.org/nidash/nidm#NIDM_0000104>
prefix nidm_numberOfSupraThresholdClusters: <http://purl.org/nidash/nidm#NIDM_\
0000111>
prefix nidm_pValue: <http://purl.org/nidash/nidm#NIDM_0000114>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExcursionSetMap: ;
prov:atLocation ?location ;
rdfs:label ?label ;
dct:format ?fmt ;
nfo:fileName ?filename ;
crypto:sha512 ?sha .
OPTIONAL {""" + oid_var + """ nidm_numberOfSupraThresholdClusters: ?num_clusters .} .
OPTIONAL {""" + oid_var + """ nidm_pValue: ?p_value .} .
}
ORDER BY ?peak_label
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
# Create "Excursion set" entity
self.add_attributes((
(PROV['type'], self.type),
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
(PROV['label'], self.label),
))
if self.visu is not None:
self.add_attributes((
(DC['description'], self.visu.id),
))
if self.clust_map is not None:
self.add_attributes((
(NIDM_HAS_CLUSTER_LABELS_MAP, self.clust_map.id),
))
if self.mip is not None:
self.add_attributes((
(NIDM_HAS_MAXIMUM_INTENSITY_PROJECTION, self.mip.id),
))
if self.num_clusters is not None:
self.add_attributes((
(NIDM_NUMBER_OF_CLUSTERS, self.num_clusters),
))
if self.p_value is not None:
self.add_attributes((
(NIDM_P_VALUE, self.p_value),
))
class ClusterLabelsMap(NIDMObject):
"""
Object representing a ClusterLabelsMap entity.
"""
def __init__(self, location, coord_space,
oid=None, fmt=None, label=None,
sha=None, filename=None, suffix='', temporary=False):
super(ClusterLabelsMap, self).__init__(oid)
if not filename:
filename = 'ClusterLabels' + suffix + '.nii.gz'
self.filename = filename
self.file = NIDMFile(self.id, location, filename, sha,
temporary=temporary)
self.type = NIDM_CLUSTER_LABELS_MAP
self.prov_type = PROV['Entity']
if label is None:
label = "Cluster Labels Map"
self.label = label
self.coord_space = coord_space
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ClusterLabelsMap: <http://purl.org/nidash/nidm#NIDM_0000008>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterLabelsMap: ;
nfo:fileName ?filename ;
crypto:sha512 ?sha ;
prov:atLocation ?location ;
dct:format ?fmt .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
# Create "Cluster Labels Map" entity
self.add_attributes((
(PROV['type'], self.type),
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
(PROV['label'], self.label)
))
class HeightThreshold(NIDMObject):
"""
Object representing a HeightThreshold entity.
"""
def __init__(self, stat_threshold=None, p_corr_threshold=None,
p_uncorr_threshold=None, threshold_type=None, value=None,
label=None, version={'num': '1.3.0'}, oid=None,
equiv_thresh=None):
super(HeightThreshold, self).__init__(oid=oid)
if not stat_threshold and not p_corr_threshold and \
not p_uncorr_threshold and not value:
raise Exception('No threshold defined')
if isinstance(threshold_type, str):
threshold_type = Identifier(threshold_type)
thresh_desc = ""
if stat_threshold is not None:
thresh_desc = "Z>" + str(stat_threshold)
if version['num'] == "1.0.0":
user_threshold_type = "Z-Statistic"
else:
threshold_type = OBO_STATISTIC
value = stat_threshold
elif p_uncorr_threshold is not None:
thresh_desc = "p<" + \
str(p_uncorr_threshold) + " (uncorrected)"
if version['num'] == "1.0.0":
user_threshold_type = "p-value uncorrected"
else:
threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
value = p_uncorr_threshold
elif p_corr_threshold is not None:
thresh_desc = "p<" + str(p_corr_threshold) + " (FWE)"
if version['num'] == "1.0.0":
user_threshold_type = "p-value FWE"
else:
threshold_type = OBO_P_VALUE_FWER
value = p_corr_threshold
if version['num'] == "1.0.0":
self.user_threshold_type = user_threshold_type
self.p_uncorr_threshold = p_uncorr_threshold
self.p_corr_threshold = p_corr_threshold
self.stat_threshold = stat_threshold
else:
self.value = value
self.threshold_type = threshold_type
if not label:
self.label = "Height Threshold: " + thresh_desc
else:
self.label = label
self.type = NIDM_HEIGHT_THRESHOLD
self.prov_type = PROV['Entity']
self.equiv_thresh = equiv_thresh
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_HeightThreshold: <http://purl.org/nidash/nidm#NIDM_0000034>
prefix nidm_hasAlternativeHypothesis: <http://purl.org/nidash/nidm#NIDM_000009\
7>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_HeightThreshold: ;
a ?threshold_type ;
rdfs:label ?label ;
prov:value ?value .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_HeightThreshold:) )
}
"""
return query
def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = [
(PROV['type'], self.type),
(PROV['label'], self.label),
]
if version['num'] == "1.0.0":
atts += [
(NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
(PROV['value'], self.stat_threshold),
(NIDM_P_VALUE_UNCORRECTED, self.p_uncorr_threshold),
(NIDM_P_VALUE_FWER, self.p_corr_threshold)
]
else:
atts += [
(PROV['type'], self.threshold_type),
(PROV['value'], self.value)
]
if self.equiv_thresh is not None:
for equiv in self.equiv_thresh:
atts += [
(NIDM_EQUIVALENT_THRESHOLD, equiv.id)
]
self.add_attributes([(k, v) for k, v in atts if v is not None])
class ExtentThreshold(NIDMObject):
"""
Object representing an ExtentThreshold entity.
"""
def __init__(self, extent=None, p_corr=None, p_uncorr=None,
extent_rsl=None, label=None, version={'num': '1.3.0'},
value=None, oid=None, equiv_thresh=None, threshold_type=None):
super(ExtentThreshold, self).__init__(oid=oid)
self.type = NIDM_EXTENT_THRESHOLD
self.prov_type = PROV['Entity']
thresh_desc = ""
if threshold_type is not None:
self.threshold_type = threshold_type
else:
if extent is not None:
thresh_desc = "k>" + str(extent)
# NIDM-Results 1.0.0
user_threshold_type = "Cluster-size in voxels"
# NIDM-Results > 1.0.0
threshold_type = OBO_STATISTIC
elif p_uncorr is not None:
thresh_desc = "p<" + str(self.p_uncorr) + " (uncorrected)"
# NIDM-Results 1.0.0
user_threshold_type = "p-value uncorrected"
# NIDM-Results > 1.0.0
threshold_type = NIDM_P_VALUE_UNCORRECTED_CLASS
value = p_uncorr
elif p_corr is not None:
thresh_desc = "p<" + str(p_corr) + " (FWE)"
# NIDM-Results 1.0.0
user_threshold_type = "p-value FWE"
# NIDM-Results > 1.0.0
threshold_type = OBO_P_VALUE_FWER
value = p_corr
else:
thresh_desc = "k>=0"
extent = 0
if version['num'] == "1.0.0":
p_uncorr = 1.0
p_corr = 1.0
user_threshold_type = None
else:
threshold_type = OBO_STATISTIC
self.threshold_type = threshold_type
self.value = value
if version['num'] == "1.0.0":
self.user_threshold_type = user_threshold_type
self.p_uncorr = p_uncorr
self.p_corr = p_corr
else:
self.threshold_type = threshold_type
self.extent = extent
self.extent_rsl = extent_rsl
if label is None:
self.label = "Extent Threshold: " + thresh_desc
else:
self.label = label
self.equiv_thresh = equiv_thresh
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ExtentThreshold: <http://purl.org/nidash/nidm#NIDM_0000026>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ExtentThreshold: ;
a ?threshold_type ;
rdfs:label ?label .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInVoxels: ?extent .} .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?extent_rsl .} .
FILTER ( ?threshold_type NOT IN (prov:Entity, nidm_ExtentThreshold:) )
}
"""
return query
def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = [
(PROV['type'], self.type),
]
atts += [
(PROV['label'], self.label)
]
if self.extent_rsl is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_RESELS, self.extent_rsl),
]
if self.extent is not None:
atts += [
(NIDM_CLUSTER_SIZE_IN_VOXELS, self.extent),
]
if version['num'] == "1.0.0":
atts += [
(NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
(NIDM_P_VALUE_UNCORRECTED, self.p_uncorr),
(NIDM_P_VALUE_FWER, self.p_corr)
]
else:
atts += [
(PROV['type'], self.threshold_type)
]
if self.value is not None:
atts += [
(PROV['value'], self.value)
]
if self.equiv_thresh is not None:
for equiv in self.equiv_thresh:
atts += [
(NIDM_EQUIVALENT_THRESHOLD, equiv.id)
]
self.add_attributes([(k, v) for k, v in atts if v is not None])
class Cluster(NIDMObject):
"""
Object representing a Cluster entity.
"""
def __init__(self, cluster_num, size, pFWER, peaks,
x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
suffix='', clust_size_resels=None, pFDR=None, punc=None,
label=None, oid=None, cog=None):
super(Cluster, self).__init__(oid=oid)
self.num = cluster_num
if cog is not None:
self.cog = cog
else:
            if x is not None and y is not None and z is not None:
self.cog = CenterOfGravity(
cluster_num, x=x, y=y, z=z, x_std=x_std, y_std=y_std,
z_std=z_std)
else:
self.cog = None
self.peaks = peaks
self.size = size
self.pFWER = pFWER
self.type = NIDM_SIGNIFICANT_CLUSTER
self.prov_type = PROV['Entity']
self.punc = punc
self.pFDR = pFDR
if not label:
cluster_naming = "Supra-Threshold Cluster"
self.label = "%s %04d" % (cluster_naming, self.num)
else:
self.label = label
self.clust_size_resels = clust_size_resels
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_SupraThresholdCluster: <http://purl.org/nidash/nidm#NIDM_0000070>
prefix nidm_clusterSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000084>
prefix nidm_clusterLabelId: <http://purl.org/nidash/nidm#NIDM_0000082>
prefix nidm_clusterSizeInResels: <http://purl.org/nidash/nidm#NIDM_0000156>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SupraThresholdCluster: ;
rdfs:label ?label ;
nidm_clusterSizeInVoxels: ?size ;
nidm_clusterLabelId: ?cluster_num .
OPTIONAL {""" + oid_var + """ nidm_clusterSizeInResels: ?clust_size_resels .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?punc .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?pFWER .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?pFDR .} .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
if nidm_version['num'] in ["1.0.0", "1.1.0"]:
self.label = self.label.replace("Supra-Threshold", "Significant")
# FIXME deal with multiple contrasts
atts = (
(PROV['type'], NIDM_SIGNIFICANT_CLUSTER),
(PROV['label'], self.label),
(NIDM_CLUSTER_LABEL_ID, self.num),
(NIDM_CLUSTER_SIZE_IN_VOXELS, self.size)
)
if self.clust_size_resels is not None:
atts = atts + (
(NIDM_CLUSTER_SIZE_IN_RESELS, self.clust_size_resels),
)
if self.punc is not None:
atts = atts + (
(NIDM_P_VALUE_UNCORRECTED,
Literal(self.punc, datatype=XSD_FLOAT)),
)
if self.pFDR is not None:
atts = atts + (
(NIDM_Q_VALUE_FDR, Literal(self.pFDR, datatype=XSD_FLOAT)),
)
if self.pFWER is not None:
atts = atts + (
(NIDM_P_VALUE_FWER, Literal(self.pFWER, datatype=XSD_FLOAT)),
)
self.add_attributes(atts)
class DisplayMaskMap(NIDMObject):
"""
Object representing a DisplayMaskMap entity.
"""
def __init__(self, contrast_num, mask_file, mask_num, coord_space,
sha=None, filename=None, fmt=None, label=None, oid=None,
derfrom_id=None, derfrom_filename=None, derfrom_fmt=None,
derfrom_sha=None, isderfrommap=False):
super(DisplayMaskMap, self).__init__(oid=oid)
if not filename:
filename = 'DisplayMask' + str(mask_num) + '.nii.gz'
self.file = NIDMFile(self.id, mask_file, filename,
sha=sha, fmt=fmt)
self.coord_space = coord_space
self.type = NIDM_DISPLAY_MASK_MAP
self.prov_type = PROV['Entity']
if not label:
self.label = "Display Mask Map " + str(mask_num)
else:
self.label = label
if derfrom_id is not None:
self.derfrom = DisplayMaskMap(
None, None, None,
coord_space=None, oid=derfrom_id,
filename=derfrom_filename, sha=derfrom_sha,
fmt=derfrom_fmt,
isderfrommap=True)
else:
self.derfrom = None
self.isderfrommap = isderfrommap
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_DisplayMaskMap: <http://purl.org/nidash/nidm#NIDM_0000020>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_DisplayMaskMap: ;
rdfs:label ?label ;
nfo:fileName ?filename ;
crypto:sha512 ?sha ;
prov:atLocation ?mask_file ;
dct:format ?fmt .
OPTIONAL {""" + oid_var + """ prov:wasDerivedFrom ?derfrom_id .
?derfrom_id a nidm_DisplayMaskMap: ;
nfo:fileName ?derfrom_filename ;
dct:format ?derfrom_fmt ;
crypto:sha512 ?derfrom_sha .
} .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
atts = (
(PROV['type'], self.type),
)
if not self.isderfrommap:
atts = atts + (
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
(PROV['label'], self.label))
self.add_attributes(atts)
class PeakCriteria(NIDMObject):
"""
Object representing a PeakCriteria entity.
"""
def __init__(self, contrast_num, peak_dist, num_peak=None, label=None,
oid=None):
super(PeakCriteria, self).__init__(oid=oid)
self.num_peak = num_peak
self.peak_dist = peak_dist
self.type = NIDM_PEAK_DEFINITION_CRITERIA
self.prov_type = PROV['Entity']
if not label:
self.label = "Peak Definition Criteria"
else:
self.label = label
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_PeakDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_0000063>
prefix nidm_minDistanceBetweenPeaks: <http://purl.org/nidash/nidm#NIDM_0000109>
prefix nidm_maxNumberOfPeaksPerCluster: <http://purl.org/nidash/nidm#NIDM_0000\
108>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_PeakDefinitionCriteria: ;
rdfs:label ?label ;
nidm_minDistanceBetweenPeaks: ?peak_dist .
OPTIONAL { """ + oid_var + """ nidm_maxNumberOfPeaksPerCluster: ?num_peak .} .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
num_peak = ()
if self.num_peak:
num_peak = ((NIDM_MAX_NUMBER_OF_PEAKS_PER_CLUSTER, self.num_peak),)
# Create "Peak definition criteria" entity
self.add_attributes((
(PROV['type'], self.type),
(PROV['label'], self.label),
(NIDM_MIN_DISTANCE_BETWEEN_PEAKS, self.peak_dist)
) + num_peak)
class ClusterCriteria(NIDMObject):
"""
Object representing a ClusterCriteria entity.
"""
def __init__(self, contrast_num, connectivity, label=None, oid=None):
super(ClusterCriteria, self).__init__(oid=oid)
self.connectivity = connectivity
self.type = NIDM_CLUSTER_DEFINITION_CRITERIA
self.prov_type = PROV['Entity']
if not label:
self.label = ("Cluster Connectivity Criterion: " +
str(self.connectivity))
else:
self.label = label
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ClusterDefinitionCriteria: <http://purl.org/nidash/nidm#NIDM_00000\
07>
prefix nidm_hasConnectivityCriterion: <http://purl.org/nidash/nidm#NIDM_000009\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterDefinitionCriteria: ;
rdfs:label ?label ;
nidm_hasConnectivityCriterion: ?connectivity .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
# Create "Cluster definition criteria" entity
if isinstance(self.connectivity, int):
if self.connectivity == 6:
self.connectivity = NIDM_VOXEL6CONNECTED
elif self.connectivity == 18:
self.connectivity = NIDM_VOXEL18CONNECTED
elif self.connectivity == 26:
self.connectivity = NIDM_VOXEL26CONNECTED
# FIXME if connectivity is missing
if self.connectivity is not None:
atts = (
(PROV['type'], self.type),
(PROV['label'], self.label),
(NIDM_HAS_CONNECTIVITY_CRITERION, self.connectivity))
else:
atts = (
(PROV['type'], NIDM_CLUSTER_DEFINITION_CRITERIA),
                (PROV['label'], self.label))
self.add_attributes(atts)
class CenterOfGravity(NIDMObject):
"""
Object representing a CenterOfGravity entity.
"""
def __init__(self, cluster_num, x=None, y=None, z=None, x_std=None,
y_std=None, z_std=None, oid=None, coord_vector=None,
coord_vector_std=None, label=None, coord_id=None):
# Note: coord_id argument is only here for compatibility
# with the query outputs
super(CenterOfGravity, self).__init__(oid=oid)
self.cluster_num = cluster_num
self.coordinate = Coordinate("%04d" % cluster_num, x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std,
coord_vector_std=coord_vector_std,
coord_vector=coord_vector, oid=coord_id)
self.type = NIDM_CLUSTER_CENTER_OF_GRAVITY
self.prov_type = PROV['Entity']
if label is None:
label = "Center of gravity " + str(self.cluster_num)
self.label = label
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_ClusterCenterOfGravity: <http://purl.org/nidash/nidm#NIDM_0000140>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_ClusterCenterOfGravity: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL { ?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
}
"""
return query
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
self.add_attributes((
(PROV['type'], self.type),
(PROV['label'], self.label),
(PROV['location'], self.coordinate.id)))
class SearchSpace(NIDMObject):
"""
Object representing a SearchSpace entity.
"""
def __init__(self, search_space_file, vol_in_voxels, vol_in_units,
vol_in_resels, resel_size_in_voxels,
random_field_stationarity, noise_fwhm_in_voxels,
noise_fwhm_in_units, coord_space,
expected_num_voxels=None, expected_num_clusters=None,
height_critical_fwe05=None, height_critical_fdr05=None,
extent_critical_fwe05=None, extent_critical_fdr05=None,
search_vol_geom=None, noise_roughness=None,
filename=None, sha=None, fmt=None,
label=None, oid=None):
super(SearchSpace, self).__init__(oid=oid)
if not filename:
filename = 'SearchSpaceMask.nii.gz'
self.file = NIDMFile(self.id, search_space_file, filename,
sha=sha, fmt=fmt)
self.coord_space = coord_space
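        # A resel ("resolution element") is a virtual voxel whose sides equal
        # the estimated smoothness (FWHM) of the noise, so search volumes can
        # be expressed in voxels, physical units, or resels.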
self.resel_size_in_voxels = resel_size_in_voxels
self.search_volume_in_voxels = vol_in_voxels
self.search_volume_in_units = vol_in_units
self.search_volume_in_resels = vol_in_resels
self.rf_stationarity = random_field_stationarity
self.noise_fwhm_in_voxels = noise_fwhm_in_voxels
self.noise_fwhm_in_units = noise_fwhm_in_units
self.type = NIDM_SEARCH_SPACE_MASK_MAP
self.prov_type = PROV['Entity']
self.label = "Search Space Mask Map"
self.expected_num_voxels = expected_num_voxels
self.expected_num_clusters = expected_num_clusters
self.height_critical_fwe05 = height_critical_fwe05
self.height_critical_fdr05 = height_critical_fdr05
self.extent_critical_fwe05 = extent_critical_fwe05
self.extent_critical_fdr05 = extent_critical_fdr05
self.search_vol_geom = search_vol_geom
self.noise_roughness = noise_roughness
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_SearchSpaceMaskMap: <http://purl.org/nidash/nidm#NIDM_0000068>
prefix nidm_expectedNumberOfVoxelsPerCluster: <http://purl.org/nidash/nidm#NID\
M_0000143>
prefix nidm_expectedNumberOfClusters: <http://purl.org/nidash/nidm#NIDM_000014\
1>
prefix nidm_heightCriticalThresholdFWE05: <http://purl.org/nidash/nidm#NIDM_00\
00147>
prefix nidm_heightCriticalThresholdFDR05: <http://purl.org/nidash/nidm#NIDM_00\
00146>
prefix nidm_searchVolumeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000121>
prefix nidm_searchVolumeInUnits: <http://purl.org/nidash/nidm#NIDM_0000136>
prefix nidm_searchVolumeInResels: <http://purl.org/nidash/nidm#NIDM_0000149>
prefix nidm_reselSizeInVoxels: <http://purl.org/nidash/nidm#NIDM_0000148>
prefix nidm_noiseFWHMInVoxels: <http://purl.org/nidash/nidm#NIDM_0000159>
prefix nidm_noiseFWHMInUnits: <http://purl.org/nidash/nidm#NIDM_0000157>
prefix nidm_randomFieldStationarity: <http://purl.org/nidash/nidm#NIDM_0000120>
prefix spm_smallestSignificantClusterSizeInVoxelsFWE05: <http://purl.org/nidas\
h/spm#SPM_0000014>
prefix spm_smallestSignificantClusterSizeInVoxelsFDR05: <http://purl.org/nidas\
h/spm#SPM_0000013>
prefix spm_searchVolumeReselsGeometry: <http://purl.org/nidash/spm#SPM_0000010>
prefix nidm_noiseRoughnessInVoxels: <http://purl.org/nidash/nidm#NIDM_0000145>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_SearchSpaceMaskMap: ;
rdfs:label ?label ;
nidm_searchVolumeInVoxels: ?vol_in_voxels ;
nidm_searchVolumeInUnits: ?vol_in_units ;
nidm_searchVolumeInResels: ?vol_in_resels ;
nidm_reselSizeInVoxels: ?resel_size_in_voxels ;
nidm_noiseFWHMInVoxels: ?noise_fwhm_in_voxels ;
nidm_noiseFWHMInUnits: ?noise_fwhm_in_units ;
nidm_randomFieldStationarity: ?random_field_stationarity ;
prov:atLocation ?search_space_file ;
dct:format ?fmt ;
nfo:fileName ?filename ;
crypto:sha512 ?sha .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfVoxelsPerCluster: ?expected_num_voxels } .
OPTIONAL {""" + oid_var + """ nidm_expectedNumberOfClusters: ?expected_num_clusters } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFWE05: ?height_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ nidm_heightCriticalThresholdFDR05: ?height_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFWE05: ?extent_critical_fwe05 } .
OPTIONAL {""" + oid_var + """ spm_smallestSignificantClusterSizeInVoxelsFDR05: ?extent_critical_fdr05 } .
OPTIONAL {""" + oid_var + """ spm_searchVolumeReselsGeometry: ?search_vol_geom } .
OPTIONAL {""" + oid_var + """ nidm_noiseRoughnessInVoxels: ?noise_roughness } .
}
"""
return query
# Generate prov for search space entity generated by the inference activity
def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = (
(PROV['label'], self.label),
(PROV['type'], NIDM_SEARCH_SPACE_MASK_MAP),
(NIDM_RANDOM_FIELD_STATIONARITY, self.rf_stationarity),
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
(NIDM_SEARCH_VOLUME_IN_VOXELS, self.search_volume_in_voxels),
(NIDM_SEARCH_VOLUME_IN_UNITS, self.search_volume_in_units),
(NIDM_SEARCH_VOLUME_IN_RESELS, self.search_volume_in_resels),
(NIDM_RESEL_SIZE_IN_VOXELS, self.resel_size_in_voxels))
# Noise FWHM was introduced in NIDM-Results 1.1.0
if self.noise_fwhm_in_voxels is not None:
if (version['major'] > 1) or \
(version['major'] >= 1 and
(version['minor'] > 0 or version['revision'] > 0)):
atts = atts + (
(NIDM_NOISE_FWHM_IN_VOXELS, self.noise_fwhm_in_voxels),
(NIDM_NOISE_FWHM_IN_UNITS, self.noise_fwhm_in_units))
if self.expected_num_voxels is not None:
atts = atts + ((NIDM_EXPECTED_NUMBER_OF_VOXELS_PER_CLUSTER,
self.expected_num_voxels),)
if self.expected_num_clusters is not None:
atts = atts + ((NIDM_EXPECTED_NUMBER_OF_CLUSTERS,
self.expected_num_clusters),)
if self.height_critical_fwe05 is not None:
atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FWE_05,
self.height_critical_fwe05),)
if self.height_critical_fdr05 is not None:
atts = atts + ((NIDM_HEIGHT_CRITICAL_THRESHOLD_FDR_05,
self.height_critical_fdr05),)
if self.extent_critical_fwe05 is not None:
atts = atts + ((
SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FWE05,
self.extent_critical_fwe05),)
if self.extent_critical_fdr05 is not None:
atts = atts + ((
SPM_SMALLEST_SIGNIFICANT_CLUSTER_SIZE_IN_VOXELS_FDR05,
self.extent_critical_fdr05),)
if self.search_vol_geom is not None:
atts = atts + ((SPM_SEARCH_VOLUME_RESELS_GEOMETRY,
self.search_vol_geom),)
if self.noise_roughness:
atts = atts + ((NIDM_NOISE_ROUGHNESS_IN_VOXELS,
self.noise_roughness),)
# Create "Search Space Mask map" entity
self.add_attributes(atts)
class Coordinate(NIDMObject):
"""
Object representing a Coordinate entity.
"""
def __init__(self, label_id, coord_vector=None, coord_vector_std=None,
x=None, y=None, z=None, x_std=None, y_std=None, z_std=None,
label=None, oid=None):
super(Coordinate, self).__init__(oid=oid)
self.label_id = label_id
if x is not None and y is not None and z is not None:
self.coord_vector = [x, y, z]
else:
if coord_vector and not type(coord_vector) is list:
coord_vector = json.loads(coord_vector)
self.coord_vector = coord_vector
if x_std is not None and y_std is not None and z_std is not None:
self.coord_vector_std = [x_std, y_std, z_std]
else:
if coord_vector_std and not type(coord_vector_std) is list:
coord_vector_std = json.loads(coord_vector_std)
self.coord_vector_std = coord_vector_std
self.type = NIDM_COORDINATE
self.prov_type = PROV['Entity']
if label is not None:
self.label = label
else:
self.label = "Coordinate " + self.label_id
def __str__(self):
return '%s\t%s' % (self.label, self.coord_vector)
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
        # We cannot store this as a dictionary because we want to keep the
        # duplicate prov:type attribute
atts = ( # (PROV['type'],PROV['Location']),
(PROV['type'], NIDM_COORDINATE),
(PROV['type'], PROV['Location']),
(PROV['label'], self.label)
)
if self.coord_vector is not None:
atts = atts +\
((NIDM_COORDINATE_VECTOR_IN_VOXELS,
json.dumps(self.coord_vector)),)
# FSL unnormalised subject-level analyses do not provide coordinates in
# voxels
if self.coord_vector_std is not None:
atts = atts +\
((NIDM_COORDINATE_VECTOR, json.dumps(self.coord_vector_std)),)
self.add_attributes(atts)
class Peak(NIDMObject):
"""
Object representing a Peak entity.
"""
def __init__(self, equiv_z, p_unc=None, p_fwer=None, label=None,
coord_label=None, exc_set_id=None, oid=None, suffix='',
p_fdr=None, value=None, coord_id=None, *args, **kwargs):
super(Peak, self).__init__(oid)
# FIXME: Currently assumes less than 10 clusters per contrast
# cluster_num = cluster_index
# FIXME: Currently assumes less than 100 peaks
if oid is not None:
self.label = label
peak_unique_id = label[5:]
peak_index = peak_unique_id
# cluster_index, peak_index = peak_unique_id.split("_")
else:
peak_unique_id = suffix
self.label = "Peak " + peak_unique_id
self.equiv_z = equiv_z
self.p_unc = p_unc
self.p_fwer = p_fwer
self.coordinate = Coordinate(
str(peak_unique_id), label=coord_label, oid=coord_id, **kwargs)
self.type = NIDM_PEAK
self.prov_type = PROV['Entity']
# self.cluster = cluster_id
self.exc_set_id = exc_set_id
self.value = value
self.p_fdr = p_fdr
@classmethod
def get_query(klass, oid=None):
if oid is None:
oid_var = "?oid"
else:
oid_var = "<" + str(oid) + ">"
query = """
prefix nidm_Peak: <http://purl.org/nidash/nidm#NIDM_0000062>
prefix nidm_pValueUncorrected: <http://purl.org/nidash/nidm#NIDM_0000116>
prefix nidm_equivalentZStatistic: <http://purl.org/nidash/nidm#NIDM_0000092>
prefix nidm_pValueFWER: <http://purl.org/nidash/nidm#NIDM_0000115>
prefix nidm_qValueFDR: <http://purl.org/nidash/nidm#NIDM_0000119>
prefix nidm_coordinateVectorInVoxels: <http://purl.org/nidash/nidm#NIDM_000013\
9>
prefix nidm_coordinateVector: <http://purl.org/nidash/nidm#NIDM_0000086>
SELECT DISTINCT * WHERE {
""" + oid_var + """ a nidm_Peak: ;
rdfs:label ?label ;
prov:atLocation ?coord_id .
?coord_id a nidm_Coordinate: ;
rdfs:label ?coord_label ;
nidm_coordinateVector: ?coord_vector_std .
OPTIONAL {?coord_id nidm_coordinateVectorInVoxels: ?coord_vector .} .
OPTIONAL {""" + oid_var + """ prov:value ?value .} .
OPTIONAL {""" + oid_var + """ nidm_pValueUncorrected: ?p_unc .} .
OPTIONAL {""" + oid_var + """ nidm_equivalentZStatistic: ?equiv_z .} .
OPTIONAL {""" + oid_var + """ nidm_pValueFWER: ?p_fwer .} .
OPTIONAL {""" + oid_var + """ nidm_qValueFDR: ?p_fdr .} .
}
"""
return query
def __str__(self):
return '%s \tz=%.2f \tp=%.2e (unc.) \t%s' % (
self.label, self.equiv_z, self.p_unc, str(self.coordinate))
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
if self.p_unc is None:
norm_cdf_z = (1.0 + erf(self.equiv_z / sqrt(2.0))) / 2.0
self.p_unc = 1 - norm_cdf_z
atts = (
(PROV['type'], self.type),
(PROV['label'], self.label),
(PROV['location'], self.coordinate.id))
if self.value is not None:
atts = atts + (
(PROV['value'], self.value),
)
if self.p_unc is not None:
atts = atts + (
(NIDM_P_VALUE_UNCORRECTED,
Literal(self.p_unc, datatype=XSD_FLOAT)),
)
if self.equiv_z is not None:
atts = atts + (
(NIDM_EQUIVALENT_ZSTATISTIC,
Literal(self.equiv_z, datatype=XSD_FLOAT)),
)
if self.p_fdr is not None:
atts = atts + (
(NIDM_Q_VALUE_FDR,
Literal(self.p_fdr, datatype=XSD_FLOAT)),
)
if self.p_fwer is not None:
atts = atts + (
(NIDM_P_VALUE_FWER,
Literal(self.p_fwer, datatype=XSD_FLOAT)),
)
self.add_attributes(atts)
| 34.308374 | 109 | 0.594518 |
f7204379766eb4e6ae9bd5b9297cae2841d80760 | 8,607 | py | Python | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | gpxo/track.py | liquidpizza/gpxo | 4f8eb43a4d6b879f51a7e688dfa80b4aa5558889 | [
"BSD-3-Clause"
] | null | null | null | """General tools for gpx data processing based on gpxpy."""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gpxpy
from vincenty import vincenty
import mplleaflet
from .general import smooth, closest_pt
# =============================== Misc. Config ===============================
# short names for plots
shortnames = {'t': 'time',
's': 'duration (s)',
'd': 'distance (km)',
'v': 'velocity (km/h)',
'z': 'elevation (m)',
'c': 'compass (°)'}
# ========================= Misc. private functions ==========================
# Function to transform array of timedeltas to seoncds
_total_seconds = np.vectorize(lambda dt: dt.total_seconds())
# ============================ Main class (Track) ============================
class Track:
def __init__(self, filename, track=0, segment=0):
with open(filename, 'r') as gpx_file:
gpx = gpxpy.parse(gpx_file)
pts = gpx.tracks[track].segments[segment].points
self.latitude = np.array([pt.latitude for pt in pts])
self.longitude = np.array([pt.longitude for pt in pts])
self.elevation = np.array([pt.elevation for pt in pts])
self.time = np.array([pt.time for pt in pts])
# If some elevation or time data is missing, just set attribute to None
if any(self.time == None):
self.time = None
if any(self.elevation == None):
self.elevation = None
@staticmethod
def _distance(position1, position2):
"""Distance between two positions (latitude, longitude)."""
return vincenty(position1, position2)
def _resample(self, quantity, reference):
"""Resample quantities (velocity, compass) to fall back on reference
Reference is typically time or distance."""
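        # Example (hypothetical numbers): for reference [0, 2, 4], midpoints
        # are [1, 3]; a diff-based quantity sampled there is linearly
        # interpolated back onto [0, 2, 4].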
# midpoints correponding to shifted quantity
midpts = reference[:-1] + (np.diff(reference) / 2)
# linear interpolation to fall back to initial times
qty_resampled = np.interp(reference, midpts, quantity)
return qty_resampled
@property
def seconds(self):
if self.time is not None:
return _total_seconds(self.time - self.time[0])
@property
def distance(self):
"""Travelled distance in kilometers."""
ds = [0]
x1s = self.latitude[:-1]
x2s = self.latitude[1:]
y1s = self.longitude[:-1]
y2s = self.longitude[1:]
for x1, x2, y1, y2 in zip(x1s, x2s, y1s, y2s):
dd = self._distance((x1, y1), (x2, y2))
ds.append(dd)
return np.cumsum(ds)
@property
def compass(self):
"""Compass bearing in decimal degrees (°). See gpxo.compass"""
lat1, long1 = np.radians((self.latitude[:-1], self.longitude[:-1]))
lat2, long2 = np.radians((self.latitude[1:], self.longitude[1:]))
d_long = long2 - long1
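        # Initial great-circle bearing between consecutive points:
        # bearing = atan2(sin(d_long) * cos(lat2),
        #                 cos(lat1) * sin(lat2)
        #                 - sin(lat1) * cos(lat2) * cos(d_long))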
x = np.sin(d_long) * np.cos(lat2)
y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) * np.cos(d_long))
# Resample before taking arctan because if not, interpolation fails
# when the signal fluctuates between 0 and 360° when compass is N
x_res = self._resample(x, self.distance)
y_res = self._resample(y, self.distance)
initial_bearing = np.arctan2(x_res, y_res)
# Now we have the initial bearing but np.arctan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = np.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
@property
def velocity(self):
"""Instantaneous velocity in km/h."""
if self.time is not None:
dt = np.diff(self.seconds)
dd = np.diff(self.distance)
vs = 3600 * dd / dt
return self._resample(vs, self.seconds)
else:
return None
@property
def data(self):
"""pd.DataFrame with all track data (time, position, velocity etc.)"""
names = ['latitude (°)', 'longitude (°)', 'distance (km)', 'compass (°)']
columns = [self.latitude, self.longitude, self.distance, self.compass]
if self.time is not None:
            names += ['time', 'duration (s)', 'velocity (km/h)']
columns += [self.time, self.seconds, self.velocity]
if self.elevation is not None:
names.append('elevation (m)')
columns.append(self.elevation)
data = pd.DataFrame(dict(zip(names, columns)))
if self.time is not None:
data['time'] = data['time'].dt.tz_localize(None)
data.set_index('time', inplace=True)
return data
def _shortname_to_column(self, name):
"""shorname to column name in self.data."""
try:
cname = shortnames[name]
except KeyError:
raise ValueError(f'Invalid short name: {name}. ')
if cname == 'time':
column = self.data.index
else:
try:
column = self.data[cname]
except KeyError:
                raise KeyError(f'{cname} data unavailable in current track.')
return {'name': cname, 'column': column}
def plot(self, mode, *args, **kwargs):
"""Plot columns of self.data (use pandas DataFrame plot arguments).
Parameters
----------
- mode (str): 2 letters that define short names for x and y axis
- *args: any additional argument for matplotlib ax.plot()
- **kwargs: any additional keyword argument for matplotlib ax.plot()
Output
------
- matplotlib axes
Short names
-----------
't': 'time'
's': 'duration (s)'
'd': 'distance (km)'
'v': 'velocity (km/h)'
'z': 'elevation (m)'
'c': 'compass (°)'
"""
try:
xname, yname = mode
except ValueError:
            raise ValueError("Invalid plot mode (should be two letters, "
                             f"e.g. 'tv'), got '{mode}'")
xinfo = self._shortname_to_column(xname)
xlabel = xinfo['name']
x = xinfo['column']
yinfo = self._shortname_to_column(yname)
ylabel = yinfo['name']
y = yinfo['column']
fig, ax = plt.subplots()
ax.plot(x, y, *args, **kwargs)
if xlabel == 'time':
fig.autofmt_xdate()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def smooth(self, n=5, window='hanning'):
"""Smooth position data (and subsequently distance, velocity etc.)
Parameters
----------
- n: size of moving window for smoothing
- window: type of window (e.g. 'hanning' or 'flat', see gpxo.smooth())
"""
self.latitude = smooth(self.latitude, n=n, window=window)
self.longitude = smooth(self.longitude, n=n, window=window)
self.elevation = smooth(self.elevation, n=n, window=window)
def closest_to(self, pt):
"""Find index of point in trajectory that is closest to pt=(lat, long)."""
return closest_pt(pt, (self.latitude, self.longitude))
def map(self, map_type='osm', embed=False, ax=None, size=(10, 10),
plot='plot', **kwargs):
"""Plot trajectory on map.
Parameters
----------
- map_type can be e.g. osm, esri_aerial, esri_worldtopo, etc. see:
https://github.com/jwass/mplleaflet/blob/master/mplleaflet/maptiles.py
- embed: if True, embed plot in Jupyter. If False (default), open in
browser.
- ax: if not None, use provided matplotlib axes.
- size: when embedded, size of the figure.
- plot: 'plot' or 'scatter'
- **kwargs: any plt.plot or plt.scatter keyword arguments
"""
if ax is None:
fig, ax = plt.subplots(figsize=size)
else:
fig = ax.figure
if plot == 'plot':
ax.plot(self.longitude, self.latitude, '.-r', **kwargs)
elif plot == 'scatter':
ax.scatter(self.longitude, self.latitude, **kwargs)
else:
raise ValueError(f'Unrecognized plot type: {plot}')
parameters = {'fig': fig, 'tiles': map_type}
if embed:
leaflet = mplleaflet.display(**parameters)
else:
leaflet = mplleaflet.show(**parameters)
return leaflet
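# Illustrative usage sketch (an assumption; 'ride.gpx' is a hypothetical
# local file and mplleaflet must be able to open a browser):
#
#   track = Track('ride.gpx')
#   track.smooth(n=7)
#   ax = track.plot('tv')      # velocity vs. time
#   track.map(plot='scatter')  # trajectory on an OSM map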
| 31.412409 | 88 | 0.562449 |
f7211163c547410a5d37c79cba8d58a47a6c46de | 7,205 | py | Python | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | final-exam/tic_toc_toe_messy.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | """
Tic Tac Toe
Reference: With modification from http://inventwithpython.com/chapter10.html.
# TODOs:
# 1. Find all TODO items and see whether you can improve the code.
# In most cases (if not all), you can make them more readable/modular.
# 2. Add/fix function's docstrings
"""
import random
# I didn't refactor the draw and is_winner functions, which use the magic
# number 10, because that would drastically change how the code works.
# Instead of keeping a normal tic tac toe game as intended, the change
# would add support for larger boards, making it a new feature rather
# than a refactoring.
def draw_board(board):
"""This function prints out the board that it was passed."""
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
def input_player_letter():
"""Lets the player type which letter they want to be. Returns a list with the
player’s letter as the first item, and the computer's letter as the second."""
letter = ''
while letter not in ('X', 'O'):
print('Do you want to be X or O?')
letter = input().upper()
# the first element in the list is the player’s letter, the second is the computer's letter.
if letter == 'X':
return ['X', 'O']
return ['O', 'X']
def who_goes_first():
"""Randomly choose the player who goes first."""
if random.randint(0, 1) == 0:
return 'computer'
return 'player'
def play_again():
"""Returns True if the player wants to play again, otherwise it returns False."""
print('Do you want to play again? (yes or no)')
return input().lower().startswith('y')
def make_move(board, letter, move):
"""Makes a move on the given board with the given letter and move"""
board[move] = letter
def is_winner(board, letter):
"""Given a board and a player’s letter, this function returns True if
that player has won."""
return ((board[1] == letter and board[2] == letter and board[3] == letter) or # across the top
(board[4] == letter and board[5] == letter and board[6] == letter) or # across the middle
(board[7] == letter and board[8] == letter and board[9] == letter) or # across the bottom
(board[1] == letter and board[4] == letter and board[7] == letter) or # down the left side
(board[2] == letter and board[5] == letter and board[8] == letter) or # down the middle
(board[3] == letter and board[6] == letter and board[9] == letter) or # down the right side
(board[3] == letter and board[5] == letter and board[7] == letter) or # diagonal
(board[1] == letter and board[5] == letter and board[9] == letter)) # diagonal
def get_board_copy(board):
"""Make a duplicate of the board list and return it the duplicate."""
return list(board)
def is_space_free(board, move):
"""Return true if the passed move is free on the passed board."""
return board[move] == ' '
def get_player_move(board):
"""Let the player type in their move."""
player_move = ' '
options = set(str(i) for i in range(1, len(board)))
while (player_move not in options or
not is_space_free(board, int(player_move))):
print('What is your next move? (1-9)')
player_move = input()
return int(player_move)
def choose_random_move_from_list(board, moves_list):
"""Returns a valid move from the passed list on the passed board or None
if there is no valid move."""
possible_moves = []
for i in moves_list:
if is_space_free(board, i):
possible_moves.append(i)
if possible_moves:
return random.choice(possible_moves)
def is_next_move_win(board, letter):
"""Returns true is if the given letter can make a winning move, false if not"""
for i in range(1, 10):
copy = get_board_copy(board)
if is_space_free(copy, i):
make_move(copy, letter, i)
if is_winner(copy, letter):
return i
def get_computer_move(board, temp_computer_letter):
"""Given a board and the computer's letter, determine where to move and return that move."""
if temp_computer_letter == 'X':
temp_player_letter = 'O'
else:
temp_player_letter = 'X'
# Here is our algorithm for our Tic Tac Toe AI:
# First, check if we can win in the next move
is_ai_winner = is_next_move_win(board, temp_computer_letter)
if is_ai_winner:
return is_ai_winner
# Check if the player could win on their next move, and block them.
is_player_winner = is_next_move_win(board, temp_player_letter)
if is_player_winner:
return is_player_winner
# Try to take one of the corners, if they are free.
move = choose_random_move_from_list(board, [1, 3, 7, 9])
if move is not None:
return move
# Try to take the center, if it is free.
if is_space_free(board, 5):
return 5
# Move on one of the sides.
return choose_random_move_from_list(board, [2, 4, 6, 8])
def is_board_full(board):
"""Return True if every space on the board has been taken.
Otherwise return False."""
for i in range(1, len(board)):
if is_space_free(board, i):
return False
return True
def start_new_round(board, temp_player_letter, temp_computer_letter, temp_turn):
"""Starts a round and plays it through untill the player and computer takes their turn"""
while True:
if temp_turn == 'player':
# Player’s turn.
draw_board(board)
move = get_player_move(board)
make_move(board, temp_player_letter, move)
if is_winner(board, temp_player_letter):
draw_board(board)
print('Hooray! You have won the game!')
break
temp_turn = 'computer'
else:
# Computer’s turn.
move = get_computer_move(board, temp_computer_letter)
make_move(board, temp_computer_letter, move)
if is_winner(board, temp_computer_letter):
draw_board(board)
print('The computer has beaten you! You lose.')
break
temp_turn = 'player'
if is_board_full(board):
draw_board(board)
print('The game is a tie!')
break
def start_session(board_size=10):
"""Starts a session for playing mutliple games with the bot"""
print('Welcome to Tic Tac Toe!')
while True:
# Reset the board
the_board = [' '] * board_size
player_letter, computer_letter = input_player_letter()
turn = who_goes_first()
print('The ' + turn + ' will go first.')
start_new_round(the_board, player_letter, computer_letter, turn)
if not play_again():
break
if __name__ == '__main__':
start_session()
| 36.025 | 98 | 0.624427 |
f7212251e63dcb5ce319603d8ff0812abad4359b | 1,095 | py | Python | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | 1 | 2016-06-23T05:56:53.000Z | 2016-06-23T05:56:53.000Z | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
if ret['returncode'] != 0:
raise ResourceException(ret['stderr'])
def get_installed_packages():
ret = exec_cmd("/bin/rpm -qa")
return ret['stdout'].split('\n')
def remove(name):
ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
if ret['returncode'] != 0:
raise ResourceException(ret['stderr'])
def update(name):
# We need to check first if the package is installed. yum update of a
# non-existing package has a returncode of 0. We need to raise an exception
    # if the package is not installed!
inst = is_installed(name)
ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
if ret['returncode'] != 0 or not inst:
raise ResourceException(ret['stderr'])
def is_installed(name):
ret = exec_cmd("/bin/rpm -q {0}".format(name))
return ret['returncode'] == 0
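# A minimal usage sketch (`ensure_installed` is illustrative and not part
# of this plugin; it just shows how the helpers above compose):
def ensure_installed(name):
    """Install the package only if it is not already present."""
    if not is_installed(name):
        install(name)
    return is_installed(name)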
| 28.076923 | 79 | 0.663014 |
f721236e30c2bc62859814934c24d2d0a6124a36 | 1,534 | py | Python | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
import yaml
from maro.data_lib.ecr.vessel_parser import VesselsParser
from maro.data_lib.ecr.entities import VesselSetting
conf_str = """
vessels:
rt1_vessel_001:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: supply_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
rt1_vessel_002:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: demand_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
"""
class TestVesselParser(unittest.TestCase):
def test_vessel_parse(self):
conf = yaml.safe_load(conf_str)
parser = VesselsParser()
vessel_mapping, vessels = parser.parse(conf["vessels"])
self.assertEqual(2, len(vessel_mapping))
self.assertEqual(2, len(vessels))
self.assertEqual("rt1_vessel_001", vessels[0].name)
self.assertEqual("rt1_vessel_002", vessels[1].name)
# check capacity
self.assertListEqual([92400, 92400], [v.capacity for v in vessels])
self.assertListEqual([1, 1], [v.parking_duration for v in vessels])
self.assertListEqual([0, 0], [v.parking_noise for v in vessels])
self.assertListEqual([10, 10], [v.sailing_speed for v in vessels])
self.assertListEqual([0, 0], [v.sailing_noise for v in vessels])
if __name__=="__main__":
unittest.main() | 25.566667 | 75 | 0.666232 |
f72133aff214d90410fb19b8ccb50eafa1390f3b | 12,732 | py | Python | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2021-06-11T19:54:19.000Z | 2021-06-11T19:54:19.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2019-08-30T14:45:33.000Z | 2019-08-30T14:45:33.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for customremotes archives providing dl+archive URLs handling"""
from unittest.mock import patch
import os
import os.path as op
import sys
import re
import logging
import glob
from time import sleep
from ..archives import (
ArchiveAnnexCustomRemote,
link_file_load,
)
from ..base import AnnexExchangeProtocol
from ...support.annexrepo import AnnexRepo
from ...consts import ARCHIVES_SPECIAL_REMOTE
from .test_base import (
BASE_INTERACTION_SCENARIOS,
check_interaction_scenario,
)
from ...tests.utils import (
abspath,
assert_equal,
assert_false,
assert_is_instance,
assert_not_in,
assert_true,
chpwd,
eq_,
get_most_obscure_supported_name,
in_,
known_failure_githubci_win,
ok_,
ok_file_has_content,
serve_path_via_http,
swallow_logs,
swallow_outputs,
with_tempfile,
with_tree,
)
from ...cmd import Runner, GitRunner
from ...utils import (
_path_,
on_linux,
on_osx,
unlink,
)
from . import _get_custom_runner
from ...tests.test_archives import (
fn_archive_obscure,
fn_archive_obscure_ext,
fn_in_archive_obscure,
)
#import line_profiler
#prof = line_profiler.LineProfiler()
# TODO: with_tree ATM for archives creates this nested top directory
# matching archive name, so it will be a/d/test.dat ... we don't want that probably
@known_failure_githubci_win
@with_tree(
tree=(('a.tar.gz', {'d': {fn_in_archive_obscure: '123'}}),
('simple.txt', '123'),
(fn_archive_obscure_ext, (('d', ((fn_in_archive_obscure, '123'),)),)),
(fn_archive_obscure, '123')))
@with_tempfile()
def test_basic_scenario(d, d2):
fn_archive, fn_extracted = fn_archive_obscure_ext, fn_archive_obscure
annex = AnnexRepo(d, runner=_get_custom_runner(d))
annex.init_remote(
ARCHIVES_SPECIAL_REMOTE,
['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,
'autoenable=true'
])
assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)
# We want two maximally obscure names, which are also different
assert(fn_extracted != fn_in_archive_obscure)
annex.add(fn_archive)
annex.commit(msg="Added tarball")
annex.add(fn_extracted)
annex.commit(msg="Added the load file")
# Operations with archive remote URL
annexcr = ArchiveAnnexCustomRemote(path=d)
# few quick tests for get_file_url
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat"), "dl+archive:xyz#path=a.dat")
eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat", size=999), "dl+archive:xyz#path=a.dat&size=999")
# see https://github.com/datalad/datalad/issues/441#issuecomment-223376906
# old style
eq_(annexcr._parse_url("dl+archive:xyz/a.dat#size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz/a.dat"), ("xyz", "a.dat", {})) # old format without size
# new style
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat&size=999"), ("xyz", "a.dat", {'size': 999}))
eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat"), ("xyz", "a.dat", {})) # old format without size
file_url = annexcr.get_file_url(
archive_file=fn_archive,
file=fn_archive.replace('.tar.gz', '') + '/d/' + fn_in_archive_obscure)
annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])
annex.drop(fn_extracted)
list_of_remotes = annex.whereis(fn_extracted, output='descriptions')
in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)
assert_false(annex.file_has_content(fn_extracted))
annex.get(fn_extracted)
assert_true(annex.file_has_content(fn_extracted))
annex.rm_url(fn_extracted, file_url)
assert_false(annex.drop(fn_extracted)['success'])
annex.add_url_to_file(fn_extracted, file_url)
annex.drop(fn_extracted)
annex.get(fn_extracted)
annex.drop(fn_extracted) # so we don't get from this one next
# Let's create a clone and verify chain of getting file through the tarball
cloned_annex = AnnexRepo.clone(d, d2, runner=_get_custom_runner(d2))
# we still need to enable manually atm that special remote for archives
# cloned_annex.enable_remote('annexed-archives')
assert_false(cloned_annex.file_has_content(fn_archive))
assert_false(cloned_annex.file_has_content(fn_extracted))
cloned_annex.get(fn_extracted)
assert_true(cloned_annex.file_has_content(fn_extracted))
# as a result it would also fetch tarball
assert_true(cloned_annex.file_has_content(fn_archive))
# Check if protocol was collected
if os.environ.get('DATALAD_TESTS_PROTOCOLREMOTE'):
assert_is_instance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol)
protocol_file = _path_(annex.path,
'.git/bin/git-annex-remote-datalad-archive')
ok_file_has_content(protocol_file, "VERSION 1", re_=True, match=False)
ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
else:
assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir):
from datalad.api import add_archive_content
annex = AnnexRepo(topdir, init=True)
annex.add('a.tar.gz')
annex.commit()
add_archive_content('a.tar.gz', annex=annex, delete=True)
fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
with chpwd(op.join(topdir, 'a', 'd')):
runner = Runner()
runner(['git', 'annex', 'drop', '--', fn_in_archive_obscure]) # run git annex drop
assert_false(annex.file_has_content(fpath)) # and verify if file deleted from directory
runner(['git', 'annex', 'get', '--', fn_in_archive_obscure]) # run git annex get
assert_true(annex.file_has_content(fpath)) # and verify if file got into directory
@known_failure_githubci_win
def test_get_git_environ_adjusted():
gitrunner = GitRunner()
env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
# test conversion of relevant env vars from relative_path to correct absolute_path
adj_env = gitrunner.get_git_environ_adjusted(env)
assert_equal(adj_env["GIT_DIR"], abspath(env["GIT_DIR"]))
assert_equal(adj_env["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
# test if other environment variables passed to function returned unaltered
assert_equal(adj_env["TEST_VAR"], env["TEST_VAR"])
# test import of sys_env if no environment passed to function
sys_env = gitrunner.get_git_environ_adjusted()
assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
# rely on rdflib polluting stdout to see that it is not loaded whenever we load this remote
# since that adds 300ms delay for no immediate use
from ...cmd import Runner
runner = Runner()
with swallow_outputs() as cmo:
runner.run(
[sys.executable,
'-c',
'import datalad.customremotes.archives, sys; '
'print([k for k in sys.modules if k.startswith("rdflib")])'],
log_stdout=False,
log_stderr=False)
# print cmo.out
assert_not_in("rdflib", cmo.out)
assert_not_in("rdflib", cmo.err)
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
# Just a placeholder since constructor expects a repo
repo = AnnexRepo(tdir, create=True, init=True)
repo.add('archive.tar.gz')
repo.commit('added')
for scenario in BASE_INTERACTION_SCENARIOS + [
[
('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
],
[
# by default we do not require any fancy init
# no urls supported by default
('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
# we know that is just a single option, url, is expected so full
# one would be passed
('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
],
# basic interaction failing to fetch content from archive
[
('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
('VALUE dl+archive://somekey2#path', None),
('VALUE dl+archive://somekey3#path', None),
('VALUE',
re.compile(
'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
'archive containing somekey. Tried: \[\]')
)
],
# # incorrect response received from annex -- something isn't right but ... later
# [
# ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
# # We reply with UNSUPPORTED-REQUEST in these cases
# ('GETCOST', 'UNSUPPORTED-REQUEST'),
# ],
]:
check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
@with_tree(tree=
{'1.tar.gz':
{
'bu.dat': '52055957098986598349795121365535' * 10000,
'bu3.dat': '8236397048205454767887168342849275422' * 10000
},
'2.tar.gz':
{
'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
},
}
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
    # just a helper to enable/use when you want to quickly get a
    # repository with archives and observe tqdm
from datalad.api import add_archive_content
from datalad.api import create
ds = create(outdir)
for f in '1.tar.gz', '2.tar.gz':
with chpwd(outdir):
ds.repo.add_url_to_file(f, topurl + f)
ds.save(f)
add_archive_content(f, delete=True, drop_after=True)
files = glob.glob(op.join(outdir, '*'))
ds.drop(files) # will not drop tarballs
ds.repo.drop([], options=['--all', '--fast'])
ds.get(files)
ds.repo.drop([], options=['--all', '--fast'])
# now loop so we could play with it outside
print(outdir)
# import pdb; pdb.set_trace()
while True:
sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile):
tempfile2 = tempfile + '_'
with open(tempfile, 'w') as f:
f.write("LOAD")
link_file_load(tempfile, tempfile2) # this should work in general
ok_(os.path.exists(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
def inode(fname):
with open(fname) as fd:
return os.fstat(fd.fileno()).st_ino
def stats(fname, times=True):
"""Return stats on the file which should have been preserved"""
with open(fname) as fd:
st = os.fstat(fd.fileno())
stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
if times:
return stats + (st.st_atime, st.st_mtime)
else:
return stats
# despite copystat mtime is not copied. TODO
# st.st_mtime)
if on_linux or on_osx:
# above call should result in the hardlink
assert_equal(inode(tempfile), inode(tempfile2))
assert_equal(stats(tempfile), stats(tempfile2))
# and if we mock absence of .link
def raise_AttributeError(*args):
raise AttributeError("TEST")
with patch('os.link', raise_AttributeError):
with swallow_logs(logging.WARNING) as cm:
link_file_load(tempfile, tempfile2) # should still work
ok_("failed (TEST), copying file" in cm.out)
# should be a copy (either originally for windows, or after mocked call)
ok_(inode(tempfile) != inode(tempfile2))
with open(tempfile2, 'r') as f:
assert_equal(f.read(), "LOAD")
assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
unlink(tempfile2) # TODO: next two with_tempfile
| 37.011628 | 116 | 0.654807 |
f72142e5ac00cf950ce98fbca8180f0dd514c5e9 | 1,671 | py | Python | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
sockets = {}
network = ('127.0.0.1', 12345)
def bytes_to_addr(raw):
    # named `raw` to avoid shadowing the built-in `bytes` type
    return inet_ntoa(raw[:4]), int.from_bytes(raw[4:8], 'big')
def addr_to_bytes(addr):
return inet_aton(addr[0]) + addr[1].to_bytes(4, 'big')
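# Illustrative round-trip of the 8-byte address header (4 bytes of IPv4
# address followed by a 4-byte big-endian port; example values):
#   addr_to_bytes(('127.0.0.1', 12345)) == b'\x7f\x00\x00\x01\x00\x0009'
#   bytes_to_addr(b'\x7f\x00\x00\x01\x00\x0009') == ('127.0.0.1', 12345)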
def get_sendto(id, rate=None):
if rate:
def sendto(data: bytes, addr):
time.sleep(len(data) / rate)
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
else:
def sendto(data: bytes, addr):
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
class UnreliableSocket:
def __init__(self, rate=None):
assert rate is None or rate > 0, 'Rate should be positive or None.'
sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
self.sendto = get_sendto(id(self), rate)
def bind(self, address: (str, int)):
sockets[id(self)].bind(address)
def recvfrom(self, bufsize) -> bytes:
data, frm = sockets[id(self)].recvfrom(bufsize)
addr = bytes_to_addr(data[:8])
if frm == network:
return data[8:], addr
else:
return self.recvfrom(bufsize)
def settimeout(self, value):
sockets[id(self)].settimeout(value)
def gettimeout(self):
return sockets[id(self)].gettimeout()
def setblocking(self, flag):
sockets[id(self)].setblocking(flag)
def getblocking(self):
        return sockets[id(self)].getblocking()
def getsockname(self):
return sockets[id(self)].getsockname()
def close(self):
sockets[id(self)].close()
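# Minimal usage sketch (assumes the relay process behind `network`, i.e.
# 127.0.0.1:12345, is running; addresses and rate below are illustrative):
#   sock = UnreliableSocket(rate=1024)           # throttle to ~1 KiB/s
#   sock.bind(('127.0.0.1', 40000))
#   sock.sendto(b'hello', ('127.0.0.1', 40001))  # routed through the relay
#   data, addr = sock.recvfrom(2048)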
| 26.109375 | 75 | 0.618791 |
f7217194f4c19697a8e59fe9babfa90a23edf214 | 2,031 | py | Python | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | 4 | 2022-02-04T15:18:31.000Z | 2022-02-07T15:07:43.000Z | tests/test_db_utils.py | larssl780/thin_wrappers | c0791d76a734303708892a25cce2e237caf9920a | [
"MIT"
] | null | null | null | import pytest
import pathlib
import sys
import requests
import io
import zipfile
import tempfile
import pandas as pd
import os
HERE = pathlib.Path(__file__).resolve().parent
# insert at 1, 0 is the script path (or '' in REPL)
# temporary hack until package is published and we can inherit from there:
sys.path.insert(1, '%s/thin_wrappers' % HERE.parent)
import db_utils as db # NOQA: E402
def headers():
return {'Accept': 'application/json, text/plain, */*',
'Accept-Language': 'en-US,en;q=0.5',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'DNT': '1',
'Pragma': 'no-cache',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
}
def download_data():
url = 'https://eforexcel.com/wp/wp-content/uploads/2017/07/100-CC-Records.zip'
res = requests.get(url, headers=headers())
filebytes = io.BytesIO(res.content)
tmp = zipfile.ZipFile(filebytes)
temp = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
with open(temp.name, 'wb') as fp:
fp.write(tmp.read('100 CC Records.csv'))
datum = pd.read_csv(temp.name, encoding='cp1252')
return datum
def test_database():
"""Test that it works writig data to an sqlite db and then read it.
"""
df = download_data()
db.write_db_table('dummy', df, 'replace', 'test_db.sqlite')
assert os.path.exists('test_db.sqlite'), "Did not find database?!"
n_records = len(df)
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(
from_db) == n_records, "Number of records does not match between database and data!"
db.write_db_table('dummy', df, 'append', 'test_db.sqlite')
from_db = db.read_sql_table('dummy', 'test_db.sqlite')
assert len(from_db) == (
2 * n_records), "Number of records does not match between database and data!"
if __name__ == '__main__':
pytest.main([__file__])
| 30.313433 | 148 | 0.65485 |
f7218963b535569939ecb7f8ec24da1fd34de53b | 8,127 | py | Python | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 3 | 2019-08-28T17:10:29.000Z | 2020-11-22T14:06:45.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 1 | 2019-11-03T12:02:43.000Z | 2019-11-20T02:04:36.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 2 | 2019-08-29T02:32:56.000Z | 2019-12-22T17:44:26.000Z | """
The class wrapper for the networks
"""
# Built-in
import os
import time
# Torch
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
# Libs
import numpy as np
# Own module
class Network(object):
def __init__(self, model_fn, flags, train_loader, test_loader,
ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
inference_mode=False, saved_model=None):
self.model_fn = model_fn # The model maker function
self.flags = flags # The Flags containing the specs
if inference_mode: # If inference mode, use saved model
self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
self.saved_model = saved_model
else: # training mode, create a new ckpt folder
self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
self.model = self.create_model() # The model itself
self.loss = self.make_loss() # The loss function
self.optm = self.make_optimizer() # The optimizer
self.train_loader = train_loader # The train data loader
self.test_loader = test_loader # The test data loader
self.log = SummaryWriter(self.ckpt_dir) # Create a summary writer for keeping the summary to the tensor board
self.best_validation_loss = float('inf') # Set the BVL to large number
def create_model(self):
"""
Function to create the network module from provided model fn and flags
:return: the created nn module
"""
model = self.model_fn(self.flags)
#summary(model, input_size=(128, 8))
print(model)
return model
def make_loss(self, logit=None, labels=None):
"""
        Create a tensor that represents the loss. This is consistent both at
        training time and inference time for the Backward model
:param logit: The output of the network
:return: the total loss
"""
if logit is None:
return None
MSE_loss = nn.functional.mse_loss(logit, labels) # The MSE Loss of the
        BDY_loss = 0  # Implementation later
return MSE_loss + BDY_loss
def make_optimizer(self):
"""
Make the corresponding optimizer from the flags. Only below optimizers are allowed. Welcome to add more
:return:
"""
if self.flags.optim == 'Adam':
op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'RMSprop':
op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'SGD':
op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
else:
raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
return op
def save(self):
"""
Saving the model to the current check point folder with name best_model.pt
:return: None
"""
#torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt'))
def load(self):
"""
Loading the model from the check point folder with name best_model.pt
:return:
"""
#self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))
        self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model.pt'))  # save() stores the whole model, so load the whole model back
def train(self):
"""
The major training function. This would start the training using information given in the flags
:return: None
"""
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
for epoch in range(self.flags.train_step):
# Set to Training Mode
train_loss = 0
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda() # Put data onto GPU
spectra = spectra.cuda() # Put data onto GPU
self.optm.zero_grad() # Zero the gradient first
logit = self.model(geometry) # Get the output
loss = self.make_loss(logit, spectra) # Get the loss tensor
loss.backward() # Calculate the backward gradients
self.optm.step() # Move one step the optimizer
train_loss += loss # Aggregate the loss
            if epoch % self.flags.eval_step == 0:  # For eval steps, do the evaluations and tensor board
# Record the training loss to the tensorboard
                train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)  # move off the GPU before converting to numpy
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
# Set to Evaluation Mode
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader): # Loop through the eval set
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra) # compute the loss
test_loss += loss # Aggregate the loss
# Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j + 1)
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
% (epoch, train_avg_loss, test_avg_loss ))
# Model improving, save the model down
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None
def evaluate(self, save_dir='data/'):
self.load()
self.model.eval() # Evaluation mode
# Get the file names
Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(self.saved_model))
Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(self.saved_model))
Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(self.saved_model))
#Xpred_file = os.path.join(save_dir, 'test_Xpred_{}.csv'.format(self.saved_model)) # For pure forward model, there is no Xpred
# Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt, open(Ypred_file, 'a') as fyp:
# Loop through the eval data and evaluate
for ind, (geometry, spectra) in enumerate(self.test_loader):
logits = self.model(geometry)
np.savetxt(fxt, geometry.numpy(), fmt='%.3f')
np.savetxt(fyt, spectra.numpy(), fmt='%.3f')
np.savetxt(fyp, logits.numpy(), fmt='%.3f')
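# Minimal usage sketch (model_fn, flags and the two DataLoaders come from
# the surrounding project; the saved_model folder name is illustrative):
#   net = Network(model_fn, flags, train_loader, test_loader)
#   net.train()                    # checkpoints the best model, logs to TensorBoard
#   net = Network(model_fn, flags, train_loader, test_loader,
#                 inference_mode=True, saved_model='20200101_120000')
#   net.evaluate(save_dir='data/') # writes the Xtruth/Ytruth/Ypred CSV files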
| 47.526316 | 135 | 0.556909 |
f7218c5841c78da8df7b09b9049a325f9cfeaba6 | 8,968 | py | Python | custom_admin/views.py | samuira/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2019-11-09T17:18:10.000Z | 2019-11-09T17:18:10.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 19 | 2019-12-05T00:13:31.000Z | 2022-03-11T23:58:13.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2020-02-29T07:35:25.000Z | 2020-02-29T07:35:25.000Z | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/dashboard.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
return render(request, self.template_name)
class Login(View):
template_name = 'custom_admin/account/login.html'
form_class = LoginForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST)
self.context['form'] = form
if form.is_valid():
user = authenticate(request=request, email=request.POST['email'], password=request.POST['password'])
if user:
login(request, user)
return redirect('dashboard')
else:
messages.error(request, 'Incorrect Email or Password')
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class Register(View):
template_name = 'custom_admin/account/register.html'
form_class = RegisterForm
context = dict()
def get(self, request, *args, **kwargs):
self.context.clear()
return render(request, self.template_name)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request=request)
self.context['form'] = form
if form.is_valid():
try:
user = User.objects.create_user(email=request.POST['email'], password=request.POST['password'])
except ValidationError as e:
[messages.error(request, error[0]) for error in e.message_dict.values()]
else:
return redirect('login')
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('login'))
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
queryset = BlogPost.objects.all()
paginate_by = 10
context_object_name = 'blog_post'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/create.html'
login_url = reverse_lazy('login')
form_class = BlogPostCreateForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request):
self.context.clear()
self.context['ckeditor'] = True
print(self.context)
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context.clear()
form = self.form_class(request.POST, request.FILES)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
BlogPost.objects.create(
created_by=request.user,
title_image=form.cleaned_data.get('title_image', ''),
title=form.cleaned_data.get('title'),
description=form.cleaned_data.get('bp_description'),
slug=slugify(form.cleaned_data.get('title'))
)
messages.success(self.request, 'Blog has been created successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/edit.html'
login_url = reverse_lazy('login')
form_class = BlogPostEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['ckeditor'] = True
self.context['blog'] = BlogPost.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST, request.FILES, pk=self.context['blog'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
blog = self.context['blog']
blog.title_image = form.cleaned_data.get('title_image', '') or blog.title_image
blog.title = form.cleaned_data.get('title')
blog.is_verified = form.cleaned_data.get('is_verified')
blog.published_on = datetime.now() if form.cleaned_data.get('is_verified') and not blog.published_on else blog.published_on
blog.description = form.cleaned_data.get('bp_description')
blog.slug = slugify(form.cleaned_data.get('title'))
blog.save()
messages.success(self.request, 'Blog has been updated successfully.')
return HttpResponseRedirect(reverse('blog-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
return render(request, self.template_name, self.context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/blog/list.html'
login_url = reverse_lazy('login')
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
BlogPost.objects.get(pk=kwargs['pk']).delete()
messages.success(self.request, 'Blog has been deleted successfully.')
return HttpResponseRedirect(reverse('blog-list'))
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
template_name = 'custom_admin/user/list.html'
login_url = reverse_lazy('login')
queryset = User.objects.all()
paginate_by = 10
context_object_name = 'user_list'
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
template_name = 'custom_admin/user/edit.html'
login_url = reverse_lazy('login')
form_class = UserEditForm
context = dict()
def test_func(self):
return self.request.user.is_superuser
def handle_no_permission(self):
messages.error(self.request, 'Permission denied!!!')
return redirect('login')
def get(self, request, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
print(self.context, kwargs['pk'])
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.context['user'] = User.objects.get(pk=kwargs['pk'])
form = self.form_class(request.POST, request.FILES, pk=self.context['user'].id)
self.context['form'] = form
if form.is_valid():
print(form.cleaned_data)
user = self.context['user']
user.avatar = form.cleaned_data.get('avatar') or user.avatar
user.first_name = form.cleaned_data.get('first_name', '')
user.last_name = form.cleaned_data.get('last_name', '')
user.phone = form.cleaned_data.get('phone', '')
user.is_superuser = form.cleaned_data.get('is_superuser', False)
user.is_staff = form.cleaned_data.get('is_staff', False)
user.is_active = form.cleaned_data.get('is_active', False)
user.save()
messages.success(self.request, 'User has been updated successfully.')
return HttpResponseRedirect(reverse('user-list'))
else:
error = Util.form_validation_error(request, form)
self.context['error'] = error
print('Error:', error)
return render(request, self.template_name, self.context)
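# A minimal urls.py sketch wiring up the route names referenced above via
# reverse()/reverse_lazy(); the URL paths are illustrative assumptions,
# only the route names come from this module:
#   from django.urls import path
#   from custom_admin import views
#   urlpatterns = [
#       path('', views.Dashboard.as_view(), name='dashboard'),
#       path('login/', views.Login.as_view(), name='login'),
#       path('blog/', views.BlogList.as_view(), name='blog-list'),
#       path('user/', views.UserList.as_view(), name='user-list'),
#   ]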
| 33.092251 | 126 | 0.748104 |
f721c578750ba0a7105c0bada589a4631a8b372e | 1,845 | py | Python | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | 2 | 2019-12-22T13:05:08.000Z | 2020-02-02T13:05:31.000Z | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | 71 | 2016-10-31T15:41:10.000Z | 2022-03-21T14:26:22.000Z | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | null | null | null | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from pili.app import celery, mail
def send_email(to, subject, template, **kwargs):
"""Send email using either Celery, or Thread.
Selection depends on CELERY_INSTEAD_THREADING config variable.
"""
app = current_app._get_current_object()
if app.config['CELERY_INSTEAD_THREADING']:
send_email_celery(to, subject, template, countdown=None, **kwargs)
else:
send_email_thread(to, subject, template, **kwargs)
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email_thread(to, subject, template, **kwargs):
"""Send async email using threading.
"""
app = current_app._get_current_object()
msg = Message(
app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['PILI_MAIL_SENDER'],
recipients=[to],
)
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
@celery.task(serializer='pickle')
def send_celery_async_email(msg):
mail.send(msg)
# NOTE rename to send_email in production if Thread support is not needed
def send_email_celery(to, subject, template, countdown=None, **kwargs):
"""Send async email using Celery.
"""
app = current_app._get_current_object()
msg = Message(
app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['PILI_MAIL_SENDER'],
recipients=[to],
)
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
send_celery_async_email.apply_async(args=[msg], countdown=countdown)
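# Minimal usage sketch (recipient, subject, template name and kwargs are
# illustrative; templates resolve to '<template>.txt' and '<template>.html'):
#   send_email('user@example.com', 'Confirm Your Account',
#              'auth/email/confirm', user=user, token=token)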
| 30.75 | 74 | 0.688347 |
f722029155968a27b82e4ff0a96fce5c6afb335d | 1,571 | py | Python | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | 1 | 2022-03-10T03:56:49.000Z | 2022-03-10T03:56:49.000Z | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | siliconcompiler/tools/openfpgaloader/openfpgaloader.py | hohe/siliconcompiler | 497f272c87c8f247dcd29db76c8d6ed0c0939e50 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
import re
import sys
import shutil
import siliconcompiler
####################################################################
# Make Docs
####################################################################
def make_docs():
'''
The OpenFPGALoader is a universal utility for programming
    FPGAs. Compatible with many boards, cables and FPGAs from
major manufacturers (Xilinx, Altera/Intel, Lattice, Gowin,
Efinix, Anlogic). openFPGALoader works on Linux, Windows and
macOS.
Documentation: https://github.com/trabucayre/openFPGALoader
Sources: https://github.com/trabucayre/openFPGALoader
Installation: https://github.com/trabucayre/openFPGALoader
Status: SC integration WIP
'''
chip = siliconcompiler.Chip()
chip.set('arg','step','program')
chip.set('arg','index','0')
chip.set('design', '<design>')
setup(chip)
return chip
################################
# Setup Tool (pre executable)
################################
def setup(chip):
''' openFPGALoader setup function
'''
tool = 'openfpgaloader'
step = chip.get('arg','step')
index = chip.get('arg','index')
# tool setup
chip.set('eda', tool, 'exe', tool, clobber=False)
chip.set('eda', tool, 'vswitch', '--Version', clobber=False)
chip.set('eda', tool, 'version', 'v0.5.0', clobber=False)
options = []
options.append("inputs" + chip.get('design') + ".bit")
chip.add('eda', tool, 'option', step, index, options)
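# With the option above, the eventual tool invocation is roughly
# (illustrative; the exact command line is assembled by the SC runtime):
#   openFPGALoader inputs/<design>.bit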
| 27.086207 | 68 | 0.574793 |
f7220c584b0c5c0a472d032b76d41c1b9f1c37f5 | 63,818 | py | Python | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | gunicorn/config.py | Alexa3001/gunicorn | c0d05dad3d759f8cbbc465ba4698e1e94ed67cd7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# Please remember to run "make -C docs html" after update "desc" attributes.
import argparse
import copy
import grp
import inspect
import os
import pwd
import re
import shlex
import ssl
import sys
import textwrap
from gunicorn import __version__, util
from gunicorn.errors import ConfigError
from gunicorn.reloader import reloader_engines
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
def auto_int(_, x):
    # for compatibility with octal numbers in python3
if re.match(r'0(\d)', x, re.IGNORECASE):
x = x.replace('0', '0o', 1)
return int(x, 0)
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
def __str__(self):
lines = []
kmax = max(len(k) for k in self.settings)
for k in sorted(self.settings):
v = self.settings[k].value
if callable(v):
v = "<{}()>".format(v.__qualname__)
lines.append("{k:{kmax}} = {v}".format(k=k, v=v, kmax=kmax))
return "\n".join(lines)
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super().__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def get_cmd_args_from_env(self):
if 'GUNICORN_CMD_ARGS' in self.env_orig:
return shlex.split(self.env_orig['GUNICORN_CMD_ARGS'])
return []
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
keys = sorted(self.settings, key=self.settings.__getitem__)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class_str(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
return "gthread"
return uri
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
# are we using a threaded worker?
is_sync = uri.endswith('SyncWorker') or uri == 'sync'
if is_sync and self.threads > 1:
uri = "gunicorn.workers.gthread.ThreadWorker"
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(util.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
if uri == "simple":
# support the default
uri = LoggerClass.default
# if default logger is in use, and statsd is on, automagically switch
# to the statsd logger
if uri == LoggerClass.default:
if 'statsd_host' in self.settings and self.settings['statsd_host'].value is not None:
uri = "gunicorn.instrument.statsd.Statsd"
logger_class = util.load_class(
uri,
default="gunicorn.glogging.Logger",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
for name, value in self.settings.items():
if value.section == 'SSL':
opts[name] = value.get()
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = util.bytes_to_str(e)
try:
k, v = s.split('=', 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
env[k] = v
return env
@property
def sendfile(self):
if self.settings['sendfile'].get() is not None:
return False
if 'SENDFILE' in os.environ:
sendfile = os.environ['SENDFILE'].lower()
return sendfile in ['y', '1', 'yes', 'true']
return True
@property
def reuse_port(self):
return self.settings['reuse_port'].get()
@property
def paste_global_conf(self):
raw_global_conf = self.settings['raw_paste_global_conf'].get()
if raw_global_conf is None:
return None
global_conf = {}
for e in raw_global_conf:
s = util.bytes_to_str(e)
try:
k, v = re.split(r'(?<!\\)=', s, 1)
except ValueError:
raise RuntimeError("environment setting %r invalid" % s)
k = k.replace('\\=', '=')
v = v.replace('\\=', '=')
global_conf[k] = v
return global_conf
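# Illustrative programmatic use of Config (a sketch, not a documented API;
# the setting names map to the Setting subclasses defined further below):
#   cfg = Config()
#   cfg.set('bind', ['0.0.0.0:9000'])
#   cfg.set('workers', 4)
#   cfg.address   # -> [('0.0.0.0', 9000)]
#   cfg.workers   # -> 4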
class SettingMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super().__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = staticmethod(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
nargs = None
const = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
help_txt = "%s [%s]" % (self.short, self.default)
help_txt = help_txt.replace("%", "%%")
kwargs = {
"dest": self.name,
"action": self.action or "store",
"type": self.type or str,
"default": None,
"help": help_txt
}
if self.meta is not None:
kwargs['metavar'] = self.meta
if kwargs["action"] != "store":
kwargs.pop("type")
if self.nargs is not None:
kwargs["nargs"] = self.nargs
if self.const is not None:
kwargs["const"] = self.const
parser.add_argument(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
if not callable(self.validator):
raise TypeError('Invalid validator: %s' % self.name)
self.value = self.validator(val)
def __lt__(self, other):
return (self.section == other.section and
self.order < other.order)
__cmp__ = __lt__
def __repr__(self):
return "<%s.%s object at %x with value %r>" % (
self.__class__.__module__,
self.__class__.__name__,
id(self),
self.value,
)
Setting = SettingMeta('Setting', (Setting,), {})
def validate_bool(val):
if val is None:
return
if isinstance(val, bool):
return val
if not isinstance(val, str):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, int):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
def validate_ssl_version(val):
ssl_versions = {}
for protocol in [p for p in dir(ssl) if p.startswith("PROTOCOL_")]:
ssl_versions[protocol[9:]] = getattr(ssl, protocol)
if val in ssl_versions:
# string matching PROTOCOL_...
return ssl_versions[val]
try:
intval = validate_pos_int(val)
if intval in ssl_versions.values():
# positive int matching a protocol int constant
return intval
except (ValueError, TypeError):
# negative integer or not an integer
# drop this in favour of the more descriptive ValueError below
pass
raise ValueError("Invalid ssl_version: %s. Valid options: %s"
% (val, ', '.join(ssl_versions)))
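# e.g. validate_ssl_version('TLSv1_2') returns ssl.PROTOCOL_TLSv1_2, and the
# matching integer constant is accepted as well (illustrative; the available
# protocols depend on the local OpenSSL build).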
def validate_string(val):
if val is None:
return None
if not isinstance(val, str):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_file_exists(val):
if val is None:
return None
if not os.path.exists(val):
raise ValueError("File %s does not exists." % val)
return val
def validate_list_string(val):
if not val:
return []
# legacy syntax
if isinstance(val, str):
val = [val]
return [validate_string(v) for v in val]
def validate_list_of_existing_files(val):
return [validate_file_exists(v) for v in validate_list_string(val)]
def validate_string_to_list(val):
val = validate_string(val)
if not val:
return []
return [v.strip() for v in val.split(",") if v]
def validate_class(val):
if inspect.isfunction(val) or inspect.ismethod(val):
val = val()
if inspect.isclass(val):
return val
return validate_string(val)
def validate_callable(arity):
def _validate_callable(val):
if isinstance(val, str):
try:
mod_name, obj_name = val.rsplit(".", 1)
except ValueError:
raise TypeError("Value '%s' is not import string. "
"Format: module[.submodules...].object" % val)
try:
mod = __import__(mod_name, fromlist=[obj_name])
val = getattr(mod, obj_name)
except ImportError as e:
raise TypeError(str(e))
except AttributeError:
raise TypeError("Can not load '%s' from '%s'"
"" % (obj_name, mod_name))
if not callable(val):
raise TypeError("Value is not callable: %s" % val)
if arity != -1 and arity != util.get_arity(val):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
val = validate_callable(-1)(val)
largs = util.get_arity(val)
if largs == 4:
return val
elif largs == 3:
return lambda worker, req, env, _r: val(worker, req, env)
elif largs == 2:
return lambda worker, req, _e, _r: val(worker, req)
else:
raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("can't chdir to %r" % val)
return path
def validate_hostport(val):
val = validate_string(val)
if val is None:
return None
elements = val.split(":")
if len(elements) == 2:
return (elements[0], int(elements[1]))
else:
raise TypeError("Value must consist of: hostname:port")
def validate_reload_engine(val):
if val not in reloader_engines:
raise ConfigError("Invalid reload_engine: %r" % val)
return val
def get_default_config_file():
config_path = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
if os.path.exists(config_path):
return config_path
return None
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "CONFIG"
validator = validate_string
default = "./gunicorn.conf.py"
desc = """\
The Gunicorn config file.
A string of the form ``PATH``, ``file:PATH``, or ``python:MODULE_NAME``.
Only has an effect when specified on the command line or as part of an
application specific configuration.
By default, a file named ``gunicorn.conf.py`` will be read from the same
directory where gunicorn is being run.
.. versionchanged:: 19.4
Loading the config from a Python module requires the ``python:``
prefix.
"""
class WSGIApp(Setting):
name = "wsgi_app"
section = "Config File"
meta = "STRING"
validator = validate_string
default = None
desc = """\
A WSGI application path in pattern ``$(MODULE_NAME):$(VARIABLE_NAME)``.
.. versionadded:: 20.1.0
"""
class Bind(Setting):
name = "bind"
action = "append"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_list_string
if 'PORT' in os.environ:
default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
else:
default = ['127.0.0.1:8000']
desc = """\
The socket to bind.
A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``,
``fd://FD``. An IP is a valid ``HOST``.
.. versionchanged:: 20.0
Support for ``fd://FD`` got added.
Multiple addresses can be bound. ex.::
$ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
will bind the `test:app` application on localhost both on ipv6
and ipv4 interfaces.
If the ``PORT`` environment variable is defined, the default
is ``['0.0.0.0:$PORT']``. If it is not defined, the default
is ``['127.0.0.1:8000']``.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = int(os.environ.get("WEB_CONCURRENCY", 1))
desc = """\
The number of worker processes for handling requests.
A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
You'll want to vary this a bit to find the best for your particular
application's work load.
By default, the value of the ``WEB_CONCURRENCY`` environment variable,
which is set by some Platform-as-a-Service providers such as Heroku. If
it is not defined, the default is ``1``.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_class
default = "sync"
desc = """\
The type of workers to use.
The default class (``sync``) should handle most "normal" types of
workloads. You'll want to read :doc:`design` for information on when
you might want to choose one of the other worker classes. Required
libraries may be installed using setuptools' ``extras_require`` feature.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.24.1 (or install it via
``pip install gunicorn[eventlet]``)
* ``gevent`` - Requires gevent >= 1.4 (or install it via
``pip install gunicorn[gevent]``)
* ``tornado`` - Requires tornado >= 0.2 (or install it via
``pip install gunicorn[tornado]``)
* ``gthread`` - Python 2 requires the futures package to be installed
(or install it via ``pip install gunicorn[gthread]``)
Optionally, you can provide your own worker by giving Gunicorn a
Python path to a subclass of ``gunicorn.workers.base.Worker``.
This alternative syntax will load the gevent class:
``gunicorn.workers.ggevent.GeventWorker``.
"""
class WorkerThreads(Setting):
name = "threads"
section = "Worker Processes"
cli = ["--threads"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1
desc = """\
The number of worker threads for handling requests.
Run each worker with the specified number of threads.
A positive integer generally in the ``2-4 x $(NUM_CORES)`` range.
You'll want to vary this a bit to find the best for your particular
application's work load.
If it is not defined, the default is ``1``.
This setting only affects the Gthread worker type.
.. note::
If you try to use the ``sync`` worker type and set the ``threads``
setting to more than 1, the ``gthread`` worker type will be used
instead.
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class MaxRequestsJitter(Setting):
name = "max_requests_jitter"
section = "Worker Processes"
cli = ["--max-requests-jitter"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum jitter to add to the *max_requests* setting.
The jitter causes the restart per worker to be randomized by
``randint(0, max_requests_jitter)``. This is intended to stagger worker
restarts to avoid all workers restarting at the same time.
.. versionadded:: 19.2
"""
class WaitForNewWorkers(Setting):
name = "wait_for_new_workers"
section = "Worker Processes"
cli = ["--wait-for-new-workers"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Wait for a new worker to become ready before killing an old worker.
"""
class MaxRestartingWorkers(Setting):
name = "max_restarting_workers"
section = "Worker Processes"
cli = ["--max-restarting-workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of workers which can be restarted at the same time.
"""
class WarmupRequests(Setting):
name = "warmup_requests"
section = "Worker Processes"
cli = ["--warmup-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The number of requests a new worker needs to handle until the old worker can be killed.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Value is a positive number or 0. Setting it to 0 has the effect of
infinite timeouts by disabling timeouts for all workers entirely.
Generally, the default of thirty seconds should suffice. Only set this
noticeably higher if you're sure of the repercussions for sync workers.
For non-sync workers, it simply means that the worker process is still
communicating and is not tied to the length of time required to handle a
single request.
"""
class GracefulTimeout(Setting):
name = "graceful_timeout"
section = "Worker Processes"
cli = ["--graceful-timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Timeout for graceful workers restart.
After receiving a restart signal, workers have this much time to finish
serving requests. Workers still alive after the timeout (starting from
the receipt of the restart signal) are force killed.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range for servers with direct connection
to the client (e.g. when you don't have separate load balancer). When
Gunicorn is deployed behind a load balancer, it often makes sense to
set this to a higher value.
.. note::
``sync`` worker does not support persistent connections and will
ignore this option.
"""
class LimitRequestLine(Setting):
name = "limit_request_line"
section = "Security"
cli = ["--limit-request-line"]
meta = "INT"
validator = validate_pos_int
type = int
default = 4094
desc = """\
The maximum size of HTTP request line in bytes.
This parameter is used to limit the allowed size of a client's
HTTP request-line. Since the request-line consists of the HTTP
method, URI, and protocol version, this directive places a
restriction on the length of a request-URI allowed for a request
on the server. A server needs this value to be large enough to
hold any of its resource names, including any information that
might be passed in the query part of a GET request. Value is a number
from 0 (unlimited) to 8190.
This parameter can be used to help prevent DDoS attacks.
"""
class LimitRequestFields(Setting):
name = "limit_request_fields"
section = "Security"
cli = ["--limit-request-fields"]
meta = "INT"
validator = validate_pos_int
type = int
default = 100
desc = """\
Limit the number of HTTP header fields in a request.
This parameter is used to limit the number of headers in a request to
help prevent DDoS attacks. Used together with *limit_request_field_size*,
it provides more safety. By default this value is 100 and can't be larger
than 32768.
"""
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
Value is a positive number or 0. Setting it to 0 will allow unlimited
header field sizes.
.. warning::
Setting this parameter to a very high or unlimited value can open
up for DDOS attacks.
"""
class EnrichResponse(Setting):
name = "enrich_response"
section = 'Debugging'
cli = ['--enrich-response']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Add extra information in the http response body. Works only for sync worker type.
While handling a request, a few timestamps are taken (in microseconds, since 1st of January, 1970):
* ``spawning time`` - when worker object is initialized (this is before forking the new process)
* ``time 1`` - immediately after entering "handle_request"
* ``time 2`` - just before getting the response
* ``time 3`` - immediately after getting the response
The following information is inserted into the response body:
* ``spawn``: spawning time
* ``t1``: time1
* ``d1``: time2 - time1
* ``d2``: time3 - time2
* ``pid``: the pid of the worker handling the request
* ``nr``: number of requests handled by this worker so far
* ``max``: number of requests planned for this worker (this can be exceeded a little bit because of the rolling restarting strategy)
The new response is a json with two keys:
"res" contains the original response
"info" contains the extra information
'''
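# Hypothetical shape of an enriched response, following the description
# above (all field values are made up):
#
#   {"res": "<original response body>",
#    "info": {"spawn": ..., "t1": ..., "d1": ..., "d2": ...,
#             "pid": 1234, "nr": 7, "max": 1000}}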
class Reload(Setting):
name = "reload"
section = 'Debugging'
cli = ['--reload']
validator = validate_bool
action = 'store_true'
default = False
desc = '''\
Restart workers when code changes.
This setting is intended for development. It will cause workers to be
restarted whenever application code changes.
The reloader is incompatible with application preloading. When using a
paste configuration be sure that the server block does not import any
application code or the reload will not work as designed.
The default behavior is to attempt inotify with a fallback to file
system polling. Generally, inotify should be preferred if available
because it consumes less system resources.
.. note::
In order to use the inotify reloader, you must have the ``inotify``
package installed.
'''
class ReloadEngine(Setting):
name = "reload_engine"
section = "Debugging"
cli = ["--reload-engine"]
meta = "STRING"
validator = validate_reload_engine
default = "auto"
desc = """\
The implementation that should be used to power :ref:`reload`.
Valid engines are:
* ``'auto'``
* ``'poll'``
* ``'inotify'`` (requires inotify)
.. versionadded:: 19.7
"""
class ReloadExtraFiles(Setting):
name = "reload_extra_files"
action = "append"
section = "Debugging"
cli = ["--reload-extra-file"]
meta = "FILES"
validator = validate_list_of_existing_files
default = []
desc = """\
Extends :ref:`reload` option to also watch and reload on additional files
(e.g., templates, configurations, specifications, etc.).
.. versionadded:: 19.8
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Check the configuration and exit. The exit status is 0 if the
configuration is correct, and 1 if the configuration is incorrect.
"""
class PrintConfig(Setting):
name = "print_config"
section = "Debugging"
cli = ["--print-config"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Print the configuration settings as fully resolved. Implies :ref:`check-config`.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
speed up server boot times. Although, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Sendfile(Setting):
name = "sendfile"
section = "Server Mechanics"
cli = ["--no-sendfile"]
validator = validate_bool
action = "store_const"
const = False
desc = """\
Disables the use of ``sendfile()``.
If not set, the value of the ``SENDFILE`` environment variable is used
to enable or disable its usage.
.. versionadded:: 19.2
.. versionchanged:: 19.4
Swapped ``--sendfile`` with ``--no-sendfile`` to actually allow
disabling.
.. versionchanged:: 19.6
added support for the ``SENDFILE`` environment variable
"""
class ReusePort(Setting):
name = "reuse_port"
section = "Server Mechanics"
cli = ["--reuse-port"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Set the ``SO_REUSEPORT`` flag on the listening socket.
.. versionadded:: 19.8
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
Change directory to specified directory before loading apps.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanics"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variables in the execution environment.
Should be a list of strings in the ``key=value`` format.
For example on the command line:
.. code-block:: console
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
Or in the configuration file:
.. code-block:: python
raw_env = ["FOO=1"]
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class WorkerTmpDir(Setting):
name = "worker_tmp_dir"
section = "Server Mechanics"
cli = ["--worker-tmp-dir"]
meta = "DIR"
validator = validate_string
default = None
desc = """\
A directory to use for the worker heartbeat temporary file.
If not set, the default temporary directory will be used.
.. note::
The current heartbeat system involves calling ``os.fchmod`` on
temporary file handlers and may block a worker for arbitrary time
if the directory is on a disk-backed filesystem.
See :ref:`blocking-os-fchmod` for more detailed information
and a solution for avoiding this problem.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to ``pwd.getpwnam(value)`` or ``None`` to not
change the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
Switch worker processes to run as this group.
A valid group id (as an integer) or the name of a group that can be
retrieved with a call to ``grp.getgrnam(value)`` or ``None`` to not
change the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = auto_int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the ``os.umask(mode)`` call or a string compatible
with ``int(value, 0)`` (``0`` means Python guesses the base, so values
like ``0``, ``0xFF``, ``0022`` are valid for decimal, hex, and octal
representations)
"""
class Initgroups(Setting):
name = "initgroups"
section = "Server Mechanics"
cli = ["--initgroups"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
If true, set the worker process's group access list with all of the
groups of which the specified username is a member, plus the specified
group id.
.. versionadded:: 19.7
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. If the source IP is permitted by
``forwarded-allow-ips`` (below), *and* at least one request header matches
a key-value pair listed in this dictionary, then Gunicorn will set
``wsgi.url_scheme`` to ``https``, so your application can tell that the
request is secure.
Headers listed in this dictionary that are absent from the request are
ignored, but any that are present and do not match the provided values
cause the request to fail to parse. See the note below for more detailed
examples of this behaviour.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
cli = ["--forwarded-allow-ips"]
meta = "STRING"
validator = validate_string_to_list
default = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
desc = """\
Front-end IPs from which secure-scheme headers are trusted
(comma separated).
Set to ``*`` to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment).
By default, the value of the ``FORWARDED_ALLOW_IPS`` environment
variable. If it is not defined, the default is ``"127.0.0.1"``.
.. note::
The interplay between the request headers, the value of ``forwarded_allow_ips``, and the value of
``secure_scheme_headers`` is complex. Various scenarios are documented below to further elaborate. In each case, we
have a request from the remote address 134.213.44.18, and the default value of ``secure_scheme_headers``:
.. code::
secure_scheme_headers = {
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on'
}
.. list-table::
:header-rows: 1
:align: center
:widths: auto
* - ``forwarded-allow-ips``
- Secure Request Headers
- Result
- Explanation
* - .. code::
["127.0.0.1"]
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "http"
- IP address was not allowed
* - .. code::
"*"
- <none>
- .. code::
wsgi.url_scheme = "http"
- IP address allowed, but no secure headers provided
* - .. code::
"*"
- .. code::
X-Forwarded-Proto: https
- .. code::
wsgi.url_scheme = "https"
- IP address allowed, one request header matched
* - .. code::
["134.213.44.18"]
- .. code::
X-Forwarded-Ssl: on
X-Forwarded-Proto: http
- ``InvalidSchemeHeaders()`` raised
- IP address allowed, but the two secure headers disagreed on if HTTPS was used
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
``'-'`` means log to stdout.
"""
class DisableRedirectAccessToSyslog(Setting):
name = "disable_redirect_access_to_syslog"
section = "Logging"
cli = ["--disable-redirect-access-to-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Disable redirecting access logs to syslog.
.. versionadded:: 19.8
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
The access log format.
=========== ===========
Identifier Description
=========== ===========
h remote address
l ``'-'``
u user name
t date of the request
r status line (e.g. ``GET / HTTP/1.1``)
m request method
U URL path without query string
q query string
H protocol
s status
B response length
b response length or ``'-'`` (CLF format)
f referer
a user agent
T request time in seconds
M request time in milliseconds
D request time in microseconds
L request time in decimal seconds
p process ID
{header}i request header
{header}o response header
{variable}e environment variable
=========== ===========
Use lowercase for header and environment variable names, and put
``{...}x`` names inside ``%(...)s``. For example::
%({x-forwarded-for}i)s
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = '-'
desc = """\
The Error log file to write to.
Using ``'-'`` for FILE makes gunicorn log to stderr.
.. versionchanged:: 19.2
Log to stderr by default.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* ``'debug'``
* ``'info'``
* ``'warning'``
* ``'error'``
* ``'critical'``
"""
class CaptureOutput(Setting):
name = "capture_output"
section = "Logging"
cli = ["--capture-output"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Redirect stdout/stderr to specified file in :ref:`errorlog`.
.. versionadded:: 19.6
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "gunicorn.glogging.Logger"
desc = """\
The logger you want to use to log events in Gunicorn.
The default class (``gunicorn.glogging.Logger``) handles most
normal usages in logging. It provides error and access logging.
You can provide your own logger by giving Gunicorn a Python path to a
class that quacks like ``gunicorn.glogging.Logger``.
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class LogConfigDict(Setting):
name = "logconfig_dict"
section = "Logging"
validator = validate_dict
default = {}
desc = """\
The log config dictionary to use, using the standard Python
logging module's dictionary configuration format. This option
takes precedence over the :ref:`logconfig` option, which uses the
older file configuration format.
Format: https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig
.. versionadded:: 19.8
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
Address to send syslog messages.
Address is a string of the form:
* ``unix://PATH#TYPE`` : for unix domain socket. ``TYPE`` can be ``stream``
for the stream driver or ``dgram`` for the dgram driver.
``stream`` is the default.
* ``udp://HOST:PORT`` : for UDP sockets
* ``tcp://HOST:PORT`` : for TCP sockets
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Send *Gunicorn* logs to syslog.
.. versionchanged:: 19.8
You can now disable sending access logs by using the
:ref:`disable-redirect-access-to-syslog` setting.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
Makes Gunicorn use the parameter as program-name in the syslog entries.
All entries will be prefixed by ``gunicorn.<prefix>``. By default the
program name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance.
Enable inheritance for stdio file descriptors in daemon mode.
Note: To disable Python stdout buffering, you can set the
environment variable ``PYTHONUNBUFFERED``.
"""
# statsD monitoring
class StatsdHost(Setting):
name = "statsd_host"
section = "Logging"
cli = ["--statsd-host"]
meta = "STATSD_ADDR"
default = None
validator = validate_hostport
desc = """\
``host:port`` of the statsd server to log to.
.. versionadded:: 19.1
"""
# Datadog Statsd (dogstatsd) tags. https://docs.datadoghq.com/developers/dogstatsd/
class DogstatsdTags(Setting):
name = "dogstatsd_tags"
section = "Logging"
cli = ["--dogstatsd-tags"]
meta = "DOGSTATSD_TAGS"
default = ""
validator = validate_string
desc = """\
A comma-delimited list of datadog statsd (dogstatsd) tags to append to
statsd metrics.
.. versionadded:: 20
"""
class StatsdPrefix(Setting):
name = "statsd_prefix"
section = "Logging"
cli = ["--statsd-prefix"]
meta = "STATSD_PREFIX"
default = ""
validator = validate_string
desc = """\
Prefix to use when emitting statsd metrics (a trailing ``.`` is added
if not provided).
.. versionadded:: 19.2
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
If not set, the *default_proc_name* setting will be used.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A comma-separated list of directories to add to the Python path.
e.g.
``'/home/djangoprojects/myproject,/home/python/mylibrary'``.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paste", "--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a PasteDeploy config file. The argument may contain a ``#``
symbol followed by the name of an app section from the config file,
e.g. ``production.ini#admin``.
At this time, using alternate server blocks is not supported. Use the
command line arguments to control server configuration instead.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerInt(Setting):
name = "worker_int"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_int(worker):
pass
default = staticmethod(worker_int)
desc = """\
Called just after a worker exited on SIGINT or SIGQUIT.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class WorkerAbort(Setting):
name = "worker_abort"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def worker_abort(worker):
pass
default = staticmethod(worker_abort)
desc = """\
Called when a worker received the SIGABRT signal.
This call generally happens on timeout.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class ChildExit(Setting):
name = "child_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def child_exit(server, worker):
pass
default = staticmethod(child_exit)
desc = """\
Called just after a worker has been exited, in the master process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
.. versionadded:: 19.7
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has been exited, in the worker process.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after *num_workers* has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers: the number of workers after and before the change.
If the number of workers is set for the first time, *old_value* would
be ``None``.
"""
class OnExit(Setting):
name = "on_exit"
section = "Server Hooks"
validator = validate_callable(1)
def on_exit(server):
pass
default = staticmethod(on_exit)
desc = """\
Called just before exiting Gunicorn.
The callable needs to accept a single instance variable for the Arbiter.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable detection of the PROXY protocol (PROXY mode).
Allows using HTTP and the PROXY protocol together. It may be useful when
working with stunnel as an HTTPS frontend and Gunicorn as an HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end IPs from which proxy requests are accepted (comma separated).
Set to ``*`` to disable checking of front-end IPs (useful for setups
where you don't know the front-end IP address in advance, but you
still trust the environment).
"""
class KeyFile(Setting):
name = "keyfile"
section = "SSL"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "SSL"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
class SSLVersion(Setting):
name = "ssl_version"
section = "SSL"
cli = ["--ssl-version"]
validator = validate_ssl_version
if hasattr(ssl, "PROTOCOL_TLS"):
default = ssl.PROTOCOL_TLS
else:
default = ssl.PROTOCOL_SSLv23
desc = """\
SSL version to use (see stdlib ssl module's)
.. versionchanged:: 20.0.1
The default value has been changed from ``ssl.PROTOCOL_SSLv23`` to
``ssl.PROTOCOL_TLS`` when Python >= 3.6 .
"""
default = ssl.PROTOCOL_SSLv23
desc = """\
SSL version to use.
============= ============
--ssl-version Description
============= ============
SSLv3 SSLv3 is not-secure and is strongly discouraged.
SSLv23 Alias for TLS. Deprecated in Python 3.6, use TLS.
TLS Negotiate highest possible version between client/server.
Can yield SSL. (Python 3.6+)
TLSv1 TLS 1.0
TLSv1_1 TLS 1.1 (Python 3.4+)
TLSv1_2 TLS 1.2 (Python 3.4+)
TLS_SERVER Auto-negotiate the highest protocol version like TLS,
but only support server-side SSLSocket connections.
(Python 3.6+)
============= ============
.. versionchanged:: 19.7
The default value has been changed from ``ssl.PROTOCOL_TLSv1`` to
``ssl.PROTOCOL_SSLv23``.
.. versionchanged:: 20.0
This setting now accepts string names based on ``ssl.PROTOCOL_``
constants.
"""
class CertReqs(Setting):
name = "cert_reqs"
section = "SSL"
cli = ["--cert-reqs"]
validator = validate_pos_int
default = ssl.CERT_NONE
desc = """\
Whether client certificate is required (see stdlib ``ssl`` module).
"""
class CACerts(Setting):
name = "ca_certs"
section = "SSL"
cli = ["--ca-certs"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
CA certificates file
"""
class SuppressRaggedEOFs(Setting):
name = "suppress_ragged_eofs"
section = "SSL"
cli = ["--suppress-ragged-eofs"]
action = "store_true"
default = True
validator = validate_bool
desc = """\
Suppress ragged EOFs (see stdlib ``ssl`` module).
"""
class DoHandshakeOnConnect(Setting):
name = "do_handshake_on_connect"
section = "SSL"
cli = ["--do-handshake-on-connect"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Whether to perform SSL handshake on socket connect (see stdlib ``ssl`` module).
"""
class Ciphers(Setting):
name = "ciphers"
section = "SSL"
cli = ["--ciphers"]
validator = validate_string
default = None
desc = """\
SSL Cipher suite to use, in the format of an OpenSSL cipher list.
By default we use the default cipher list from Python's ``ssl`` module,
which contains ciphers considered strong at the time of each Python
release.
As a recommended alternative, the Open Web App Security Project (OWASP)
offers `a vetted set of strong cipher strings rated A+ to C-
<https://www.owasp.org/index.php/TLS_Cipher_String_Cheat_Sheet>`_.
OWASP provides details on user-agent compatibility at each security level.
See the `OpenSSL Cipher List Format Documentation
<https://www.openssl.org/docs/manmaster/man1/ciphers.html#CIPHER-LIST-FORMAT>`_
for details on the format of an OpenSSL cipher list.
"""
class PasteGlobalConf(Setting):
name = "raw_paste_global_conf"
action = "append"
section = "Server Mechanics"
cli = ["--paste-global"]
meta = "CONF"
validator = validate_list_string
default = []
desc = """\
Set a PasteDeploy global config variable in ``key=value`` form.
The option can be specified multiple times.
The variables are passed to the PasteDeploy entrypoint. Example::
$ gunicorn -b 127.0.0.1:8000 --paste development.ini --paste-global FOO=1 --paste-global BAR=2
.. versionadded:: 19.7
"""
class StripHeaderSpaces(Setting):
name = "strip_header_spaces"
section = "Server Mechanics"
cli = ["--strip-header-spaces"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Strip spaces present between the header name and the ``:``.
This is known to induce vulnerabilities and is not compliant with the HTTP/1.1 standard.
See https://portswigger.net/research/http-desync-attacks-request-smuggling-reborn.
Use with care and only if necessary.
"""
| 28.225564 | 144 | 0.597684 |
f7221873d9ecbf77edfe45ec72ecc412e695cb21 | 11,169 | py | Python | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | ["X11", "Unlicense", "MIT"] | null | null | null | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | ["X11", "Unlicense", "MIT"] | null | null | null | rmgweb/database/forms.py | sean-v8/RMG-website | db6c70670c83b3fbe71b02b0874613823c982c9b | ["X11", "Unlicense", "MIT"] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG Website - A Django-powered website for Reaction Mechanism Generator
#
# Copyright (c) 2011 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from django import forms
from django.forms.util import ErrorList
from django.utils.safestring import mark_safe
from rmgpy.molecule.molecule import Molecule
import rmgpy
import copy
import sys
class DivErrorList(ErrorList):
def __unicode__(self):
return self.as_divs()
def as_divs(self):
if not self: return u''
return mark_safe(u'<label> </label>%s' % (''.join([u'<div class="error">%s</div>' % e for e in self])))
class ThermoSearchForm(forms.Form):
"""
This form provides a means of specifying a species to get thermodynamic
data for.
"""
species = forms.CharField(widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
def clean_species(self):
"""
Custom validation for the species field to ensure that a valid adjacency
list has been provided.
"""
try:
molecule = Molecule()
molecule.fromAdjacencyList(str(self.cleaned_data['species']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['species'])
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = u'<tr><td colspan="2">%s</td></tr>',
row_ender = u'</td></tr>',
help_text_html = u'<br />%s',
errors_on_separate_row = False)
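# Illustrative use inside a Django view (the view code is hypothetical,
# not part of this module):
#
#   form = ThermoSearchForm(request.POST)
#   if form.is_valid():
#       adjlist = form.cleaned_data['species']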
class KineticsSearchForm(forms.Form):
"""
This form provides a means of specifying a set of reactants to get
kinetic data for.
"""
reactant1_identifier = forms.CharField(label="Reactant #1 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("reactant1");','class':'identifier'}), required=False)
reactant1 = forms.CharField(label="Reactant #1", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
reactant2_identifier = forms.CharField(label="Reactant #2 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("reactant2");','class':'identifier'}), required=False)
reactant2 = forms.CharField(label="Reactant #2", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
product1_identifier = forms.CharField(label="Product #1 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("product1");','class':'identifier'}), required=False)
product1 = forms.CharField(label="Product #1", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
product2_identifier = forms.CharField(label="Product #2 Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("product2");','class':'identifier'}), required=False)
product2 = forms.CharField(label="Product #2", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
def clean_reactant1(self):
"""
Custom validation for the reactant1 field to ensure that a valid
adjacency list has been provided.
"""
try:
molecule = Molecule()
molecule.fromAdjacencyList(str(self.cleaned_data['reactant1']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['reactant1'])
def clean_reactant2(self):
"""
Custom validation for the reactant2 field to ensure that a valid
adjacency list has been provided.
"""
try:
adjlist = str(self.cleaned_data['reactant2'])
if adjlist.strip() == '': return ''
molecule = Molecule()
molecule.fromAdjacencyList(adjlist)
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['reactant2'])
def clean_product1(self):
"""
Custom validation for the product1 field to ensure that a valid
adjacency list has been provided.
"""
try:
adjlist = str(self.cleaned_data['product1'])
if adjlist.strip() == '': return ''
molecule = Molecule()
molecule.fromAdjacencyList(adjlist)
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['product1'])
def clean_product2(self):
"""
Custom validation for the product2 field to ensure that a valid
adjacency list has been provided.
"""
try:
adjlist = str(self.cleaned_data['product2'])
if adjlist.strip() == '': return ''
molecule = Molecule()
molecule.fromAdjacencyList(adjlist)
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['product2'])
class MoleculeSearchForm(forms.Form):
"""
Form for drawing a molecule from an adjacency list
"""
species_identifier = forms.CharField(label="Species Identifier", widget=forms.TextInput(attrs={'onchange':'resolve();', 'style':'width:100%;'}), required=False)
species = forms.CharField(label ="Adjacency List", widget = forms.Textarea(attrs={'cols': 50, 'rows': 20, 'onchange':"$('.result').hide();" }), required=True)
def clean_species(self):
"""
Custom validation for the species field to ensure that a valid adjacency
list has been provided.
"""
try:
adjlist = str(self.cleaned_data['species'])
if adjlist == '' : return ''
molecule = Molecule()
molecule.fromAdjacencyList(str(self.cleaned_data['species']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return adjlist
class EniSearchForm(forms.Form):
"""
Form for specifying detergent and deposit structures for the Eni search
"""
detergent_identifier = forms.CharField(label="Detergent Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("detergent");','class':'identifier'}), required=False)
detergent = forms.CharField(label="Detergent", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}))
deposit_identifier = forms.CharField(label="Deposit Identifier", widget=forms.TextInput(attrs={'onchange':'resolve("deposit");','class':'identifier'}), required=False)
deposit = forms.CharField(label="Deposit", widget=forms.widgets.Textarea(attrs={'rows': 6, 'cols': 30}), required=False)
def clean_detergent(self):
"""
Return molecular representation of input detergent structure """
try:
detergent = Molecule()
detergent.fromAdjacencyList(str(self.cleaned_data['detergent']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['detergent'])
def clean_deposit(self):
"""
Return molecular representation of input deposit structure
"""
try:
deposit = Molecule()
deposit.fromAdjacencyList(str(self.cleaned_data['deposit']))
except Exception, e:
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid adjacency list.')
return str(self.cleaned_data['deposit'])
class KineticsEntryEditForm(forms.Form):
"""
Form for editing kinetics database entries
"""
entry = forms.CharField(label="Database Entry", widget = forms.Textarea(attrs={'cols': 80, 'rows': 40, 'class':'data_entry'}), required=True)
change = forms.CharField(label="Summary of changes", widget=forms.TextInput(attrs={'class':'change_summary'}), required=True)
def clean_entry(self):
"""
Custom validation for the entry field to ensure that a valid
entry has been provided.
"""
new_database = rmgpy.data.kinetics.KineticsDatabase()
new_depository = rmgpy.data.kinetics.KineticsDepository()
global_context = {'__builtins__': None} # disable even builtins
local_context = copy.copy(new_database.local_context)
local_context['entry'] = new_depository.loadEntry
for key,value in rmgpy.data.base.Database.local_context.iteritems():
local_context[key]=value
print local_context
try:
entry_string = str(self.cleaned_data['entry'])
entry = eval("entry( index=-1, {0})".format(entry_string), global_context, local_context)
except Exception, e:
print "Invalid entry from KineticsEntryEditForm."
print repr(entry_string)
import traceback
traceback.print_exc(e)
raise forms.ValidationError('Invalid entry. ' + str(sys.exc_info()[1]))
return entry
class TemperatureForm(forms.Form):
"""
This form allows the user to enter a specific temperature and display the resulting rates
on a collection of kinetics search results
"""
temperature = forms.FloatField(label="Specify Temperature (K)") | 45.218623 | 179 | 0.637031 |
f7222f402986219a0e09068b7d7e002f2a798ee8 | 6,931 | py | Python | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | ["Apache-2.0"] | 154 | 2019-08-29T16:53:24.000Z | 2022-02-25T00:29:56.000Z | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | ["Apache-2.0"] | 101 | 2019-08-30T17:34:51.000Z | 2022-03-02T18:46:22.000Z | streamer/node_base.py | shaka-bot/shaka-streamer | 60588ea0be83074d29538fa851338fc0183f1909 | ["Apache-2.0"] | 56 | 2019-09-08T17:47:22.000Z | 2022-02-23T17:35:11.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for nodes."""
import abc
import enum
import os
import shlex
import subprocess
import sys
import threading
import time
import traceback
from . import node_base
from typing import Any, Dict, IO, List, Optional, Union
class ProcessStatus(enum.Enum):
# Use number values so we can sort based on value.
Finished = 0
"""The node has completed its task and shut down."""
Running = 1
"""The node is still running."""
Errored = 2
"""The node has failed."""
class NodeBase(object):
"""A base class for nodes that run a single subprocess."""
@abc.abstractmethod
def __init__(self) -> None:
self._process: Optional[subprocess.Popen] = None
def __del__(self) -> None:
# If the process isn't stopped by now, stop it here. It is preferable to
# explicitly call stop().
self.stop(None)
@abc.abstractmethod
def start(self):
"""Start the subprocess.
Should be overridden by the subclass to construct a command line, call
self._create_process, and assign the result to self._process.
"""
pass
def _create_process(self,
args: Union[str, List[str]],
env: Dict[str, str] = {},
merge_env: bool = True,
stdout: Union[int, IO[Any], None] = None,
stderr: Union[int, IO[Any], None] = None,
shell: bool = False, **kwargs) -> subprocess.Popen:
"""A central point to create subprocesses, so that we can debug the
command-line arguments.
Args:
args: An array of strings if shell is False, or a single string is shell
is True; the command line of the subprocess.
env: A dictionary of environment variables to pass to the subprocess.
merge_env: If true, merge env with the parent process environment.
shell: If true, args must be a single string, which will be executed as a
shell command.
Returns:
The Popen object of the subprocess.
"""
if merge_env:
child_env = os.environ.copy()
child_env.update(env)
else:
child_env = env
# Print arguments formatted as output from bash -x would be.
# This makes it easy to see the arguments and easy to copy/paste them for
# debugging in a shell.
if shell:
assert isinstance(args, str)
print('+ ' + args)
else:
assert type(args) is list
print('+ ' + ' '.join([shlex.quote(arg) for arg in args]))
return subprocess.Popen(args,
env=child_env,
stdin=subprocess.DEVNULL,
stdout=stdout, stderr=stderr,
shell=shell, **kwargs)
def check_status(self) -> ProcessStatus:
"""Returns the current ProcessStatus of the node."""
if not self._process:
raise ValueError('Must have a process to check')
self._process.poll()
if self._process.returncode is None:
return ProcessStatus.Running
if self._process.returncode == 0:
return ProcessStatus.Finished
else:
return ProcessStatus.Errored
def stop(self, status: Optional[ProcessStatus]) -> None:
"""Stop the subprocess if it's still running."""
if self._process:
# Slightly more polite than kill. Try this first.
self._process.terminate()
if self.check_status() == ProcessStatus.Running:
# If it's not dead yet, wait 1 second.
time.sleep(1)
if self.check_status() == ProcessStatus.Running:
# If it's still not dead, use kill.
self._process.kill()
# Wait for the process to die and read its exit code. There is no way
# to ignore a kill signal, so this will happen quickly. If we don't do
# this, it can create a zombie process.
self._process.wait()
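# Minimal illustrative subclass (not part of Shaka Streamer): start() builds
# a command line and stores the Popen handle, as the docstring above asks.
#
#   class EchoNode(NodeBase):
#       def start(self):
#           self._process = self._create_process(['echo', 'hello'])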
class PolitelyWaitOnFinish(node_base.NodeBase):
"""A mixin that makes stop() wait for the subprocess if status is Finished.
This is as opposed to the base class behavior, in which stop() forces
the subprocesses of a node to terminate.
"""
def stop(self, status: Optional[ProcessStatus]) -> None:
if self._process and status == ProcessStatus.Finished:
try:
print('Waiting for', self.__class__.__name__)
self._process.wait(timeout=300) # 5m timeout
except subprocess.TimeoutExpired:
traceback.print_exc() # print the exception
# Fall through.
super().stop(status)
class ThreadedNodeBase(NodeBase):
"""A base class for nodes that run a thread.
The thread repeats some callback in a background thread.
"""
def __init__(self, thread_name: str, continue_on_exception: bool, sleep_time: float):
super().__init__()
self._status = ProcessStatus.Finished
self._thread_name = thread_name
self._continue_on_exception = continue_on_exception
self._sleep_time = sleep_time
self._thread = threading.Thread(target=self._thread_main, name=thread_name)
def _thread_main(self) -> None:
while self._status == ProcessStatus.Running:
try:
self._thread_single_pass()
except:
print('Exception in', self._thread_name, '-', sys.exc_info())
if self._continue_on_exception:
print(self.__class__.__name__+": 'Continuing.'")
else:
print(self.__class__.__name__+": 'Quitting.'")
self._status = ProcessStatus.Errored
return
# Wait a little bit before performing the next pass.
time.sleep(self._sleep_time)
@abc.abstractmethod
def _thread_single_pass(self) -> None:
"""Runs a single step of the thread loop.
This is implemented by subclasses to do whatever it is they do. It will be
called repeatedly by the base class from the node's background thread. If
this method raises an exception, the behavior depends on the
continue_on_exception argument in the constructor. If
continue_on_exception is true, the thread will continue. Otherwise, an
exception will stop the thread and therefore the node.
"""
pass
def start(self) -> None:
self._status = ProcessStatus.Running
self._thread.start()
def stop(self, status: Optional[ProcessStatus]) -> None:
self._status = ProcessStatus.Finished
self._thread.join()
def check_status(self) -> ProcessStatus:
return self._status
| 32.848341 | 87 | 0.664118 |
f722364d6824f270c535f653164145ae812490d3 | 2,405 | py | Python | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | ["MIT"] | 2 | 2018-02-03T13:43:25.000Z | 2021-12-03T16:13:49.000Z | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | ["MIT"] | 8 | 2017-08-16T08:34:59.000Z | 2018-02-05T18:30:44.000Z | hardest/binary.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | ["MIT"] | 1 | 2018-02-05T18:26:20.000Z | 2018-02-05T18:26:20.000Z | """Binary class."""
import os
from subprocess import CalledProcessError
from subprocess import check_output
from subprocess import STDOUT
class Binary(object): # pylint: disable=too-few-public-methods
"""Represent Binary structure."""
def __init__(self, path):
# type: (str) -> None
"""Binary constructor."""
self.executable = os.path.basename(path) # type: str
self.path = path # type: str
self._version = '' # type: str
def version(self):
# type: () -> str
"""Return version, by trying to get from binary."""
if not self._version:
return self._get_version()
return self._version
def _get_version(self):
# type: () -> str
raw_result = b'' # type: bytes
try:
raw_result = check_output([self.path, '-V'],
stderr=STDOUT) # type: ignore
except CalledProcessError:
return 'Unknown'
except OSError: # type: ignore
return 'Error'
decoded_result = str(raw_result.decode()) # type: str
if not decoded_result:
return 'Unknown'
stripped_version = decoded_result.strip()
self._version = stripped_version.replace('\n', ' ')
return self._version
def __eq__(self, second_addend):
# type: (object) -> bool
"""Test equality of two binaries."""
if not isinstance(second_addend, Binary):
return False
first_addend = self # type: Binary
equal_path = bool(first_addend.path == second_addend.path)
equal_version = bool(first_addend.version() == second_addend.version())
return equal_path and equal_version
def __ne__(self, second_addend):
# type: (object) -> bool
"""Test not equality of two binaries."""
return not bool(self == second_addend)
def __hash__(self):
# type: () -> int
"""Return hash."""
return hash(self.path) ^ hash(self.version())
def __repr__(self):
# type: () -> str
"""Return object representation."""
return "Binary obj ({}, {})".format(self.path, self.version())
def __str__(self):
# type: () -> str
"""Return string representation."""
return "{} ({})".format(self.path, self.version())
| 32.945205 | 79 | 0.565489 |
f72278214081544cda312e677f9ea50f74607e24 | 834 | py | Python | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/FinUserInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FinUserInfo(object):
def __init__(self):
self._user_id = None
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = FinUserInfo()
if 'user_id' in d:
o.user_id = d['user_id']
return o
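# Illustrative round trip (the user id value is a placeholder):
#
#   o = FinUserInfo.from_alipay_dict({'user_id': '2088000000000000'})
#   params = o.to_alipay_dict()  # {'user_id': '2088000000000000'}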
| 20.341463 | 65 | 0.565947 |
f7227c7ba9bcd083859d4202044f6b22b7cbd5d6 | 3,260 | py | Python | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | ["MIT"] | 6 | 2020-12-28T07:05:52.000Z | 2022-01-16T04:44:02.000Z | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | ["MIT"] | 56 | 2021-02-02T02:21:52.000Z | 2022-03-13T02:39:05.000Z | joj/horse/models/problem.py | joint-online-judge/horse | ec08ecd0528f6a4fad3fa5f5932aef1495721437 | ["MIT"] | 3 | 2021-01-28T17:52:58.000Z | 2021-12-17T17:42:42.000Z | from typing import TYPE_CHECKING, List, Optional, Type
from uuid import UUID
from sqlalchemy import event
from sqlalchemy.schema import Column, ForeignKey, UniqueConstraint
from sqlmodel import Field, Relationship
from sqlmodel.sql.sqltypes import GUID
from joj.horse.models.base import DomainURLORMModel, url_pre_save
from joj.horse.models.link_tables import ProblemProblemSetLink
from joj.horse.schemas.problem import ProblemDetail, WithLatestRecordType
from joj.horse.services.db import db_session
if TYPE_CHECKING:
from joj.horse.models import (
Domain,
ProblemConfig,
ProblemGroup,
ProblemSet,
Record,
User,
)
class Problem(DomainURLORMModel, ProblemDetail, table=True): # type: ignore[call-arg]
__tablename__ = "problems"
__table_args__ = (UniqueConstraint("domain_id", "url"),)
domain_id: UUID = Field(
sa_column=Column(
GUID, ForeignKey("domains.id", ondelete="CASCADE"), nullable=False
)
)
domain: "Domain" = Relationship(back_populates="problems")
owner_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("users.id", ondelete="SET NULL"), nullable=True
)
)
owner: Optional["User"] = Relationship(back_populates="owned_problems")
problem_group_id: Optional[UUID] = Field(
sa_column=Column(
GUID, ForeignKey("problem_groups.id", ondelete="SET NULL"), nullable=True
)
)
problem_group: Optional["ProblemGroup"] = Relationship(back_populates="problems")
problem_sets: List["ProblemSet"] = Relationship(
back_populates="problems",
link_model=ProblemProblemSetLink,
)
problem_problem_set_links: List[ProblemProblemSetLink] = Relationship(
back_populates="problem",
)
records: List["Record"] = Relationship(back_populates="problem")
problem_configs: List["ProblemConfig"] = Relationship(back_populates="problem")
@classmethod
async def get_problems_with_record_states(
cls,
result_cls: Type[WithLatestRecordType],
problem_set_id: Optional[UUID],
problems: List["Problem"],
user_id: UUID,
) -> List[WithLatestRecordType]:
from joj.horse import models
problem_ids = [problem.id for problem in problems]
records = await models.Record.get_user_latest_records(
problem_set_id=problem_set_id, problem_ids=problem_ids, user_id=user_id
)
        problems = [
            result_cls(**problems[i].dict(), latest_record=record)
            for i, record in enumerate(records)
        ]
return problems
async def get_latest_problem_config(self) -> Optional["ProblemConfig"]:
from joj.horse import models
statement = (
models.ProblemConfig.sql_select()
.where(models.ProblemConfig.problem_id == self.id)
.order_by(models.ProblemConfig.created_at.desc()) # type: ignore
.limit(1)
)
async with db_session() as session:
results = await session.exec(statement)
return results.one_or_none()
event.listen(Problem, "before_insert", url_pre_save)
event.listen(Problem, "before_update", url_pre_save)
| 33.608247 | 86 | 0.677914 |
f722827390cb58b1a6cd72a02b31a1dfb88f2244 | 17,043 | py | Python | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/configuration.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | """
ORY Hydra
Welcome to the ORY Hydra HTTP API documentation. You will find documentation for all HTTP APIs here. # noqa: E501
The version of the OpenAPI document: v1.10.5
Generated by: https://openapi-generator.tech
"""
import copy
import logging
import multiprocessing
import sys
import urllib3
from http import client as http_client
from ory_hydra_client.exceptions import ApiValueError
JSON_SCHEMA_VALIDATION_KEYWORDS = {
'multipleOf', 'maximum', 'exclusiveMaximum',
'minimum', 'exclusiveMinimum', 'maxLength',
'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:param disabled_client_side_validations (string): Comma-separated list of
JSON schema validation keywords to disable JSON schema structural validation
rules. The following keywords may be specified: multipleOf, maximum,
exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern,
maxItems, minItems.
By default, the validation is performed for data generated locally by the client
and data received from the server, independent of any validation performed by
the server side. If the input data does not satisfy the JSON schema validation
rules specified in the OpenAPI document, an exception is raised.
If disabled_client_side_validations is set, structural validation is
disabled. This can be useful to troubleshoot data validation problem, such as
when the OpenAPI document validation rules do not match the actual API data
received by the server.
:param server_index: Index to servers configuration.
:param server_variables: Mapping with string values to replace variables in
templated server configuration. The validation of enums is performed for
variables with defined enum values before.
:param server_operation_index: Mapping from operation ID to an index to server
configuration.
:param server_operation_variables: Mapping from operation ID to a mapping with
string values to replace variables in templated server configuration.
The validation of enums is performed for variables with defined enum values before.
:param ssl_ca_cert: str - the path to a file of concatenated CA certificates
in PEM format
:Example:
HTTP Basic Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
http_basic_auth:
type: http
scheme: basic
Configure API client with HTTP basic authentication:
conf = ory_hydra_client.Configuration(
username='the-user',
password='the-password',
)
"""
_default = None
def __init__(self, host=None,
api_key=None, api_key_prefix=None,
access_token=None,
username=None, password=None,
discard_unknown_keys=False,
disabled_client_side_validations="",
server_index=None, server_variables=None,
server_operation_index=None, server_operation_variables=None,
ssl_ca_cert=None,
):
"""Constructor
"""
self._base_path = "http://localhost" if host is None else host
"""Default Base url
"""
self.server_index = 0 if server_index is None and host is None else server_index
self.server_operation_index = server_operation_index or {}
"""Default server index
"""
self.server_variables = server_variables or {}
self.server_operation_variables = server_operation_variables or {}
"""Default server variables
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.access_token = access_token
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.disabled_client_side_validations = disabled_client_side_validations
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("ory_hydra_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = ssl_ca_cert
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Enable client side validation
self.client_side_validation = True
# Options to pass down to the underlying urllib3 socket
self.socket_options = None
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name == 'disabled_client_side_validations':
s = set(filter(None, value.split(',')))
for v in s:
if v not in JSON_SCHEMA_VALIDATION_KEYWORDS:
raise ApiValueError(
"Invalid keyword: '{0}''".format(v))
self._disabled_client_side_validations = s
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in self.logger.items():
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in self.logger.items():
logger.setLevel(logging.DEBUG)
# turn on http_client debug
http_client.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in self.logger.items():
logger.setLevel(logging.WARNING)
# turn off http_client debug
http_client.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier, alias=None):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:param alias: The alternative identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if self.username is not None and self.password is not None:
auth['basic'] = {
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
}
if self.access_token is not None:
auth['oauth2'] = {
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
}
return auth
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1.10.5\n"\
"SDK Package Version: v1.10.5".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None, servers=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:param servers: an array of host settings or None
:return: URL based on host settings
"""
if index is None:
return self._base_path
variables = {} if variables is None else variables
servers = self.get_host_settings() if servers is None else servers
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
for variable_name, variable in server.get('variables', {}).items():
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
@property
def host(self):
"""Return generated host."""
return self.get_host_from_settings(self.server_index, variables=self.server_variables)
@host.setter
def host(self, value):
"""Fix base path."""
self._base_path = value
self.server_index = None
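# A minimal usage sketch (host and token values are illustrative):
#
#     conf = Configuration(host="https://hydra.example.org")
#     conf.access_token = "<oauth2-token>"  # emitted as "Authorization: Bearer ..." by auth_settings()
#     conf.debug = True                     # enables urllib3/http.client debug logging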
| 35.88 | 118 | 0.619022 |
f7228f888e0c1319b41e87dce8b0a43b5bb32b32 | 6,784 | py | Python | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | my_tools/test.py | StephenStorm/SlowFast | 9e3616ec05bd0433c721d0b9438ac3ac0f145ac5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import sys, os
TRT_LOGGER = trt.Logger()
def get_engine(onnx_file_path, engine_file_path=""):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
def build_engine():
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            builder.max_workspace_size = 1 << 30 # 1 GiB
builder.max_batch_size = 1
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# last_layer = network.get_layer(network.num_layers - 1)
# network.mark_output(last_layer.get_output(0))
# print(type(network))
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
engine = builder.build_cuda_engine(network)
print("Completed creating Engine")
# with open(engine_file_path, "wb") as f:
# f.write(engine.serialize())
return engine
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
return build_engine()
def build_my_engine(engine_file_path, onnx_file_path):
"""Takes an ONNX file and creates a TensorRT engine to run inference with"""
# if os.path.exists(engine_file_path):
if False:
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_workspace_size = 1 << 30
# The maximum GPU temporary memory which the engine can use at execution time.
builder.fp16_mode = True
builder.max_batch_size = 3
config = builder.create_builder_config()
profile = builder.create_optimization_profile()
# set_shape(self: tensorrt.tensorrt.IOptimizationProfile, input: str,
# min: tensorrt.tensorrt.Dims, opt: tensorrt.tensorrt.Dims,
# max: tensorrt.tensorrt.Dims) → None
profile.set_shape("slow", (1, 3, 8, 256, 256), (1, 3, 8, 256, 256), (2, 3, 8, 256, 256))
profile.set_shape("fast", (1, 3, 32, 256, 256), (1, 3, 32, 256, 256), (2, 3, 32, 256, 256))
config.add_optimization_profile(profile)
# This function must be called at least once if the network has dynamic or shape input tensors.
# Parse model file
if not os.path.exists(onnx_file_path):
print('ONNX file {} not found, please run export_onnx.py first to generate it.'.format(onnx_file_path))
exit(0)
print('Loading ONNX file from path {}...'.format(onnx_file_path))
with open(onnx_file_path, 'rb') as model:
print('Beginning ONNX file parsing')
if not parser.parse(model.read()):
                    print('error occurred while parsing the ONNX file')
for error in range(parser.num_errors):
print(parser.get_error(error))
print('Completed parsing of ONNX file')
print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))
# engine = builder.build_cuda_engine(network)
engine = builder.build_engine(network, config)
print("Completed creating Engine")
# with open(engine_file_path, "wb") as f:
# f.write(engine.serialize())
print(profile.get_shape('slow'))
print(profile.get_shape('fast'))
print(profile.get_shape_input('slow'))
return engine
'''
context.set_binding_shape(0, (3, 150, 250))
profile = builder.create_optimization_profile();
profile.set_shape("foo", (3, 100, 200), (3, 150, 250), (3, 200, 300))
config.add_optimization_profile(profile)
with trt.Builder(TRT_LOGGER) as builder, builder.create_builder_config() as config:
config.max_workspace_size = 1 << 20 # This determines the amount of memory available to the builder when building an optimized engine and should generally be set as high as possible.
with builder.build_engine(network, config) as engine:
# Do inference here.
'''
onnx_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch_sim.onnx'
onnx_file_path2 = '/home/stephen/workspace/ActionRecognition/onnx_trt/test15_sim.onnx'
engine_file_path = '/home/stephen/workspace/ActionRecognition/my_SlowFast/onnx/slowfast_mul_batch.trt'
# engine_file_path = ''
'''
# engine = get_engine(onnx_file_path)
if engine is None:
print('fail build engine')
print(engine.get_binding_shape(0),
engine.get_binding_shape(1),
engine.get_binding_shape(2)
)
# The number of binding indices.
print('num_bindings: {}'.format(engine.num_bindings))
# The maximum batch size which can be used for inference. implicit 1
print('max batch size: {}'.format(engine.max_batch_size))
# number of layers after TensorRT layer fusion/optimization
print('num_layers: {}'.format(engine.num_layers))
# Workspace will be allocated for each IExecutionContext
print('max_workspace_size: {}'.format(engine.max_workspace_size))
# num_optimization_profiles
print('optimization profiles for this engine: {}'.format(engine.num_optimization_profiles))
'''
engine = build_my_engine(engine_file_path, onnx_file_path)
with engine.create_execution_context() as context:
print(context.get_binding_shape(0))
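    # For a dynamic-shape engine like the one above, each input binding must be pinned to a
    # concrete shape before inference (sketch; binding order 0="slow", 1="fast" is an
    # assumption matching the profile in build_my_engine):
    #     context.set_binding_shape(0, (1, 3, 8, 256, 256))
    #     context.set_binding_shape(1, (1, 3, 32, 256, 256))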
| 49.882353 | 186 | 0.664652 |
f7229fecc8abcfa996481e7128a83b81f606b917 | 1,314 | py | Python | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | gdmix-trainer/setup.py | seraconlp/gdmix | a7405c4dde9b201741f44d4ac954b7e3492b088d | [
"BSD-2-Clause"
] | null | null | null | from pathlib import Path
from setuptools import find_namespace_packages, setup
from sys import platform as _platform
import sys
VERSION="0.3.0"
current_dir = Path(__file__).resolve().parent
with open(current_dir.joinpath('README.md'), encoding='utf-8') as f:
long_description = f.read()
if _platform not in ["linux", "linux2", "darwin"]:
print("ERROR: platform {} isn't supported".format(_platform))
sys.exit(1)
setup(
name="gdmix-trainer",
python_requires='>=3.7',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=["Programming Language :: Python :: 3.7",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved"],
license='BSD-2-CLAUSE',
version=f'{VERSION}',
package_dir={'': 'src'},
packages=find_namespace_packages(where='src'),
include_package_data=True,
install_requires=[
"setuptools>=41.0.0",
"tensorflow==1.15.2",
"tensorflow_ranking==0.1.4",
"fastavro==0.21.22",
"decorator==4.4.2",
"detext-nodep==2.0.9",
"psutil==5.7.0",
"scipy==1.3.2",
"scikit-learn==0.21.2",
"smart-arg==0.2.12"
],
tests_require=['pytest']
)
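# Typical build/install flow for this package (standard setuptools usage, for reference):
#     pip install .
#     # or: python setup.py sdist bdist_wheel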
| 29.2 | 68 | 0.619482 |
f722b3e29e34689cf05526ca96e5fb3e45c1cb35 | 318 | py | Python | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | gym_jiminy/gym_jiminy/__init__.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | from gym.envs.registration import register
register(
id='jiminy-cartpole-v0',
entry_point='gym_jiminy.envs:JiminyCartPoleEnv',
reward_threshold=10000.0,
)
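# Environments registered here are created the usual gym way (sketch):
#     import gym
#     env = gym.make('jiminy-cartpole-v0')   # or 'jiminy-acrobot-v0', registered below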
register(
id='jiminy-acrobot-v0',
entry_point='gym_jiminy.envs:JiminyAcrobotEnv',
max_episode_steps=12000,
reward_threshold=-3000.0
)
| 22.714286 | 52 | 0.742138 |
f722b84767f8fa5c57fce1563a9d9b91a26ec9ed | 270 | py | Python | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | Python/String Validators.py | shivendra90/HackerRank_Solutions | 0e6f9b907c5f8ca93b2945787cb1fe7ed172bed6 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
string = str(input())
methods = [".isalnum()", ".isalpha()", ".isdigit()",
".islower()", ".isupper()"]
    for method in methods:
        print(eval("any(alpha{0} for alpha in string)".format(method)))
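    # Each iteration evaluates a string such as "any(alpha.isalnum() for alpha in string)",
    # printing the five checks in order: alnum, alpha, digit, lower, upper.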
| 27 | 71 | 0.548148 |
f722f183360b21031a113892752701feb5e37dee | 2,189 | py | Python | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 11 | 2021-05-25T07:42:27.000Z | 2022-01-03T07:46:38.000Z | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 18 | 2021-05-25T17:42:46.000Z | 2021-09-13T15:14:38.000Z | src/protocol/on_chain_info_request.py | hvuhsg/yoyocoin | aad0f413479728dc4e0842447cf1910e5dff1418 | [
"MIT"
] | 5 | 2021-06-23T17:38:51.000Z | 2022-03-03T12:40:53.000Z | """
Handle chain info requests.
When a chain info request is received, the handler executes these steps:
1. validate the message
2. get the chain info and summary
3. publish the chain info and get its CID
4. send the CID and summary
"""
from typing import Tuple
from blockchain import Blockchain
from network.ipfs import Node, Message
from .handler import Handler
class ChainInfoRequestHandler(Handler):
topic = "chain-request"
topic_response = "chain-response"
def __init__(self):
self.node = Node.get_instance()
def validate(self, message: Message):
blockchain: Blockchain = Blockchain.get_main_chain()
score_exist = message.meta.get("score", None) is not None
score_is_lower = score_exist and message.meta.get("score") < blockchain.score
# TODO: check length
return score_is_lower
def get_chain_info(self) -> Tuple[dict, dict]:
"""
        Return blockchain block hashes.
        :return: tuple of chain info (block hashes) and chain summary (chain length and score)
"""
blockchain: Blockchain = Blockchain.get_main_chain()
blocks = blockchain.chain
score = blockchain.score
length = blockchain.length
return {"blocks": blocks}, {"score": score, "length": length}
def publish_chain_info(self, chain_info: dict) -> str:
blocks_cids = []
blocks_hashes = []
for block in chain_info["blocks"]:
block_dict = block.to_dict()
blocks_cids.append(self.node.create_cid(block_dict))
blocks_hashes.append(block.hash())
return self.node.create_cid(
{"blocks_cid": blocks_cids, "blocks_hash": blocks_hashes}
)
def send_cid_and_summery(self, cid: str, summery: dict):
return self.node.publish_to_topic(
topic=self.topic_response, message=Message(cid=cid, meta=summery)
)
def __call__(self, message: Message):
super().log(message)
if not self.validate(message):
return
chain_info, chain_summery = self.get_chain_info()
cid = self.publish_chain_info(chain_info)
self.send_cid_and_summery(cid, chain_summery)
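# Wiring sketch (the subscription call below is hypothetical; the handler instance is the callable):
#
#     handler = ChainInfoRequestHandler()
#     node.subscribe_to_topic(ChainInfoRequestHandler.topic, handler)  # handler(message) runs steps 1-4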
| 33.676923 | 94 | 0.664687 |
f7230d9cc74110fcd7eacb880b7388d767c4d9d2 | 13,781 | py | Python | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 115 | 2019-11-23T02:31:30.000Z | 2022-03-29T12:58:30.000Z | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 207 | 2019-11-21T13:21:22.000Z | 2022-03-31T23:36:09.000Z | veros/setups/north_atlantic/north_atlantic.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 21 | 2020-01-28T13:13:39.000Z | 2022-02-02T13:46:33.000Z | #!/usr/bin/env python
import os
import h5netcdf
from PIL import Image
import scipy.spatial
import scipy.ndimage
from veros import VerosSetup, veros_routine, veros_kernel, KernelOutput
from veros.variables import Variable
from veros.core.operators import numpy as npx, update, at
import veros.tools
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_FILES = veros.tools.get_assets("north_atlantic", os.path.join(BASE_PATH, "assets.json"))
TOPO_MASK_FILE = os.path.join(BASE_PATH, "topo_mask.png")
class NorthAtlanticSetup(VerosSetup):
"""A regional model of the North Atlantic, inspired by `Smith et al., 2000`_.
Forcing and initial conditions are taken from the FLAME PyOM2 setup. Bathymetry
data from ETOPO1 (resolution of 1 arcmin).
Boundary forcings are implemented via sponge layers in the Greenland Sea, by the
Strait of Gibraltar, and in the South Atlantic. This setup runs with arbitrary resolution;
upon changing the number of grid cells, all forcing files will be interpolated to
the new grid. Default resolution corresponds roughly to :math:`0.5 \\times 0.25` degrees.
.. _Smith et al., 2000:
http://journals.ametsoc.org/doi/10.1175/1520-0485%282000%29030%3C1532%3ANSOTNA%3E2.0.CO%3B2
"""
x_boundary = 17.2
y_boundary = 70.0
max_depth = 5800.0
@veros_routine
def set_parameter(self, state):
settings = state.settings
settings.identifier = "na"
settings.nx, settings.ny, settings.nz = 250, 350, 50
settings.x_origin = -98.0
settings.y_origin = -18.0
settings.dt_mom = 3600.0 / 2.0
settings.dt_tracer = 3600.0 / 2.0
settings.runlen = 86400 * 365.0 * 10.0
settings.coord_degree = True
settings.enable_neutral_diffusion = True
settings.enable_skew_diffusion = True
settings.K_iso_0 = 1000.0
settings.K_iso_steep = 200.0
settings.iso_dslope = 1.0 / 1000.0
settings.iso_slopec = 4.0 / 1000.0
settings.enable_hor_friction = True
settings.A_h = 1e3
settings.enable_hor_friction_cos_scaling = True
settings.hor_friction_cosPower = 1
settings.enable_tempsalt_sources = True
settings.enable_implicit_vert_friction = True
settings.enable_tke = True
settings.c_k = 0.1
settings.c_eps = 0.7
settings.alpha_tke = 30.0
settings.mxl_min = 1e-8
settings.tke_mxl_choice = 2
settings.kappaM_min = 2e-4
settings.kappaH_min = 2e-5
settings.enable_kappaH_profile = True
settings.K_gm_0 = 1000.0
settings.enable_eke = False
settings.enable_idemix = False
settings.eq_of_state_type = 5
state.dimensions["nmonths"] = 12
state.var_meta.update(
{
"sss_clim": Variable("sss_clim", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity"),
"sst_clim": Variable("sst_clim", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature"),
"sss_rest": Variable(
"sss_rest", ("xt", "yt", "nmonths"), "g/kg", "Monthly sea surface salinity restoring"
),
"sst_rest": Variable(
"sst_rest", ("xt", "yt", "nmonths"), "deg C", "Monthly sea surface temperature restoring"
),
"t_star": Variable(
"t_star", ("xt", "yt", "zt", "nmonths"), "deg C", "Temperature sponge layer forcing"
),
"s_star": Variable("s_star", ("xt", "yt", "zt", "nmonths"), "g/kg", "Salinity sponge layer forcing"),
"rest_tscl": Variable("rest_tscl", ("xt", "yt", "zt"), "1/s", "Forcing restoration time scale"),
"taux": Variable("taux", ("xt", "yt", "nmonths"), "N/s^2", "Monthly zonal wind stress"),
"tauy": Variable("tauy", ("xt", "yt", "nmonths"), "N/s^2", "Monthly meridional wind stress"),
}
)
@veros_routine
def set_grid(self, state):
vs = state.variables
settings = state.settings
vs.dxt = update(vs.dxt, at[2:-2], (self.x_boundary - settings.x_origin) / settings.nx)
vs.dyt = update(vs.dyt, at[2:-2], (self.y_boundary - settings.y_origin) / settings.ny)
vs.dzt = veros.tools.get_vinokur_grid_steps(settings.nz, self.max_depth, 10.0, refine_towards="lower")
@veros_routine
def set_coriolis(self, state):
vs = state.variables
settings = state.settings
vs.coriolis_t = update(
vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[npx.newaxis, :] / 180.0 * settings.pi)
)
@veros_routine(dist_safe=False, local_variables=["kbot", "xt", "yt", "zt"])
def set_topography(self, state):
vs = state.variables
settings = state.settings
with h5netcdf.File(DATA_FILES["topography"], "r") as topo_file:
topo_x, topo_y, topo_bottom_depth = (self._get_data(topo_file, k) for k in ("x", "y", "z"))
topo_mask = npx.flipud(npx.asarray(Image.open(TOPO_MASK_FILE))).T
topo_bottom_depth = npx.where(topo_mask, 0, topo_bottom_depth)
topo_bottom_depth = scipy.ndimage.gaussian_filter(
topo_bottom_depth, sigma=(len(topo_x) / settings.nx, len(topo_y) / settings.ny)
)
interp_coords = npx.meshgrid(vs.xt[2:-2], vs.yt[2:-2], indexing="ij")
interp_coords = npx.rollaxis(npx.asarray(interp_coords), 0, 3)
z_interp = scipy.interpolate.interpn(
(topo_x, topo_y), topo_bottom_depth, interp_coords, method="nearest", bounds_error=False, fill_value=0
)
vs.kbot = update(
vs.kbot,
at[2:-2, 2:-2],
npx.where(
z_interp < 0.0,
1 + npx.argmin(npx.abs(z_interp[:, :, npx.newaxis] - vs.zt[npx.newaxis, npx.newaxis, :]), axis=2),
0,
),
)
vs.kbot = npx.where(vs.kbot < settings.nz, vs.kbot, 0)
def _get_data(self, f, var):
"""Retrieve variable from h5netcdf file"""
var_obj = f.variables[var]
return npx.array(var_obj).T
@veros_routine(
dist_safe=False,
local_variables=[
"tau",
"xt",
"yt",
"zt",
"temp",
"maskT",
"salt",
"taux",
"tauy",
"sst_clim",
"sss_clim",
"sst_rest",
"sss_rest",
"t_star",
"s_star",
"rest_tscl",
],
)
def set_initial_conditions(self, state):
vs = state.variables
with h5netcdf.File(DATA_FILES["forcing"], "r") as forcing_file:
t_hor = (vs.xt[2:-2], vs.yt[2:-2])
t_grid = (vs.xt[2:-2], vs.yt[2:-2], vs.zt)
forc_coords = [self._get_data(forcing_file, k) for k in ("xt", "yt", "zt")]
forc_coords[0] = forc_coords[0] - 360
forc_coords[2] = -0.01 * forc_coords[2][::-1]
temp_raw = self._get_data(forcing_file, "temp_ic")[..., ::-1]
temp = veros.tools.interpolate(forc_coords, temp_raw, t_grid, missing_value=-1e20)
vs.temp = update(vs.temp, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * temp)
salt_raw = self._get_data(forcing_file, "salt_ic")[..., ::-1]
salt = 35.0 + 1000 * veros.tools.interpolate(forc_coords, salt_raw, t_grid, missing_value=-1e20)
vs.salt = update(vs.salt, at[2:-2, 2:-2, :, vs.tau], vs.maskT[2:-2, 2:-2, :] * salt)
forc_u_coords_hor = [self._get_data(forcing_file, k) for k in ("xu", "yu")]
forc_u_coords_hor[0] = forc_u_coords_hor[0] - 360
taux = self._get_data(forcing_file, "taux")
tauy = self._get_data(forcing_file, "tauy")
for k in range(12):
vs.taux = update(
vs.taux,
at[2:-2, 2:-2, k],
(veros.tools.interpolate(forc_u_coords_hor, taux[..., k], t_hor, missing_value=-1e20) / 10.0),
)
vs.tauy = update(
vs.tauy,
at[2:-2, 2:-2, k],
(veros.tools.interpolate(forc_u_coords_hor, tauy[..., k], t_hor, missing_value=-1e20) / 10.0),
)
# heat flux and salinity restoring
sst_clim, sss_clim, sst_rest, sss_rest = [
forcing_file.variables[k][...].T for k in ("sst_clim", "sss_clim", "sst_rest", "sss_rest")
]
for k in range(12):
vs.sst_clim = update(
vs.sst_clim,
at[2:-2, 2:-2, k],
veros.tools.interpolate(forc_coords[:-1], sst_clim[..., k], t_hor, missing_value=-1e20),
)
vs.sss_clim = update(
vs.sss_clim,
at[2:-2, 2:-2, k],
(veros.tools.interpolate(forc_coords[:-1], sss_clim[..., k], t_hor, missing_value=-1e20) * 1000 + 35),
)
vs.sst_rest = update(
vs.sst_rest,
at[2:-2, 2:-2, k],
(veros.tools.interpolate(forc_coords[:-1], sst_rest[..., k], t_hor, missing_value=-1e20) * 41868.0),
)
vs.sss_rest = update(
vs.sss_rest,
at[2:-2, 2:-2, k],
(veros.tools.interpolate(forc_coords[:-1], sss_rest[..., k], t_hor, missing_value=-1e20) / 100.0),
)
with h5netcdf.File(DATA_FILES["restoring"], "r") as restoring_file:
rest_coords = [self._get_data(restoring_file, k) for k in ("xt", "yt", "zt")]
rest_coords[0] = rest_coords[0] - 360
# sponge layers
vs.rest_tscl = update(
vs.rest_tscl,
at[2:-2, 2:-2, :],
veros.tools.interpolate(rest_coords, self._get_data(restoring_file, "tscl")[..., 0], t_grid),
)
t_star = self._get_data(restoring_file, "t_star")
s_star = self._get_data(restoring_file, "s_star")
for k in range(12):
vs.t_star = update(
vs.t_star,
at[2:-2, 2:-2, :, k],
veros.tools.interpolate(rest_coords, t_star[..., k], t_grid, missing_value=0.0),
)
vs.s_star = update(
vs.s_star,
at[2:-2, 2:-2, :, k],
veros.tools.interpolate(rest_coords, s_star[..., k], t_grid, missing_value=0.0),
)
@veros_routine
def set_forcing(self, state):
vs = state.variables
vs.update(set_forcing_kernel(state))
@veros_routine
def set_diagnostics(self, state):
diagnostics = state.diagnostics
settings = state.settings
diagnostics["snapshot"].output_frequency = 3600.0 * 24 * 10
diagnostics["averages"].output_frequency = 3600.0 * 24 * 10
diagnostics["averages"].sampling_frequency = settings.dt_tracer
diagnostics["averages"].output_variables = [
"temp",
"salt",
"u",
"v",
"w",
"surface_taux",
"surface_tauy",
"psi",
]
diagnostics["cfl_monitor"].output_frequency = settings.dt_tracer * 10
@veros_routine
def after_timestep(self, state):
pass
@veros_kernel
def set_forcing_kernel(state):
vs = state.variables
settings = state.settings
year_in_seconds = 360 * 86400.0
(n1, f1), (n2, f2) = veros.tools.get_periodic_interval(vs.time, year_in_seconds, year_in_seconds / 12.0, 12)
vs.surface_taux = f1 * vs.taux[:, :, n1] + f2 * vs.taux[:, :, n2]
vs.surface_tauy = f1 * vs.tauy[:, :, n1] + f2 * vs.tauy[:, :, n2]
if settings.enable_tke:
vs.forc_tke_surface = update(
vs.forc_tke_surface,
at[1:-1, 1:-1],
npx.sqrt(
(0.5 * (vs.surface_taux[1:-1, 1:-1] + vs.surface_taux[:-2, 1:-1]) / settings.rho_0) ** 2
+ (0.5 * (vs.surface_tauy[1:-1, 1:-1] + vs.surface_tauy[1:-1, :-2]) / settings.rho_0) ** 2
)
** 1.5,
)
cp_0 = 3991.86795711963
vs.forc_temp_surface = (
(f1 * vs.sst_rest[:, :, n1] + f2 * vs.sst_rest[:, :, n2])
* (f1 * vs.sst_clim[:, :, n1] + f2 * vs.sst_clim[:, :, n2] - vs.temp[:, :, -1, vs.tau])
* vs.maskT[:, :, -1]
/ cp_0
/ settings.rho_0
)
vs.forc_salt_surface = (
(f1 * vs.sss_rest[:, :, n1] + f2 * vs.sss_rest[:, :, n2])
* (f1 * vs.sss_clim[:, :, n1] + f2 * vs.sss_clim[:, :, n2] - vs.salt[:, :, -1, vs.tau])
* vs.maskT[:, :, -1]
)
ice_mask = (vs.temp[:, :, -1, vs.tau] * vs.maskT[:, :, -1] <= -1.8) & (vs.forc_temp_surface <= 0.0)
vs.forc_temp_surface = npx.where(ice_mask, 0.0, vs.forc_temp_surface)
vs.forc_salt_surface = npx.where(ice_mask, 0.0, vs.forc_salt_surface)
if settings.enable_tempsalt_sources:
vs.temp_source = (
vs.maskT
* vs.rest_tscl
* (f1 * vs.t_star[:, :, :, n1] + f2 * vs.t_star[:, :, :, n2] - vs.temp[:, :, :, vs.tau])
)
vs.salt_source = (
vs.maskT
* vs.rest_tscl
* (f1 * vs.s_star[:, :, :, n1] + f2 * vs.s_star[:, :, :, n2] - vs.salt[:, :, :, vs.tau])
)
return KernelOutput(
surface_taux=vs.surface_taux,
surface_tauy=vs.surface_tauy,
temp_source=vs.temp_source,
salt_source=vs.salt_source,
forc_tke_surface=vs.forc_tke_surface,
forc_temp_surface=vs.forc_temp_surface,
forc_salt_surface=vs.forc_salt_surface,
)
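# A typical way to run this setup (assumed, following the usual Veros entry-point pattern):
#
#     simulation = NorthAtlanticSetup()
#     simulation.setup()
#     simulation.run()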
| 38.387187 | 118 | 0.550468 |
f72338223fcd579b0d8eab318317428813bb0d97 | 2,216 | py | Python | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/common/test_run/sqrt_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math import Sqrt
from tests.common.tensorio import compare_tensor
from tests.common.gen_random import random_gaussian
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array
def sqrt_run(shape, dtype, attrs):
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, input, output = gen_data(dtype, shape)
return mod, expect, (input, output)
else:
return mod
else:
expect, input, output = gen_data(dtype, shape)
mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)
output = utils.mod_launch(mod, (input, output), expect=expect)
if attrs.get("profiling", False):
target_name = attrs["target"].split()[0]
args_list = to_tvm_nd_array([input, output], akg.tvm.context(target_name, 0))
target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_times"])
return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
# Generate data for testing the op
input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)
input = np.abs(input)
expect = np.sqrt(input)
output = np.full(expect.shape, np.nan, dtype)
return expect, input, output
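# Example invocation as used by the test harness (shape, dtype and attrs are illustrative):
#
#     sqrt_run((16, 16), "float16", {"target": "cuda", "profiling": False})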
| 43.45098 | 105 | 0.704874 |
f7236d3fcab67683773e445ba66effeb0a887648 | 7,422 | py | Python | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | src/client.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | [
"BSD-3-Clause"
] | null | null | null | import enum
import json
from target_voice import create_voice, gender_string
import api.stt.util
class Provider(enum.Enum):
GCLOUD = 1
AWS = 2
AWS_DEEPL = 3
class Client:
def __init__(
self,
upload_filename,
stt_provider=Provider.GCLOUD,
translate_provider=Provider.GCLOUD,
tts_provider=Provider.GCLOUD,
gcloud_speedup=False, # temporary flag
gender=None,
):
self.upload_filename = upload_filename
self.stt_provider = stt_provider
self.translate_provider = translate_provider
self.tts_provider = tts_provider
self.setup_clients()
self.gcloud_speedup = gcloud_speedup
self.gender = gender
def setup_clients(self):
if self.stt_provider == Provider.GCLOUD:
from api.storage import gcloud_storage as storage
from api.stt import gcloud_stt as stt
if self.translate_provider == Provider.GCLOUD:
from api.translate import gcloud_translate as translate
if self.tts_provider == Provider.GCLOUD:
from api.tts import gcloud_tts as tts
if self.stt_provider == Provider.AWS:
from api.storage import aws_storage as storage
from api.stt import aws_stt as stt
if self.translate_provider == Provider.AWS:
from api.translate import aws_translate as translate
if self.tts_provider == Provider.AWS:
from api.tts import aws_tts as tts
if self.stt_provider == Provider.AWS_DEEPL:
from api.storage import aws_storage as storage
from api.stt import aws_stt as stt
if self.translate_provider == Provider.AWS_DEEPL:
from api.translate import deepl_translate as translate
if self.tts_provider == Provider.AWS_DEEPL:
from api.tts import aws_tts as tts
self.storage = storage
self.stt = stt
self.translate = translate
self.tts = tts
self.storage_client = storage.get_client()
self.stt_client = stt.get_client()
if not self.translate_provider == Provider.AWS_DEEPL:
self.translate_client = translate.get_client()
self.tts_client = tts.get_client()
self.target_voices = {}
# input_file should be a complete path
def upload_file_to_cloud(self, input_file):
self.storage.upload_file_to_cloud(
self.storage_client, input_file, self.upload_filename
)
def transcribe_sentences(self, locale):
response = self.stt.transcribe_storage_uri(
self.stt_client, self.upload_filename, locale
)
word_list = self.stt.get_word_list(response)
return api.stt.util.create_sentences_from_word_list(word_list, locale)
def get_translation(self, original_text, target_language):
if not self.translate_provider == Provider.AWS_DEEPL:
return self.translate.get_translation(
self.translate_client, original_text, target_language
)
else:
return self.translate.get_translation(original_text, target_language)
def get_target_voice(self, locale, gender):
response = self.tts.list_voices(self.tts_client, locale)
voices = self.tts.normalize_voices(response)
# find the best matches
options = [
v for v in voices if (v.gender == self.gender and v.locale == locale)
]
if voices == []:
# TODO add error handling
return None
if options == []:
print("Couldn't find a matching voice.")
return voices[0]
# if there is only one option, there is no option
if len(options) == 1:
return options[0]
print(f"Options for {gender_string(gender)} - {locale}")
for idx, voice in enumerate(options):
print(f"#{idx} - {voice.name}")
choice = input(
f"Choose a voice by entering a number between 0:{len(options)-1} [Default: 0]: "
)
if choice.strip() == "":
choice = 0
return options[int(choice)]
def get_audio_chunk_for_sentence(self, text, locale, speedup=1.0):
if locale not in self.target_voices:
self.target_voices[locale] = self.get_target_voice(locale, self.gender)
print(self.target_voices[locale])
update_best_voices = input(
"Would you like to update the best voices file? (y/N) "
)
if update_best_voices == "y":
self.save_best_voices()
if self.tts_provider == Provider.GCLOUD:
return self.tts.get_audio_chunk_for_sentence(
self.tts_client, text, self.target_voices[locale], speedup=speedup
)
else:
return self.tts.get_audio_chunk_for_sentence(
self.tts_client, text, self.target_voices[locale]
)
# Returns a list of voices which match the gender of the client
def get_all_matching_voices(self):
response = self.tts.list_voices(self.tts_client)
voices = self.tts.normalize_voices(response)
translation_lang_codes = self.translate.get_supported_languages(
self.translate_client
)
# don't synthesize if the translation doesn't exist
broken = []
for v in voices:
if v.lang_code not in translation_lang_codes:
broken.append(v.locale)
return [
v for v in voices if (v.gender == self.gender and v.locale not in broken)
]
def load_best_voices(self, voices_file, target_locales):
# json will have the following structure
# { gender: {
# "AWS" : {
# locale : voiceId
# ...
# },
# "gcloud" : {
# locale : name
# ...
# },
# }
# }
self.voices_file = voices_file
with open(voices_file) as f:
voices = json.load(f)
provider = self.tts.provider_name()
gender = gender_string(self.gender)
if gender not in voices or provider not in voices[gender]:
return
for locale, name in voices[gender][
provider
].items(): # this should be a list of locale : name
if provider == "AWS":
self.target_voices[locale] = create_voice(
locale, self.gender, voiceId=name
)
if provider == "gcloud":
self.target_voices[locale] = create_voice(
locale, self.gender, gcloud_name=name
)
if locale in target_locales:
print(self.target_voices[locale])
def save_best_voices(self):
with open(self.voices_file) as f:
voices = json.load(f)
provider = self.tts.provider_name()
gender = gender_string(self.gender)
if gender not in voices:
voices[gender] = {}
if provider not in voices[gender]:
voices[gender][provider] = {}
for locale, voice in self.target_voices.items():
voices[gender][provider][locale] = voice.name
with open(self.voices_file, "w") as w:
w.write(json.dumps(voices, indent=2))
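# End-to-end sketch (file names, locales and text are illustrative):
#
#     client = Client("uploads/talk.flac")             # GCLOUD providers by default
#     client.upload_file_to_cloud("/tmp/talk.flac")
#     sentences = client.transcribe_sentences("en-US")
#     german = client.get_translation("hello there", "de")
#     audio = client.get_audio_chunk_for_sentence(german, "de-DE")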
| 34.202765 | 92 | 0.597683 |
f72391e581e5477cb6c44e276c56fea7cf53fa01 | 479 | py | Python | alipay/aop/api/response/AlipayFundJointaccountOperationApproveResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayFundJointaccountOperationApproveResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayFundJointaccountOperationApproveResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundJointaccountOperationApproveResponse(AlipayResponse):
def __init__(self):
super(AlipayFundJointaccountOperationApproveResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayFundJointaccountOperationApproveResponse, self).parse_response_content(response_content)
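        # no response-specific fields to extract here; the base-class parse is sufficient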
| 29.9375 | 119 | 0.797495 |
f723bb3a1ed4cb91e25d960a0f850160a5553547 | 112,619 | py | Python | pygsti/models/modelconstruction.py | maij/pyGSTi | 70e83e05fa689f53550feb3914c4fac40ca4a943 | [
"Apache-2.0"
] | 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | pygsti/models/modelconstruction.py | 00mjk/pyGSTi | 4f8bf5337b01b7afcb7b0580b717b5d1fe281be4 | [
"Apache-2.0"
] | 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | pygsti/models/modelconstruction.py | 00mjk/pyGSTi | 4f8bf5337b01b7afcb7b0580b717b5d1fe281be4 | [
"Apache-2.0"
] | 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | """
Functions for the construction of new models.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
from os import stat
from pygsti.modelmembers.instruments.instrument import Instrument
import numpy as _np
import scipy as _scipy
import scipy.linalg as _spl
from pygsti.evotypes import Evotype as _Evotype
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
from pygsti.modelmembers import states as _state
from pygsti.modelmembers import instruments as _instrument
from pygsti.modelmembers.operations import opfactory as _opfactory
from pygsti.models import stencillabel as _stencil
from pygsti.models.modelnoise import OpModelNoise as _OpModelNoise
from pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise
from pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise
from pygsti.models.modelnoise import LindbladNoise as _LindbladNoise
from pygsti.models.modelnoise import StochasticNoise as _StochasticNoise
from pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise
from pygsti.models import explicitmodel as _emdl
from pygsti.models import gaugegroup as _gg
from pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel
from pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel
from pygsti.baseobjs import label as _label
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis
from pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph
from pygsti.tools import basistools as _bt
from pygsti.tools import internalgates as _itgs
from pygsti.tools import optools as _ot
from pygsti.tools import listtools as _lt
from pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.tools.legacytools import deprecate as _deprecated_fn
#############################################
# Build gates based on "standard" gate names
############################################
def create_spam_vector(vec_expr, state_space, basis):
"""
Build a rho or E vector from an expression.
Parameters
----------
vec_expr : string
        the expression which determines which vector to build. Currently, only
        integers are allowed, which specify the vector for the pure state of
that index. For example, "1" means return vectorize(``|1><1|``). The
index labels the absolute index of the state within the entire state
space, and is independent of the direct-sum decomposition of density
matrix space.
state_space : StateSpace
The state space that the created operation should act upon.
basis : str or Basis
The basis of the returned vector. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
The vector specified by vec_expr in the desired basis.
"""
    # So far we just allow integer expressions that give the index of the state
    # (within the entire state space) that we prep/measure.
try:
index = int(vec_expr)
    except (ValueError, TypeError):
raise ValueError("Expression must be the index of a state (as a string)")
state_space = _statespace.StateSpace.cast(state_space)
if isinstance(basis, str):
basis = _Basis.cast(basis, state_space)
assert (state_space.dim == basis.dim), \
"State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)
#standard basis that has the same direct-sum structure as `basis`:
std_basis = basis.create_equivalent('std')
vecInSimpleStdBasis = _np.zeros(std_basis.elshape, 'd') # a matrix, but flattened it is our spamvec
vecInSimpleStdBasis[index, index] = 1.0 # now a matrix with just a single 1 on the diag
vecInReducedStdBasis = _np.dot(std_basis.from_elementstd_transform_matrix, vecInSimpleStdBasis.flatten())
# translates the density matrix / state vector to the std basis with our desired block structure
vec = _bt.change_basis(vecInReducedStdBasis, std_basis, basis)
return vec.reshape(-1, 1)
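# For example (illustrative call): create_spam_vector("0", [('Q0',)], "pp") returns the
# Pauli-product column vector for |0><0| on a single qubit (a 4x1 array).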
def create_identity_vec(basis):
"""
Build a the identity vector for a given space and basis.
Parameters
----------
basis : Basis object
The basis of the returned vector. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
The identity vector in the desired basis.
"""
opDim = basis.dim
if isinstance(basis, _DirectSumBasis):
blockDims = [c.dim for c in basis.component_bases]
else: blockDims = [opDim]
# assume index given as vec_expr refers to a Hilbert-space state index, so "reduced-std" basis
vecInReducedStdBasis = _np.zeros((opDim, 1), 'd')
#set all diagonal elements of density matrix to 1.0 (end result = identity density mx)
start = 0; vecIndex = 0
for blockVecDim in blockDims:
blockDim = int(_np.sqrt(blockVecDim)) # vec -> matrix dim
for i in range(start, start + blockDim):
for j in range(start, start + blockDim):
if i == j: vecInReducedStdBasis[vecIndex, 0] = 1.0 # set diagonal element of density matrix
vecIndex += 1
start += blockDim
return _bt.change_basis(vecInReducedStdBasis, "std", basis)
def create_operation(op_expr, state_space, basis="pp", parameterization="full", evotype='default'):
"""
Build an operation object from an expression.
Parameters
----------
op_expr : string
expression for the gate to build. String is first split into parts
delimited by the colon (:) character, which are composed together to
create the final gate. Each part takes on of the allowed forms:
- I(ssl_0, ...) = identity operation on one or more state space labels
(ssl_i)
- X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl
- Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl
- Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl
- CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts
on qubit labeled by ssl1 with ssl0 being the control.
- CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit
labeled by ssl1 with ssl0 being the control.
- CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit
labeled by ssl1 with ssl0 being the control.
- LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as
an x-rotation between states with integer indices i0 and i1 followed
by complete decoherence between the states.
state_space : StateSpace
The state space that the created operation should act upon.
basis : str or Basis
The basis the returned operation should be represented in.
parameterization : {"full","TP","static"}, optional
How to parameterize the resulting gate.
- "full" = return a FullArbitraryOp.
- "TP" = return a FullTPOp.
- "static" = return a StaticArbitraryOp.
evotype : Evotype or str, optional
The evolution type of this operation, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
LinearOperator
A gate object representing the gate given by op_expr in the desired
basis.
"""
# op_expr can contain single qubit ops: X(theta) ,Y(theta) ,Z(theta)
# two qubit ops: CNOT
# clevel qubit ops: Leak
# two clevel opts: Flip
# each of which is given additional parameters specifying which indices it acts upon
#Working with a StateSpaceLabels object gives us access to all the info we'll need later
state_space = _statespace.StateSpace.cast(state_space)
if isinstance(basis, str):
basis = _Basis.cast(basis, state_space)
assert(state_space.dim == basis.dim), \
"State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)
# ------------------------------------------------------------------------------------------------------------------
# -- Helper Functions ----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
def to_label(lbl):
""" Convert integer-strings to integers in state space label """
        try: return int(lbl)
        except ValueError: return lbl.strip()
def to_labels(lbls):
""" Convert integer-strings to integers in state space labels """
return [to_label(lbl) for lbl in lbls]
# ------------------------------------------------------------------------------------------------------------------
# -- End Helper Functions ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
#FUTURE?: type_preferences = ('static standard', 'static clifford', 'static unitary')
build_evotype = 'default'
superop_mxs_in_basis = []
exprTerms = op_expr.split(':')
for exprTerm in exprTerms:
l = exprTerm.index('('); r = exprTerm.rindex(')')
opName = exprTerm[0:l]
argsStr = exprTerm[l + 1:r]
args = argsStr.split(',')
if opName == "I":
# qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
labels = to_labels(args)
            stateSpaceUDim = int(_np.prod([state_space.label_udimension(l) for l in labels]))
# a complex 2x2 mx unitary for the identity in Pauli-product basis
Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)
#FUTURE?:
# stdname = 'Gi' if (stateSpaceUDim == 2) else None
# Uop = _op.create_from_unitary_mx(_np.identity(stateSpaceUDim, complex), type_preferences, 'pp',
# stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "D":
# like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map
raise NotImplementedError("Removed temporarily - need to update using embedded gates")
# # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
# labels = to_labels(args)
# stateSpaceDim = sslbls.product_dim(labels)
# if parameterization not in ("linear","linearTP"):
# raise ValueError("'D' gate only makes sense to use when and parameterization == 'linear'")
# if defaultI2P == "TP":
# # parameterize only the diagonals els after the first
# indicesToParameterize = [ (i,i) for i in range(1,stateSpaceDim**2) ]
# else:
# # parameterize only the diagonals els
# indicesToParameterize = [ (i,i) for i in range(0,stateSpaceDim**2) ]
# # *real* 4x4 mx in Pauli-product basis -- still just the identity!
# pp_opMx = _np.identity(stateSpaceDim**2, 'd')
# # pp_opMx assumed to be in the Pauli-product basis
# opTermInFinalBasis = embed_operation(pp_opMx, tuple(labels), indicesToParameterize)
elif opName in ('X', 'Y', 'Z'): # single-qubit gate names
assert(len(args) == 2) # theta, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label = to_label(args[1])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
if opName == 'X': ex = -1j * theta * sigmax / 2
elif opName == 'Y': ex = -1j * theta * sigmay / 2
elif opName == 'Z': ex = -1j * theta * sigmaz / 2
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)
#FUTURE?:
#stdname = None
#if _np.isclose(theta, _np.pi): stdname = 'G%spi' % opName.lower()
#elif _np.isclose(theta, _np.pi/2): stdname = 'G%spi2' % opName.lower()
# Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == 'N': # more general single-qubit gate
assert(len(args) == 5) # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
sxCoeff = eval(args[1], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
syCoeff = eval(args[2], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
szCoeff = eval(args[3], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
label = to_label(args[4])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)
#FUTURE?: Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'): # two-qubit gate names
if opName in ('CX', 'CY', 'CZ'):
assert(len(args) == 3) # theta, qubit-label1, qubit-label2
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label1 = to_label(args[1]); label2 = to_label(args[2])
if opName == 'CX': ex = -1j * theta * sigmax / 2
elif opName == 'CY': ex = -1j * theta * sigmay / 2
elif opName == 'CZ': ex = -1j * theta * sigmaz / 2
Utarget = _spl.expm(ex) # 2x2 unitary matrix operating on target qubit
else: # opName in ('CNOT','CPHASE')
assert(len(args) == 2) # qubit-label1, qubit-label2
label1 = to_label(args[0]); label2 = to_label(args[1])
if opName == 'CNOT':
Utarget = _np.array([[0, 1],
[1, 0]], 'd')
elif opName == 'CPHASE':
Utarget = _np.array([[1, 0],
[0, -1]], 'd')
# 4x4 unitary matrix operating on isolated two-qubit space
U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget
assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \
"%s gate must act on qubits!" % opName
# complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)
#FUTURE?:
# if opName == "CNOT": stdname = "Gcnot"
# elif opName == "CPHASE": stdname = "Gcphase"
# else: stdname = None
# Uop = _op.create_from_unitary_mx(U, type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space
Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "LX": # TODO - better way to describe leakage?
assert(len(args) == 3) # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
i1 = int(args[1]) # row/column index of a single *state* within the density matrix
i2 = int(args[2]) # row/column index of a single *state* within the density matrix
ex = -1j * theta * sigmax / 2
Uop = _spl.expm(ex) # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis
opDim = basis.dim
dmDim = int(_np.sqrt(basis.elsize)) # matrix dim of the "embedding space"
if isinstance(basis, _DirectSumBasis):
blockDims = [c.dim for c in basis.component_bases]
else: blockDims = [opDim]
Utot = _np.identity(dmDim, 'complex')
Utot[i1, i1] = Uop[0, 0]
Utot[i1, i2] = Uop[0, 1]
Utot[i2, i1] = Uop[1, 0]
Utot[i2, i2] = Uop[1, 1]
            # dmDim^2 x dmDim^2 mx operating on vectorized total density matrix
opTermInStdBasis = _ot.unitary_to_process_mx(Utot)
            # contract the full dmDim x dmDim "embedding space" down to the direct-sum block structure
            embedded_std_basis = _Basis.cast('std', dmDim**2)  # std basis of the full embedding space
            std_basis = _Basis.cast('std', blockDims)  # std basis w/blockdim structure, e.g. [4, 1]
opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',
embedded_std_basis, std_basis)
superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)
else: raise ValueError("Invalid gate name: %s" % opName)
superop_mxs_in_basis.append(superop_mx_in_basis)
#Note: expressions are listed in "matrix composition order"
final_superop_mx = superop_mxs_in_basis[0]
for mx in superop_mxs_in_basis[1:]:
final_superop_mx = _np.dot(final_superop_mx, mx)
if basis.real:
assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), "Operation matrix should be real but isn't!"
final_superop_mx = _np.real(final_superop_mx)
return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,
evotype=evotype, state_space=state_space)
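# Illustrative usage sketch (not part of the original module): a two-qubit
# operation built from the expression grammar documented above -- an X(pi/2)
# rotation on 'Q0' composed with a CNOT controlled on 'Q0'.  The qubit labels
# are assumptions for illustration; the result is not verified here.
def _example_create_operation():
    ss = _statespace.QubitSpace(('Q0', 'Q1'))
    return create_operation("X(pi/2,Q0):CNOT(Q0,Q1)", ss, "pp", "full")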
def _create_explicit_model_from_expressions(state_space, basis,
op_labels, op_expressions,
prep_labels=('rho0',), prep_expressions=('0',),
effect_labels='standard', effect_expressions='standard',
povm_labels='Mdefault', gate_type="full", prep_type="auto",
povm_type="auto", instrument_type="auto", evotype='default'):
"""
Build a new Model given lists of operation labels and expressions.
Parameters
----------
state_space : StateSpace
The state space for this model.
basis : Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
op_labels : list of strings
A list of labels for each created gate in the final model. To
conform with text file parsing conventions these names should begin
with a capital G and can be followed by any number of lowercase
characters, numbers, or the underscore character.
op_expressions : list of strings
A list of gate expressions, each corresponding to a operation label in
op_labels, which determine what operation each gate performs (see
documentation for :meth:`create_operation`).
prep_labels : list of string, optional
A list of labels for each created state preparation in the final
model. To conform with conventions these labels should begin with
"rho".
prep_expressions : list of strings, optional
A list of vector expressions for each state preparation vector (see
        documentation for :meth:`create_spam_vector`).
effect_labels : list, optional
If `povm_labels` is a string, then this is just a list of the effect
(outcome) labels for the single POVM. If `povm_labels` is a tuple,
then `effect_labels` must be a list of lists of effect labels, each
list corresponding to a POVM. If set to the special string `"standard"`
then the length-n binary strings are used when the state space consists
of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
is the dimension of the state space, in all non-qubit cases.
effect_expressions : list, optional
A list or list-of-lists of (string) vector expressions for each POVM
        effect vector (see documentation for :meth:`create_spam_vector`). Expressions
correspond to labels in `effect_labels`. If set to the special string
`"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
where `<dim>` is the dimension of the state space.
povm_labels : list or string, optional
A list of POVM labels, or a single (string) label. In the latter case,
only a single POVM is created and the format of `effect_labels` and
`effect_expressions` is simplified (see above).
parameterization : {"full","TP","static"}, optional
How to parameterize the gates of the resulting Model (see
documentation for :meth:`create_operation`).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
Model
The created model.
"""
#defP = "TP" if (parameterization in ("TP","linearTP")) else "full"
state_space = _statespace.StateSpace.cast(state_space)
ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,
default_prep_type=prep_type, default_povm_type=povm_type,
default_instrument_type=instrument_type, evotype=evotype)
#prep_prefix="rho", effect_prefix="E", gate_prefix="G")
if prep_type == "auto":
prep_type = _state.state_type_from_op_type(gate_type)
if povm_type == "auto":
povm_type = _povm.povm_type_from_op_type(gate_type)
if instrument_type == "auto":
instrument_type = _instrument.instrument_type_from_op_type(gate_type)
for label, rhoExpr in zip(prep_labels, prep_expressions):
vec = create_spam_vector(rhoExpr, state_space, basis)
ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)
if isinstance(povm_labels, str):
povm_labels = [povm_labels]
effect_labels = [effect_labels]
effect_expressions = [effect_expressions]
dmDim = int(_np.sqrt(basis.dim)) # "densitymx" evotype assumed... FIX?
for povmLbl, ELbls, EExprs in zip(povm_labels,
effect_labels, effect_expressions):
effect_vecs = {}
if ELbls == "standard":
qubit_dim = 4
if state_space.num_tensor_product_blocks == 1 and \
all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):
# a single tensor product block comprised of qubits: '000', '001', etc.
nQubits = len(state_space.tensor_product_block_dimensions(0))
ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]
else:
                ELbls = list(map(str, range(dmDim)))  # standard = 0, 1, ..., dmDim-1
if EExprs == "standard":
            EExprs = list(map(str, range(dmDim)))  # standard = 0, 1, ..., dmDim-1
effect_vecs = {label: create_spam_vector(expr, state_space, basis)
for label, expr in zip(ELbls, EExprs)}
if len(effect_vecs) > 0: # don't add POVMs with 0 effects
ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)
for (opLabel, opExpr) in zip(op_labels, op_expressions):
ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)
if gate_type == "full":
ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)
elif gate_type == "full TP":
ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)
elif gate_type == 'CPTP':
ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)
else:
ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)
ret._clean_paramvec()
return ret
def create_explicit_model_from_expressions(state_space,
op_labels, op_expressions,
prep_labels=('rho0',), prep_expressions=('0',),
effect_labels='standard', effect_expressions='standard',
povm_labels='Mdefault', basis="auto", gate_type="full",
prep_type="auto", povm_type="auto", instrument_type="auto",
evotype='default'):
"""
Build a new :class:`ExplicitOpModel` given lists of labels and expressions.
Parameters
----------
state_space : StateSpace
the state space for the model.
op_labels : list of strings
A list of labels for each created gate in the final model. To
conform with text file parsing conventions these names should begin
with a capital G and can be followed by any number of lowercase
characters, numbers, or the underscore character.
op_expressions : list of strings
A list of gate expressions, each corresponding to a operation label in
op_labels, which determine what operation each gate performs (see
documentation for :meth:`create_operation`).
prep_labels : list of string
A list of labels for each created state preparation in the final
model. To conform with conventions these labels should begin with
"rho".
prep_expressions : list of strings
A list of vector expressions for each state preparation vector (see
        documentation for :meth:`create_spam_vector`).
effect_labels : list, optional
If `povm_labels` is a string, then this is just a list of the effect
(outcome) labels for the single POVM. If `povm_labels` is a tuple,
then `effect_labels` must be a list of lists of effect labels, each
list corresponding to a POVM. If set to the special string `"standard"`
then the length-n binary strings are used when the state space consists
of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
is the dimension of the state space, in all non-qubit cases.
effect_expressions : list, optional
A list or list-of-lists of (string) vector expressions for each POVM
        effect vector (see documentation for :meth:`create_spam_vector`). Expressions
correspond to labels in `effect_labels`. If set to the special string
`"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
where `<dim>` is the dimension of the state space.
povm_labels : list or string, optional
A list of POVM labels, or a single (string) label. In the latter case,
only a single POVM is created and the format of `effect_labels` and
`effect_expressions` is simplified (see above).
basis : {'gm','pp','std','qt','auto'}, optional
the basis of the matrices in the returned Model
- "std" = operation matrix operates on density mx expressed as sum of matrix
units
- "gm" = operation matrix operates on dentity mx expressed as sum of
normalized Gell-Mann matrices
- "pp" = operation matrix operates on density mx expresses as sum of
tensor-product of Pauli matrices
- "qt" = operation matrix operates on density mx expressed as sum of
Qutrit basis matrices
- "auto" = "pp" if possible (integer num of qubits), "qt" if density
matrix dim == 3, and "gm" otherwise.
parameterization : {"full","TP"}, optional
How to parameterize the gates of the resulting Model (see
documentation for :meth:`create_operation`).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
ExplicitOpModel
The created model.
"""
#Note: so far, all allowed `parameterization` values => densitymx evotype
state_space = _statespace.StateSpace.cast(state_space)
stateSpaceDim = state_space.dim
# Note: what about state_space_labels.tpb_dims?
if basis == "auto":
if _np.isclose(_np.log2(stateSpaceDim) / 2,
round(_np.log2(stateSpaceDim) / 2)):
basis = "pp"
elif stateSpaceDim == 9:
basis = "qt"
else: basis = "gm"
return _create_explicit_model_from_expressions(state_space,
_Basis.cast(basis, state_space),
op_labels, op_expressions,
prep_labels, prep_expressions,
effect_labels, effect_expressions,
povm_labels, gate_type=gate_type,
prep_type=prep_type, povm_type=povm_type,
instrument_type=instrument_type, evotype=evotype)
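# Illustrative usage sketch (not part of the original module): a single-qubit
# model with idle, X(pi/2) and Y(pi/2) gates built from expression strings.
# The [('Q0',)] state-space specification follows the labeling conventions
# used above; treat this as an unverified example, not library code.
def _example_explicit_model_from_expressions():
    return create_explicit_model_from_expressions(
        [('Q0',)], ['Gi', 'Gx', 'Gy'],
        ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"])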
def create_explicit_alias_model(mdl_primitives, alias_dict):
"""
Creates a model by applying aliases to an existing model.
The new model is created by composing the gates of an existing `Model`,
`mdl_primitives`, according to a dictionary of `Circuit`s, `alias_dict`.
The keys of `alias_dict` are the operation labels of the returned `Model`.
    State preparations and POVMs are unaltered, and are simply copied from `mdl_primitives`.
Parameters
----------
mdl_primitives : Model
A Model containing the "primitive" gates (those used to compose
the gates of the returned model).
alias_dict : dictionary
A dictionary whose keys are strings and values are Circuit objects
specifying sequences of primitive gates. Each key,value pair specifies
        the composition rule for creating a gate in the returned model.
Returns
-------
Model
A model whose gates are compositions of primitive gates and whose
spam operations are the same as those of `mdl_primitives`.
"""
mdl_new = mdl_primitives.copy()
for gl in mdl_primitives.operations.keys():
del mdl_new.operations[gl] # remove all gates from mdl_new
for gl, opstr in alias_dict.items():
mdl_new.operations[gl] = mdl_primitives.sim.product(opstr)
#Creates fully parameterized gates by default...
mdl_new._clean_paramvec()
return mdl_new
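# Illustrative usage sketch (not part of the original module): compose a 'Gxx'
# gate out of two applications of a primitive 'Gx' gate.  The
# pygsti.circuits.Circuit import is an assumption for illustration only.
def _example_alias_model(mdl_primitives):
    from pygsti.circuits import Circuit  # assumed import path
    return create_explicit_alias_model(mdl_primitives, {'Gxx': Circuit(['Gx', 'Gx'])})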
def create_explicit_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto',
evotype="default", simulator="auto",
ideal_gate_type='auto', ideal_spam_type='computational',
embed_gates=False, basis='pp'):
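    """
    Create an :class:`ExplicitOpModel` from a processor specification.

    The various noise arguments are packaged into a :class:`ModelNoise`
    object and the work is delegated to :meth:`_create_explicit_model`,
    with `ideal_spam_type` used for both the ideal prep and POVM types.
    See :meth:`create_crosstalk_free_model` for descriptions of the shared
    noise-specification arguments.
    """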
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=True)
return _create_explicit_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, ideal_gate_type, ideal_spam_type, ideal_spam_type, embed_gates, basis)
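# Illustrative usage sketch (not part of the original module): an explicit
# model for a 2-qubit processor with depolarization on the X rotations.  The
# QubitProcessorSpec import path, gate names, and 'line' geometry are
# assumptions for illustration.
def _example_create_explicit_model():
    from pygsti.processors import QubitProcessorSpec  # assumed import path
    pspec = QubitProcessorSpec(2, ['Gxpi2', 'Gypi2', 'Gcnot'], geometry='line')
    return create_explicit_model(pspec, depolarization_strengths={'Gxpi2': 0.01})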
def _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',
embed_gates=False, basis='pp'):
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels)
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
if custom_gates is None:
custom_gates = {}
if ideal_gate_type == "auto":
ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
if ideal_prep_type == "auto":
ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
if ideal_povm_type == "auto":
ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
def _embed_unitary(statespace, target_labels, unitary):
dummyop = _op.EmbeddedOp(statespace, target_labels,
_op.StaticUnitaryOp(unitary, basis='pp', evotype="statevec_slow")) # basis hardcode?
return dummyop.to_dense("Hilbert")
local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type) # no custom *local* gates
ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,
simulator=simulator)
    # Special rule: when initializing an explicit model, if the processor spec has an implied global idle
    # gate (e.g. "(idle)"), then the created model instead has an empty-tuple Label as the key for this op.
global_idle_name = processor_spec.global_idle_gate_name
if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):
gn_to_make_emptytup = global_idle_name
else:
gn_to_make_emptytup = None
for gn, gate_unitary in processor_spec.gate_unitaries.items():
gate_is_factory = callable(gate_unitary)
resolved_avail = processor_spec.resolved_availability(gn)
if callable(resolved_avail) or resolved_avail == '*':
assert (embed_gates), "Cannot create factories with `embed_gates=False` yet!"
key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())
allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None
gate_nQubits = processor_spec.gate_num_qubits(gn)
ideal_factory = _opfactory.EmbeddingOpFactory(
state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)
noiseop = modelnoise.create_errormap(key, evotype, state_space) # No target indices... just local errs?
factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
ret.factories[key] = factory
else: # resolved_avail is a list/tuple of available sslbls for the current gate/factory
for inds in resolved_avail: # inds are target qubit labels
key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)
if key in custom_gates: # allow custom_gates to specify gate elements directly
if isinstance(custom_gates[key], _opfactory.OpFactory):
ret.factories[key] = custom_gates[key]
elif isinstance(custom_gates[key], _op.LinearOperator):
ret.operations[key] = custom_gates[key]
else: # presumably a numpy array or something like it.
ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,
state_space) # static gates by default
continue
if gate_is_factory:
assert(embed_gates), "Cannot create factories with `embed_gates=False` yet!"
# TODO: check for modelnoise on *local* factory, i.e. create_errormap(gn, ...)??
if inds is None or inds == tuple(qubit_labels): # then no need to embed
ideal_factory = local_gates[gn]
else:
ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])
noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
ret.factories[key] = factory
else:
if inds is None or inds == tuple(qubit_labels): # then no need to embed
if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity
assert(gate_unitary == len(qubit_labels)), \
"Idle unitary as int should be on all qubits for %s" % (str(gn))
ideal_gate = _op.ComposedOp([], evotype, state_space) # (identity gate on *all* qubits)
else:
ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',
None, evotype, state_space)
else:
if embed_gates:
ideal_gate = local_gates[gn]
ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)
else:
if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity
gate_unitary = _np.identity(2**gate_unitary, 'd') # turn into explicit identity op
if gate_unitary.shape[0] == state_space.udim: # no need to embed!
embedded_unitary = gate_unitary
else:
embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)
ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',
None, evotype, state_space)
#TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??
noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate
ret.operations[key] = layer
# SPAM:
local_noise = False; independent_gates = True; independent_spam = True
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype,
state_space, independent_gates, independent_spam)
for k, v in prep_layers.items():
ret.preps[k] = v
for k, v in povm_layers.items():
ret.povms[k] = v
modelnoise.warn_about_zero_counters()
ret._clean_paramvec()
return ret
def _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):
""" local_noise=True creates lindblad ops that are embedded & composed 1Q ops, and assumes
that modelnoise specifies 1Q noise. local_noise=False assumes modelnoise specifies n-qubit noise"""
qubit_labels = processor_spec.qubit_labels
num_qubits = processor_spec.num_qubits
singleQ_state_space = _statespace.default_space_for_udim(2) # single qubit state space
# Step 1 -- get the ideal prep and POVM, created as the types we want
# Step 2 -- add noise, by composing ideal with a noise operation (if desired)
prep_layers = {}
povm_layers = {}
def _add_prep_noise(prep_ops):
""" Adds one or more noise ops to prep_ops lists (to compose later) """
if local_noise: # then assume modelnoise specifies 1Q errors
prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
if prep_noiseop1Q is not None:
err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \
if independent_gates else [prep_noiseop1Q] * num_qubits
prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
for i in range(num_qubits)])
else: # use modelnoise to construct n-qubit noise
prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,
qubit_graph=processor_spec.qubit_graph)
if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)
def _add_povm_noise(povm_ops):
""" Adds one or more noise ops to prep_ops lists (to compose later) """
if local_noise: # then assume modelnoise specifies 1Q errors
povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
if povm_noiseop1Q is not None:
err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \
if independent_gates else [povm_noiseop1Q] * num_qubits
povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
for i in range(num_qubits)])
else: # use modelnoise to construct n-qubit noise
povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,
qubit_graph=processor_spec.qubit_graph)
if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)
    def _add_to_prep_layers(ideal_prep, prep_ops):
        """ Adds noise elements to prep_layers """
        if len(prep_ops) == 0:
            prep_layers['rho0'] = ideal_prep
        elif len(prep_ops) == 1:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])
        else:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))
    def _add_to_povm_layers(ideal_povm, povm_ops):
        """ Adds noise elements to povm_layers """
        if len(povm_ops) == 0:
            povm_layers['Mdefault'] = ideal_povm
        elif len(povm_ops) == 1:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')
        else:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')
def _create_nq_noise(lndtype):
if local_noise:
# create a 1-qubit exp(errorgen) that is applied to each qubit independently
errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',
truncate=True, evotype=evotype, state_space=None)
err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],
_op.ExpErrorgenOp(errgen_1Q.copy()))
for i in range(num_qubits)], evotype, state_space)
else:
# create an n-qubit exp(errorgen)
errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',
truncate=True, evotype=evotype,
state_space=state_space)
err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)
return err_gateNQ
# Here's where the actual logic starts. The above functions avoid repeated blocks within the different
# cases below.
# Prep logic
if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0] # HACK to support multiple vals
if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):
ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)
prep_ops_to_compose = []
if ideal_prep_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM
lndtype = ideal_prep_type[len('lindblad '):]
err_gateNQ = _create_nq_noise(lndtype)
prep_ops_to_compose.append(err_gateNQ)
# Add noise
_add_prep_noise(prep_ops_to_compose)
        # Add final ops to returned dictionaries
_add_to_prep_layers(ideal_prep, prep_ops_to_compose)
elif ideal_prep_type.startswith('tensor product '):
#Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
# a 1Q noise operation, even when `local_noise=False`
vectype = ideal_prep_type[len('tensor product '):]
v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)
prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]
# Add noise
prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
if prep_noiseop1Q is not None:
prep_factors = [_state.ComposedState(
factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]
prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)
    else:  # assume ideal_prep_type is a valid 'vectype' for creating n-qubit state vectors
vectype = ideal_prep_type
vecs = [] # all the basis vectors for num_qubits
for i in range(2**num_qubits):
v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
vecs.append(v)
ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)
# Add noise
prep_ops_to_compose = []
_add_prep_noise(prep_ops_to_compose)
# Add final ops to returned dictionaries
_add_to_prep_layers(ideal_prep, prep_ops_to_compose)
# Povm logic
if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0] # HACK to support multiple vals
if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):
ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)
povm_ops_to_compose = []
if ideal_povm_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM
lndtype = ideal_povm_type[len('lindblad '):]
err_gateNQ = _create_nq_noise(lndtype)
povm_ops_to_compose.append(err_gateNQ.copy()) # .copy() => POVM errors independent
# Add noise
_add_povm_noise(povm_ops_to_compose)
#Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)
effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm
_add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)
elif ideal_povm_type.startswith('tensor product '):
#Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
# a 1Q noise operation, even when `local_noise=False`
vectype = ideal_povm_type[len('tensor product '):]
v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',
evotype, state_space=None)
povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]
# Add noise
povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
if povm_noiseop1Q is not None:
povm_factors = [_povm.ComposedPOVM(
(povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')
for factor in povm_factors]
povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)
    else:  # assume ideal_povm_type is a valid 'vectype' for creating n-qubit POVMs
vectype = ideal_povm_type
vecs = [] # all the basis vectors for num_qubits
for i in range(2**num_qubits):
v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
vecs.append(v)
ideal_povm = _povm.create_from_pure_vectors(
[(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],
vectype, 'pp', evotype, state_space=state_space)
# Add noise
povm_ops_to_compose = []
_add_povm_noise(povm_ops_to_compose)
# Add final ops to returned dictionaries
_add_to_povm_layers(ideal_povm, povm_ops_to_compose)
return prep_layers, povm_layers
def _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,
ideal_gate_type=('static standard', 'static clifford', 'static unitary')):
"""
Construct a dictionary of potentially noisy gates that act only on their target qubits.
These gates are "local" because they act only on their intended target qubits. The gates
consist of an ideal gate (obviously local, and crosstalk free) of the type given by
`ideal_gate_type` composed with a noise operation given by `modelnoise`, if one exists.
The returned dictionary contains keys for all the gate names in `processor_spec`. Custom
gate objects can be given by `custom_gates`, which override the normal gate construction.
Parameters
----------
processor_spec : ProcessorSpec
The processor to create gate operations for. This object specifies the
gate names and unitaries for the processor, among other things.
evotype : Evotype
Create gate objects with this evolution type.
modelnoise : ModelNoise, optional
Noise that should be applied after the ideal gates. This noise must
be *local* to each gate (i.e. acting on its target qubits). See the
:class:`ModelNoise` object documentation for details regarding how
        to specify different types of noise. If `None`, then no noise is added.
custom_gates : dict, optional
A dictionary of gate objects that should be placed in the returned
dictionary in lieu of objects that would normally be constructed.
Keys are gate names and values are gates.
ideal_gate_type : str or tuple, optional
A gate type or tuple of gate types (listed in order of priority) which
is used to construct the ideal gates. A gate type usually specifies the
Python class that will be created, which determines 1) the parameterization
of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
operation has no parameters and is a Clifford operation).
Returns
-------
gatedict : dict
A dictionary mapping gate names to local gate operations.
"""
std_gate_unitaries = _itgs.standard_gatename_unitaries()
if custom_gates is None: custom_gates = {}
if modelnoise is None: modelnoise = _OpModelPerOpNoise({})
    # All possible entries into the upcoming gate dictionary
    # Not just gate names, since a gate can be overridden for specific qubits
all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)
+ list(custom_gates.keys())
+ list(modelnoise.keys()))
# Cache ideal ops to ensure only one copy for each name
ideal_gates = {}
ideal_factories = {}
gatedict = _collections.OrderedDict()
for key in all_keys:
# Use custom gate directly as error gate
if key in custom_gates:
gatedict[key] = custom_gates[key]
continue
# Skip prep, and povm here, just do gates
if key in ['prep', 'povm']:
continue
# If key has qubits, get base name for lookup
label = _label.Label(key)
name = label.name
U = processor_spec.gate_unitaries[name] # all gate names must be in the processorspec
if ((name not in processor_spec.nonstd_gate_unitaries)
or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)
and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape
and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):
stdname = name # setting `stdname` != None means we can try to create a StaticStandardOp below
else:
stdname = None
if isinstance(U, (int, _np.int64)): # signals that the gate is an identity on `U` qubits
ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)
noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)
if noiseop is not None:
gatedict[key] = noiseop
else:
gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space) # (identity gate on N qubits)
elif not callable(U): # normal operation (not a factory)
ideal_gate = ideal_gates.get(name, None)
if ideal_gate is None:
ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)
ideal_gates[name] = ideal_gate
noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)
# Note: above line creates a *local* noise op, working entirely in the ideal gate's target space.
# This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired
if noiseop is None:
gatedict[key] = ideal_gate
else:
if isinstance(noiseop, _op.ComposedOp): # avoid additional nested ComposedOp if we already have one
noiseop.insert(0, ideal_gate)
gatedict[key] = noiseop
else:
gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])
else: # a factory, given by the unitary-valued function U: args -> unitary
ideal_factory = ideal_factories.get(name, None)
if ideal_factory is None:
local_state_space = _statespace.default_space_for_udim(U.shape[0]) # factory *function* SHAPE
ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)
ideal_factories[name] = ideal_factory
noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)
gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \
if (noiseop is not None) else ideal_factory
return gatedict
def create_crosstalk_free_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto',
evotype="default", simulator="auto", on_construction_error='raise',
independent_gates=False, independent_spam=True, ensure_composed_gates=False,
ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):
"""
    Create an n-qubit "crosstalk-free" model.
By virtue of being crosstalk-free, this model's operations only
act nontrivially on their target qubits. Gates consist of an ideal gate
operation possibly followed by an error operation.
Errors can be specified using any combination of the 4 error rate/coeff arguments,
    but each gate name may appear in only one type of specification.
Each specification results in a different type of operation, depending on the parameterization:
- `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
- `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)
- `lindblad_error_coeffs` -> exp(LindbladErrorgen)
In addition to the gate names, the special values `"prep"` and `"povm"` may be
    used as keys to specify the error on the state preparation and measurement, respectively.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict, optional
A dictionary that associates with gate labels
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects override any other behavior for constructing
their designated operations. Keys of this dictionary may
be string-type gate *names* or labels that include target qubits.
depolarization_strengths : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are floats that specify the strength of uniform depolarization.
stochastic_error_probs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are tuples that specify Pauli-stochastic rates for each of the non-trivial
Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).
lindblad_error_coeffs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
(Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always
have a single basis label (so key is a 2-tuple) whereas Stochastic
tuples with 1 basis label indicate a *diagonal* term, and are the
only types of terms allowed when `nonham_mode != "all"`. Otherwise,
Stochastic term tuples can include 2 basis labels to specify
"off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be
strings or integers. Values are complex coefficients.
depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
is used to parameterize the depolarization noise, respectively.
When "depolarize" (the default), a DepolarizeOp is created with the strength given
in `depolarization_strengths`. When "stochastic", the depolarization strength is split
evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
strength is split evenly among the coefficients of the stochastic error generators
(which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).
stochastic_parameterization : str of {"stochastic", or "lindblad"}
Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
are used as coefficients in a linear combination of stochastic channels (the default).
When "lindblad", the elements of `stochastic_error_probs` are coefficients of
stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
"cptp" parameterization).
lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
When not "auto", the parameterization type is passed through to the LindbladErrorgen.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The simulator used to compute predicted probabilities for the
resulting :class:`Model`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
    on_construction_error : {'raise','warn','ignore'}
What to do when the creation of a gate with the given
`parameterization` fails. Usually you'll want to `"raise"` the error.
In some cases, for example when converting as many gates as you can
into `parameterization="clifford"` gates, `"warn"` or even `"ignore"`
may be useful.
    independent_gates : bool, optional
        Whether gates are allowed to have independent local noise. If False,
        then all gates with the same name (e.g. "Gx") will have the *same*
        (local) noise (e.g. an overrotation by 1 degree), and the
        `operation_bks['gates']` dictionary contains a single key per gate
        name. If True, then gates with the same name acting on different
        qubits may have different local noise, and so the
        `operation_bks['gates']` dictionary contains a key for each
        available gate placement.
ensure_composed_gates : bool, optional
If True then the elements of the `operation_bks['gates']` will always
be :class:`ComposedOp` objects. The purpose of this is to
facilitate modifying the gate operations after the model is created.
If False, then the appropriately parameterized gate objects (often
dense gates) are used directly.
ideal_gate_type : str or tuple, optional
A gate type or tuple of gate types (listed in order of priority) which
is used to construct the ideal gates. A gate type usually specifies the
Python class that will be created, which determines 1) the parameterization
of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
operation has no parameters and is a Clifford operation).
ideal_spam_type : str or tuple, optional
Similar to `ideal_gate_type` but for SPAM elements (state preparations
and POVMs).
implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
Returns
-------
LocalNoiseModel
A model with `"rho0"` prep, `"Mdefault"` POVM, and gates labeled by
the gate names and qubit labels (as specified by `processor_spec`).
For instance, the operation label for the `"Gx"` gate on the second
qubit might be `Label("Gx",1)`.
"""
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=False)
return _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, on_construction_error, independent_gates, independent_spam,
ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,
implicit_idle_mode)
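# Illustrative usage sketch (not part of the original module): a crosstalk-free
# model mixing all three noise-specification arguments, one per gate name as
# required above.  The QubitProcessorSpec import path, gate names, geometry,
# and error rates are assumptions for illustration.
def _example_crosstalk_free_model():
    from pygsti.processors import QubitProcessorSpec  # assumed import path
    pspec = QubitProcessorSpec(2, ['Gxpi2', 'Gypi2', 'Gcnot'], geometry='line')
    return create_crosstalk_free_model(
        pspec,
        depolarization_strengths={'Gxpi2': 0.01},                # -> DepolarizeOp
        stochastic_error_probs={'Gypi2': (0.01, 0.005, 0.005)},  # -> StochasticNoiseOp (X,Y,Z rates)
        lindblad_error_coeffs={'Gcnot': {('H', 'ZZ'): 0.02}})    # -> exp(LindbladErrorgen)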
def _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
on_construction_error='raise', independent_gates=False, independent_spam=True,
ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',
ideal_povm_type='auto', implicit_idle_mode='none'):
"""
    Create an n-qubit "crosstalk-free" model.
    Similar to :meth:`create_crosstalk_free_model` but the noise is input more generally,
as a :class:`ModelNoise` object. Arguments are the same as this function except that
`modelnoise` is given instead of several more specific noise-describing arguments.
Returns
-------
LocalNoiseModel
"""
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels)
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
if ideal_gate_type == "auto":
ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
if ideal_prep_type == "auto":
ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
if ideal_povm_type == "auto":
ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
gatedict = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)
# (Note: global idle is now handled through processor-spec processing)
# SPAM:
local_noise = True
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
ideal_prep_type, ideal_povm_type, evotype,
state_space, independent_gates, independent_spam)
modelnoise.warn_about_zero_counters()
return _LocalNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
evotype, simulator, on_construction_error,
independent_gates, ensure_composed_gates,
implicit_idle_mode)
def create_cloud_crosstalk_model(processor_spec, custom_gates=None,
depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
lindblad_parameterization='auto', evotype="default", simulator="auto",
independent_gates=False, independent_spam=True, errcomp_type="gates",
implicit_idle_mode="none", verbosity=0):
"""
    Create an n-qubit "cloud-crosstalk" model.
In a cloud crosstalk model, gates consist of a (local) ideal gates followed
by an error operation that can act nontrivially on *any* of the processor's qubits
(not just a gate's target qubits). Typically a gate's errors are specified
relative to the gate's target qubits, forming a "cloud" of errors around the
target qubits using some notion of locality (that may not be spatial, e.g.
local in frequency). Currently, the "ideal" portion of each gate can only be
created as a *static* (parameterless) object -- all gate parameters come from
the error operation.
Errors can be specified using any combination of the 4 error rate/coeff arguments,
    but each gate name may appear in only one type of specification.
Each specification results in a different type of operation, depending on the parameterization:
- `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
- `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)
- `lindblad_error_coeffs` -> exp(LindbladErrorgen)
In addition to the gate names, the special values `"prep"` and `"povm"` may be
    used as keys to specify the error on the state preparation and measurement, respectively.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict, optional
A dictionary that associates with gate labels
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects override any other behavior for constructing
their designated operations. Keys of this dictionary may
be string-type gate *names* or labels that include target qubits.
depolarization_strengths : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are floats that specify the strength of uniform depolarization.
stochastic_error_probs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are tuples that specify Pauli-stochastic rates for each of the non-trivial
Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).
lindblad_error_coeffs : dict, optional
A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
(Stochastic), or `"A"` (Affine). Hamiltonian and Affine terms always
have a single basis label (so key is a 2-tuple) whereas Stochastic
tuples with 1 basis label indicate a *diagonal* term, and are the
only types of terms allowed when `nonham_mode != "all"`. Otherwise,
Stochastic term tuples can include 2 basis labels to specify
"off-diagonal" non-Hamiltonian Lindblad terms. Basis labels can be
strings or integers. Values are complex coefficients.
depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
is used to parameterize the depolarization noise, respectively.
When "depolarize" (the default), a DepolarizeOp is created with the strength given
in `depolarization_strengths`. When "stochastic", the depolarization strength is split
evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
strength is split evenly among the coefficients of the stochastic error generators
(which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).
stochastic_parameterization : str of {"stochastic", or "lindblad"}
Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
are used as coefficients in a linear combination of stochastic channels (the default).
When "lindblad", the elements of `stochastic_error_probs` are coefficients of
stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
"cptp" parameterization).
lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
When not "auto", the parameterization type is passed through to the LindbladErrorgen.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The simulator used to compute predicted probabilities for the
resulting :class:`Model`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
independent_gates : bool, optional
Whether gates are allowed independent noise or not. If False,
then all gates with the same name (e.g. "Gx") will have the *same*
noise (e.g. an overrotation by 1 degree), and the
        `operation_bks['cloudnoise']` dictionary will contain a single key per gate
name. If True, then gates with the same name acting on different
qubits may have different local noise, and so the
        `operation_bks['cloudnoise']` dictionary contains a key for each
        available gate placement.
independent_spam : bool, optional
        Similar to `independent_gates` but for SPAM operations.
errcomp_type : {'gates', 'errorgens'}
Whether errors should be combined by composing error maps (`gates`) or by
exponentiating the sum of error generators (composing the error generators,
`errorgens`). The latter is only an option when the noise is given solely
in terms of Lindblad error coefficients.
implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
verbosity : int or VerbosityPrinter, optional
Amount of detail to print to stdout.
Returns
-------
CloudNoiseModel
"""
modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization,
lindblad_parameterization, allow_nonlocal=True)
return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates, evotype,
simulator, independent_gates, independent_spam, errcomp_type,
implicit_idle_mode, verbosity)
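# --- Editor's addition: hedged usage sketch ----------------------------------
# A minimal example of building a cloud-crosstalk model from Lindblad error
# coefficients. The `QubitProcessorSpec` constructor call and the gate names
# below are assumptions for illustration only; check pyGSTi's documentation
# for the exact signature in your version.
def _example_create_cloud_crosstalk_model():
    import pygsti
    pspec = pygsti.processors.QubitProcessorSpec(2, ['Gxpi2', 'Gypi2', 'Gcnot'],
                                                 geometry='line')
    return create_cloud_crosstalk_model(
        pspec,
        lindblad_error_coeffs={'Gxpi2': {('H', 'X'): 0.01, ('S', 'X'): 0.001}},
        errcomp_type='errorgens')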
def _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,
evotype="default", simulator="auto", independent_gates=False,
independent_spam=True, errcomp_type="errorgens",
implicit_idle_mode="none", verbosity=0):
"""
Create a n-qubit "cloud-crosstalk" model.
Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally,
as a :class:`ModelNoise` object. Arguments are the same as this function except that
`modelnoise` is given instead of several more specific noise-describing arguments.
Returns
-------
CloudNoiseModel
"""
qubit_labels = processor_spec.qubit_labels
state_space = _statespace.QubitSpace(qubit_labels) # FUTURE: allow other types of state spaces somehow?
evotype = _Evotype.cast(evotype)
modelnoise = _OpModelNoise.cast(modelnoise)
modelnoise.reset_access_counters()
printer = _VerbosityPrinter.create_printer(verbosity)
#Create static ideal gates without any noise (we use `modelnoise` further down)
gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,
ideal_gate_type=('static standard', 'static clifford', 'static unitary'))
stencils = _collections.OrderedDict()
# (Note: global idle is now processed with other processorspec gates)
# SPAM
local_noise = False
prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
'computational', 'computational', evotype, state_space,
independent_gates, independent_spam)
if errcomp_type == 'gates':
create_stencil_fn = modelnoise.create_errormap_stencil
apply_stencil_fn = modelnoise.apply_errormap_stencil
elif errcomp_type == 'errorgens':
create_stencil_fn = modelnoise.create_errorgen_stencil
apply_stencil_fn = modelnoise.apply_errorgen_stencil
else:
raise ValueError("Invalid `errcomp_type` value: %s" % str(errcomp_type))
def build_cloudnoise_fn(lbl):
# lbl will be for a particular gate and target qubits. If we have error rates for this specific gate
# and target qubits (i.e this primitive layer op) then we should build it directly (and independently,
# regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil
# for this gate, then we should use it to construct the output, using a copy when gates are independent
# and a reference to the *same* stencil operations when `independent_gates==False`.
num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
if lbl in modelnoise:
stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
elif lbl.name in stencils:
stencil = stencils[lbl.name]
elif lbl.name in modelnoise:
stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
stencil = stencils[lbl.name]
else:
return None # no cloudnoise error for this label
return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,
qubit_graph=processor_spec.qubit_graph,
copy=independent_gates and (lbl not in modelnoise)) # no need to copy if first case
def build_cloudkey_fn(lbl):
num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
if lbl in modelnoise:
stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
elif lbl.name in stencils:
stencil = stencils[lbl.name]
elif lbl.name in modelnoise:
stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
stencil = stencils[lbl.name]
else:
# simple cloud-key when there is no cloud noise
return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
#Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error
# touches and form this into a key
cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,
processor_spec.qubit_graph)
hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls))) # (sets are unhashable)
return cloud_key
ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
build_cloudnoise_fn, build_cloudkey_fn,
simulator, evotype, errcomp_type,
implicit_idle_mode, printer)
modelnoise.warn_about_zero_counters() # must do this after model creation so build_ fns have been run
return ret
def create_cloud_crosstalk_model_from_hops_and_weights(
processor_spec, custom_gates=None,
max_idle_weight=1, max_spam_weight=1,
maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,
simulator="auto", evotype='default',
gate_type="H+S", spam_type="H+S",
implicit_idle_mode="none", errcomp_type="gates",
independent_gates=True, independent_spam=True,
connected_highweight_errors=True,
verbosity=0):
"""
Create a "cloud crosstalk" model based on maximum error weights and hops along the processor's qubit graph.
This function provides a convenient way to construct cloud crosstalk models whose gate errors
consist of Pauli elementary error generators (i.e. that correspond to Lindblad error coefficients)
that are limited in weight (number of non-identity Paulis) and support (which qubits have non-trivial
Paulis on them). Errors are taken to be approximately local, meaning they are concentrated near the
target qubits of a gate, with the notion of locality taken from the processor specification's qubit graph.
The caller provides maximum-weight, maximum-hop (a "hop" is the movement along a single graph edge), and
gate type arguments to specify the set of possible errors on a gate.
- The global idle gate (corresponding to an empty circuit layer) has errors that are limited only by
a maximum weight, `max_idle_weight`.
- State preparation and POVM errors are constructed similarly, with a global-idle-like error following
or preceding the preparation or measurement, respectively.
- Gate errors are placed on all the qubits that can be reached with at most `maxhops` hops from (any of)
the gate's target qubits. Elementary error generators up to weight `W`, where `W` equals the number
of target qubits (e.g., 2 for a CNOT gate) plus `extra_gate_weight` are allowed. Weight-1 terms
are a special case, and the `extra_weight_1_hops` argument adds to the usual `maxhops` in this case
to allow weight-1 errors on a possibly larger region of qubits around the target qubits.
Parameters
----------
processor_spec : ProcessorSpec
The processor specification to create a model for. This object specifies the
gate names and unitaries for the processor, and their availability on the
processor.
custom_gates : dict
        A dictionary that associates gate labels with
:class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
objects. These objects describe the full action of the gate or
primitive-layer they're labeled by (so if the model represents
states by density matrices these objects are superoperators, not
unitaries), and override any standard construction based on builtin
gate names or `nonstd_gate_unitaries`. Keys of this dictionary must
be string-type gate *names* -- they cannot include state space labels
-- and they must be *static* (have zero parameters) because they
represent only the ideal behavior of each gate -- the cloudnoise
operations represent the parameterized noise. To fine-tune how this
noise is parameterized, call the :class:`CloudNoiseModel` constructor
directly.
max_idle_weight : int, optional
The maximum-weight for errors on the global idle gate.
max_spam_weight : int, optional
The maximum-weight for state preparation and measurement (SPAM) errors.
maxhops : int
The locality constraint: for a gate, errors (of weight up to the
maximum weight for the gate) are allowed to occur on the gate's
target qubits and those reachable by hopping at most `maxhops` times
from a target qubit along nearest-neighbor links (defined by the
`geometry`).
extra_weight_1_hops : int, optional
Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
can be useful for allowing just weight-1 errors (of which there are
relatively few) to be dispersed farther from a gate's target qubits.
For example, a crosstalk-detecting model might use this.
extra_gate_weight : int, optional
        Additional weight, beyond the number of target qubits (taken as a "base
weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
this equals 1, for instance, then 1-qubit gates can have up to weight-2
errors and 2-qubit gates can have up to weight-3 errors.
simulator : ForwardSimulator or {"auto", "matrix", "map"}
The circuit simulator used to compute any
requested probabilities, e.g. from :method:`probs` or
:method:`bulk_probs`. Using `"auto"` selects `"matrix"` when there
are 2 qubits or less, and otherwise selects `"map"`.
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
gate_type : str, optional
The Lindblad-error parameterization type used for gate operations. This
may be expanded in the future, but currently the gate errors *must* be of
the Lindblad error-generator coefficients type, and this argument specifies
what elementary error-generator coefficients are initially allowed (and linked to
model parameters), before maximum-weight and locality constraints are imposed.
In addition to the usual Lindblad error types, (e.g. `"H"`, `"H+S"`) the special
        value `"none"` is allowed to indicate that there should be no errors on the gates
(useful if you only want errors on the SPAM, for instance).
spam_type : str, optional
Similar to `gate_type` but for SPAM elements (state preparations
and POVMs). This specifies the Lindblad-error parameterization for the
        state preparation and POVM.
implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
doesn't add any "extra" idle operations when there is a layer that contains some
gates but not gates on all the qubits. `"add_global"` adds the global idle operation,
i.e., the operation for a global idle layer (zero gates - a completely empty layer),
to every layer that is simulated, using the global idle as a background idle that always
occurs regardless of the operation.
errcomp_type : {"gates","errorgens"}
How errors are composed when creating layer operations in the created
model. `"gates"` means that the errors on multiple gates in a single
layer are composed as separate and subsequent processes. Specifically,
the layer operation has the form `Composed(target,idleErr,cloudErr)`
where `target` is a composition of all the ideal gate operations in the
layer, `idleErr` is the global idle error if `implicit_idle_mode == 'add_global'`,
and `cloudErr` is the composition (ordered as layer-label) of cloud-
noise contributions, i.e. a map that acts as the product of exponentiated
error-generator matrices. `"errorgens"` means that layer operations
have the form `Composed(target, error)` where `target` is as above and
`error` results from composing (summing) the idle and cloud-noise error
*generators*, i.e. a map that acts as the exponentiated sum of error
generators (ordering is irrelevant in this case).
independent_gates : bool, optional
Whether the noise added to a gate when it acts on one set of target
qubits is independent of its noise on a different set of target qubits.
If False, then all gates with the same name (e.g. "Gx") will be constrained
to having the *same* noise on the cloud around the target qubits (even though
the target qubits and cloud are different). If True, then gate noise operations
for different sets of target qubits are independent.
independent_spam : bool, optional
Similar to `independent_gates` but for state preparation and measurement operations.
When `False`, the noise applied to each set (individual or pair or triple etc.) of
qubits must be the same, e.g., if the state preparation is a perfect preparation followed
by a single-qubit rotation then this rotation must be by the *same* angle on all of
the qubits.
connected_highweight_errors : bool, optional
An additional constraint regarding high-weight errors. When `True`, only high weight
(weight 2+) elementary error generators whose non-trivial Paulis occupy a *connected*
portion of the qubit graph are allowed. For example, if the qubit graph is a 1D chain
of 4 qubits, 1-2-3-4, and weight-2 errors are allowed on a single-qubit gate with
target = qubit-2, then weight-2 errors on 1-2 and 2-3 would be allowed, but errors on
1-3 would be forbidden. When `False`, no constraint is imposed.
verbosity : int or VerbosityPrinter, optional
        An integer >= 0 dictating how much output to send to stdout.
Returns
-------
CloudNoiseModel
"""
# construct noise specifications for the cloudnoise model
modelnoise = {}
all_qubit_labels = processor_spec.qubit_labels
conn = connected_highweight_errors # shorthand: whether high-weight errors must be connected on the graph
global_idle_name = processor_spec.global_idle_gate_name
# Global Idle
if max_idle_weight > 0:
assert(global_idle_name is not None), \
"`max_idle_weight` must equal 0 for processor specs without a global idle gate!"
#printer.log("Creating Idle:")
wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]
modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,
gate_type, conn)
# SPAM
if max_spam_weight > 0:
wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]
modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
# Gates
weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
[(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
[(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
for gatenm, gate_unitary in processor_spec.gate_unitaries.items():
if gatenm == global_idle_name: continue # processed above
gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \
else int(round(_np.log2(gate_unitary.shape[0]))) # NOTE: integer gate_unitary => idle on n qubits
if gate_nQubits not in (1, 2):
raise ValueError("Only 1- and 2-qubit gates are supported. %s acts on %d qubits!"
% (str(gatenm), gate_nQubits))
weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q
target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')
modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,
gate_type, conn)
return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,
evotype, simulator, independent_gates, independent_spam,
errcomp_type, implicit_idle_mode, verbosity)
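# --- Editor's addition: hedged usage sketch ----------------------------------
# Builds a hops-and-weights model in which each gate may additionally have
# weight-1 errors one extra hop away from its targets. The processor-spec
# call is an assumption for illustration only.
def _example_hops_and_weights_model():
    import pygsti
    pspec = pygsti.processors.QubitProcessorSpec(4, ['Gxpi2', 'Gypi2', 'Gcnot'],
                                                 geometry='line')
    return create_cloud_crosstalk_model_from_hops_and_weights(
        pspec, maxhops=1, extra_weight_1_hops=1, extra_gate_weight=0,
        gate_type='H+S', spam_type='H+S')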
def _iter_basis_inds(weight):
""" Iterate over product of `weight` non-identity Pauli 1Q basis indices """
basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els
for basisInds in _itertools.product(*basisIndList):
yield basisInds
def _pauli_product_matrix(sigma_inds):
"""
Construct the Pauli product matrix from the given `sigma_inds`
Parameters
----------
sigma_inds : iterable
A sequence of integers in the range [0,3] corresponding to the
I, X, Y, Z Pauli basis matrices.
Returns
-------
numpy.ndarray or scipy.sparse.csr_matrix
"""
sigmaVec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)
M = _np.identity(1, 'complex')
for i in sigma_inds:
M = _np.kron(M, sigmaVec[i])
return M
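# Editor's addition: a quick sanity example for `_pauli_product_matrix`. Each
# factor carries a 1/sqrt(2) normalization, so a weight-2 product equals
# (sigma_a (x) sigma_b) / 2.
def _example_pauli_product_matrix():
    M = _pauli_product_matrix((1, 3))  # X on the first factor, Z on the second
    assert M.shape == (4, 4)
    return M  # equals numpy.kron(sigmax, sigmaz) / 2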
def _construct_restricted_weight_pauli_basis(wt, sparse=False):
basisEl_Id = _pauli_product_matrix(_np.zeros(wt, _np.int64))
errbasis = [basisEl_Id]
errbasis_lbls = ['I']
for err_basis_inds in _iter_basis_inds(wt):
error = _np.array(err_basis_inds, _np.int64) # length == wt
basisEl = _pauli_product_matrix(error)
errbasis.append(basisEl)
errbasis_lbls.append(''.join(["IXYZ"[i] for i in err_basis_inds]))
#printer.log("Error on qubits %s -> error basis of length %d" % (err_qubit_inds, len(errbasis)), 3)
return _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)
def _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):
# This function:
# loop over all size-`wt` *connected* combinations, `err_qubit_inds`, of the qubit indices in
# `possible_err_qubit_inds`
# - construct a local weight-`wt` Pauli basis & corresponding LindbladErrorgen on `wt` qubits
# => replace with: opnoise.create_errorgen(evotype, state_space=None) where opnoise is for a wt-qubit op
# - embed this constructed local error onto `err_qubit_inds`
# - append embedded error onto running list
#
# Noise object structure:
# OpModelPerOpNoise( { op_key/'idle': { sslbls : opnoise } } )
# where sslbls can be absolute labels or stencil labels
# -- could have a fn that spreads a single opnoise onto all the sslbls
# given by size-`wt` connected combos of `possible_err_qubit_inds` - this would work for independent clouds
# -- have LindbladNoiseDict and another LindbladPauliAtWeight (?) noise objects,
# since we want to specify a lindblad noise by giving a weight and an initial basis (Pauli here)
# To build a cloudnoise model from hops & weights:
modelnoise_dict = {}
if lnd_parameterization == 'none' or lnd_parameterization is None:
return {} # special case when we don't want any error parameterization
for wt, max_hops in weight_maxhops_tuples:
if max_hops is None or max_hops == 0: # Note: maxHops not used in this case
stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)
else:
stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)
local_state_space = _statespace.default_space_for_num_qubits(wt)
modelnoise_dict[stencil_lbl] = _LindbladNoise.from_basis_coefficients(
lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),
local_state_space)
return modelnoise_dict
def _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,
allow_nonlocal):
modelnoises = []
if depolarization_strengths is not None:
noise_dict = {}
for lbl, val in depolarization_strengths.items():
if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications
if not allow_nonlocal: raise ValueError("Nonlocal depolarization strengths not allowed!")
noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}
else:
noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)
modelnoises.append(_OpModelPerOpNoise(noise_dict))
if stochastic_error_probs is not None:
noise_dict = {}
for lbl, val in stochastic_error_probs.items():
if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications
if not allow_nonlocal: raise ValueError("Nonlocal stochastic error probs not allowed!")
noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}
else:
noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)
modelnoises.append(_OpModelPerOpNoise(noise_dict))
if lindblad_error_coeffs is not None:
if not allow_nonlocal: # the easy case
modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)
for lbl, val in lindblad_error_coeffs.items()}))
else: # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'
def process_stencil_labels(flat_lindblad_errs):
nonlocal_errors = _collections.OrderedDict()
local_errors = _collections.OrderedDict()
for nm, val in flat_lindblad_errs.items():
if isinstance(nm, str): nm = (nm[0], nm[1:]) # e.g. "HXX" => ('H','XX')
err_typ, basisEls = nm[0], nm[1:]
sslbls = None
local_nm = [err_typ]
for bel in basisEls: # e.g. bel could be "X:Q0" or "XX:Q0,Q1"
# OR "X:<n>" where n indexes a target qubit or "X:<dir>" where dir indicates
# a graph *direction*, e.g. "up"
if ':' in bel:
bel_name, bel_sslbls = bel.split(':') # should have form <name>:<comma-separated-sslbls>
bel_sslbls = bel_sslbls.split(',') # e.g. ('Q0','Q1')
integerized_sslbls = []
for ssl in bel_sslbls:
                                try: integerized_sslbls.append(int(ssl))
                                except ValueError: integerized_sslbls.append(ssl)
bel_sslbls = tuple(integerized_sslbls)
else:
bel_name = bel
bel_sslbls = None
if sslbls is None:
sslbls = bel_sslbls
else:
#Note: sslbls should always be the same if there are multiple basisEls,
# i.e for nm == ('S',bel1,bel2)
assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \
"All basis elements of the same error term must operate on the *same* state!"
local_nm.append(bel_name) # drop the state space labels, e.g. "XY:Q0,Q1" => "XY"
# keep track of errors by the qubits they act on, as only each such
                    # set will have its own LindbladErrorgen
local_nm = tuple(local_nm) # so it's hashable
if sslbls is not None:
sslbls = tuple(sorted(sslbls))
if sslbls not in nonlocal_errors:
nonlocal_errors[sslbls] = _collections.OrderedDict()
if local_nm in nonlocal_errors[sslbls]:
nonlocal_errors[sslbls][local_nm] += val
else:
nonlocal_errors[sslbls][local_nm] = val
else:
if local_nm in local_errors:
local_errors[local_nm] += val
else:
local_errors[local_nm] = val
if len(nonlocal_errors) == 0:
return _LindbladNoise(local_errors, lindblad_parameterization)
else:
all_errors = []
if len(local_errors) > 0:
all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))
for sslbls, errdict in nonlocal_errors.items():
all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))
return _collections.OrderedDict(all_errors)
modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)
for lbl, val in lindblad_error_coeffs.items()}))
return _ComposedOpModelNoise(modelnoises)
@_deprecated_fn("This function is overly specific and will be removed soon.")
def _nparams_xycnot_cloudnoise_model(num_qubits, geometry="line", max_idle_weight=1, maxhops=0,
extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,
independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):
"""
Compute the number of parameters in a particular :class:`CloudNoiseModel`.
Returns the number of parameters in the :class:`CloudNoiseModel` containing
X(pi/2), Y(pi/2) and CNOT gates using the specified arguments without
actually constructing the model (useful for considering parameter-count
scaling).
Parameters
----------
num_qubits : int
The total number of qubits.
geometry : {"line","ring","grid","torus"} or QubitGraph
The type of connectivity among the qubits, specifying a
graph used to define neighbor relationships. Alternatively,
a :class:`QubitGraph` object may be passed directly.
max_idle_weight : int, optional
The maximum-weight for errors on the global idle gate.
maxhops : int
The locality constraint: for a gate, errors (of weight up to the
maximum weight for the gate) are allowed to occur on the gate's
target qubits and those reachable by hopping at most `maxhops` times
from a target qubit along nearest-neighbor links (defined by the
`geometry`).
extra_weight_1_hops : int, optional
Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
can be useful for allowing just weight-1 errors (of which there are
relatively few) to be dispersed farther from a gate's target qubits.
For example, a crosstalk-detecting model might use this.
extra_gate_weight : int, optional
        Additional weight, beyond the number of target qubits (taken as a "base
weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
this equals 1, for instance, then 1-qubit gates can have up to weight-2
errors and 2-qubit gates can have up to weight-3 errors.
require_connected : bool, optional
If True, then high-weight errors only occur on connected (via `geometry`) qubits.
For example in a line of qubits there would not be weight-2 errors on qubits 1 and 3.
independent_1q_gates : bool, optional
If True, 1Q gates on different qubits have separate (distinct) parameters. If
False, the 1Q gates of each type (e.g. an pi/2 X gate) for different qubits share
the same set of parameters.
zz_only : bool, optional
If True, the only high-weight errors allowed are of "Z^n" type.
bidirectional_cnots : bool
Whether CNOT gates can be performed in either direction (and each direction should
        be treated as an independent gate).
verbosity : int, optional
An integer >= 0 dictating how much output to send to stdout.
Returns
-------
int
"""
# noise can be either a seed or a random array that is long enough to use
printer = _VerbosityPrinter.create_printer(verbosity)
printer.log("Computing parameters for a %d-qubit %s model" % (num_qubits, geometry))
qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)
#printer.log("Created qubit graph:\n"+str(qubitGraph))
def idle_count_nparams(max_weight):
"""Parameter count of a `build_nqn_global_idle`-constructed gate"""
ret = 0
possible_err_qubit_inds = _np.arange(num_qubits)
for wt in range(1, max_weight + 1):
nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)
else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt
nErrParams = 2 * basisSizeWoutId # H+S terms
ret += nErrTargetLocations * nErrParams
return ret
def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):
"""Parameter count of a `build_nqn_composed_gate`-constructed gate"""
ret = 0
#Note: no contrib from idle noise (already parameterized)
for wt, maxHops in weight_maxhops_tuples:
possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)
if require_connected:
nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
else:
nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)
if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)
else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt
nErrParams = 2 * basisSizeWoutId # H+S terms
if debug:
print(" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d" %
(wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,
nErrParams, nErrTargetLocations * nErrParams))
ret += nErrTargetLocations * nErrParams
return ret
nParams = _collections.OrderedDict()
printer.log("Creating Idle:")
nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)
#1Q gates: X(pi/2) & Y(pi/2) on each qubit
weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
[(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
if independent_1q_gates:
for i in range(num_qubits):
printer.log("Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!" % i)
nParams[_label.Label("Gx", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
nParams[_label.Label("Gy", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
else:
printer.log("Creating common 1Q X(pi/2) and Y(pi/2) gates")
rep = int(num_qubits / 2)
nParams[_label.Label("Gxrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
nParams[_label.Label("Gyrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
#2Q gates: CNOT gates along each graph edge
weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
[(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
seen_pairs = set()
for i, j in qubitGraph.edges(): # note: all edges have i<j so "control" of CNOT is always lower index (arbitrary)
if bidirectional_cnots is False:
ordered_tup = (i, j) if i <= j else (j, i)
if ordered_tup in seen_pairs: continue
else: seen_pairs.add(ordered_tup)
printer.log("Creating CNOT gate between qubits %d and %d!!" % (i, j))
nParams[_label.Label("Gcnot", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)
#SPAM
nPOVM_1Q = 4 # params for a single 1Q POVM
nParams[_label.Label('rho0')] = 3 * num_qubits # 3 b/c each component is TP
nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits # num_qubits 1Q-POVMs
return nParams, sum(nParams.values())
| 53.577069 | 120 | 0.64688 |
f723e84d3365845116c25dda9340902ab173b6cd | 799 | py | Python | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | """
This module provides data loaders and transformers for popular vision datasets.
"""
from .mscoco import COCOSegmentation
from .cityscapes import CitySegmentation
from .ade import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .sbu_shadow import SBUSegmentation
from .ycb import YCBSegmentation
from .robocup import RobocupSegmentation
datasets = {
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'coco': COCOSegmentation,
'citys': CitySegmentation,
'sbu': SBUSegmentation,
'ycb': YCBSegmentation,
'robocup': RobocupSegmentation,
}
def get_segmentation_dataset(name, **kwargs):
"""Segmentation Datasets"""
return datasets[name.lower()](**kwargs)
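# Editor's addition: hedged usage sketch. Keyword arguments are forwarded to
# the chosen dataset's constructor; `split` is a typical kwarg but is
# dataset-specific, so verify against the target class before use.
def _example_get_dataset():
    return get_segmentation_dataset('pascal_voc', split='train')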
| 28.535714 | 79 | 0.767209 |
f724061c6d8dbec2acb290ec23c12a1f23882924 | 2,260 | py | Python | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | scripts/quickstart_tooling_dpg/template/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import re
from setuptools import setup, find_packages
# Change PACKAGE_NAME to rename the package folder and distribution name
PACKAGE_NAME = "{{ package_name }}"
PACKAGE_PPRINT_NAME = "{{ package_pprint_name }}"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace("-", "/")
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
version = re.search(
r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
).group(1)
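# e.g. a line `VERSION = "1.0.0b1"` in _version.py yields version == "1.0.0b1"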
if not version:
raise RuntimeError("Cannot find version information")
setup(
name=PACKAGE_NAME,
version=version,
description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
license="MIT License",
author="Microsoft Corporation",
author_email="azpysdkhelp@microsoft.com",
url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/{{ folder_parent }}/{{ package_name }}",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
packages=find_packages(
exclude=[
# Exclude packages that will be covered by PEP420 or nspkg
"{{ folder_first }}",
"{{ folder_first }}.{{ folder_second }}",
]
),
install_requires=[
"azure-core<2.0.0,>=1.23.0",
"msrest>=0.6.21",
],
python_requires=">=3.6",
)
| 35.3125 | 109 | 0.592035 |
f724310ed89a048b1602d1084baca21f8eecd141 | 1,427 | py | Python | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 12 | 2021-01-18T20:38:21.000Z | 2022-03-29T15:01:10.000Z | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 101 | 2020-12-14T15:23:07.000Z | 2022-03-31T17:06:19.000Z | examples/PsyNeuLink/SimpleLinear-conditional.reconstructed.py | singular-value/MDF | 227216ffb2c9beea8539829b0b891196787d33ee | [
"Apache-2.0"
] | 15 | 2020-12-04T22:37:14.000Z | 2022-03-31T09:48:03.000Z | import psyneulink as pnl
comp = pnl.Composition(name="comp")
A = pnl.TransferMechanism(
name="A",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
B = pnl.TransferMechanism(
name="B",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
C = pnl.TransferMechanism(
name="C",
function=pnl.Linear(default_variable=[[0]]),
termination_measure=pnl.Distance(
metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
),
)
comp.add_node(A)
comp.add_node(B)
comp.add_node(C)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from A[RESULT] to B[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=A,
receiver=B,
)
comp.add_projection(
projection=pnl.MappingProjection(
name="MappingProjection from B[RESULT] to C[InputPort-0]",
function=pnl.LinearMatrix(matrix=[[1.0]]),
),
sender=B,
receiver=C,
)
comp.scheduler.add_condition(A, pnl.AtNCalls(A, 0))
comp.scheduler.add_condition(B, pnl.Always())
comp.scheduler.add_condition(C, pnl.EveryNCalls(B, 5))
comp.scheduler.termination_conds = {
pnl.TimeScale.RUN: pnl.Never(),
pnl.TimeScale.TRIAL: pnl.AllHaveRun(),
}
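# Editor's addition: hedged usage sketch. With the conditions above, A fires
# once (AtNCalls(A, 0)), B runs on every pass, and C runs only after every 5
# calls to B. A run might look like (input shape follows A's default_variable):
#
#   comp.run(inputs={A: [[1.0]]}, num_trials=1)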
| 25.482143 | 66 | 0.669236 |
f72442631484d92c7dcede95d1d98e464d00507c | 3,779 | py | Python | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | 1 | 2021-03-25T11:48:20.000Z | 2021-03-25T11:48:20.000Z | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | null | null | null | Fancy_aggregations/moderate_deviations.py | iosurodri/Fancy_aggregations | 647019452a074767706893ecdd431a3ee503b554 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File containing different functions to aggregate data using Moderate Deviations. The expressions have been obtained from the following paper:
A.H. Altalhi, J.I. Forcén, M. Pagola, E. Barrenechea, H. Bustince, Zdenko Takáč,
Moderate deviation and restricted equivalence functions for measuring similarity between data,
Information Sciences,
Volume 501,
2019,
Pages 19-29,
ISSN 0020-0255,
https://doi.org/10.1016/j.ins.2019.05.078.
(http://www.sciencedirect.com/science/article/pii/S0020025519305031)
Please, cite accordingly.
@author: Javier Fumanal Idocin (UPNA).
To suggest changes or submit new code please use the github page.
"""
import numpy as np
# =============================================================================
# ~ MODERATE DEVIATIONS
# =============================================================================
def custom_distance(x, y, Mp, Mn, R1, R2):
'''
    :param R1: restricted equivalence function applied when x <= y.
    :param R2: restricted equivalence function applied when x > y.
    :return: the moderate deviation between x and y.
'''
if x <= y:
return Mp - Mp*R1(x, y)
else:
return Mn*R2(x,y) - Mn
def custom_distance_morphs(x, y, Mp, Mn, F1, F2, T1, T2):
'''
TODO, and will probably stay like that for long.
:param x:
:param y:
:param Mp:
:param Mn:
:param F1:
:param F2:
:param T1:
:param T2:
:return:
'''
pass
def distance_f1(x, y, Mp, Mn):
'''
    :return: Mp*(y - x)**2 if x <= y, else Mn*(y*y - x*x).
'''
if x <= y:
return Mp*(y - x)*(y - x)
else:
return Mn*(y*y - x*x)
def distance_f2(x, y, Mp, Mn):
'''
    :return: Mp*(y - x) if x <= y, else Mn*(y - x).
'''
if x <= y:
return Mp*(y - x)
else:
return Mn*(y - x)
def cut_point(D, x_sigma, Mp, Mn):
k = -1
for ix, element in enumerate(x_sigma):
if ix < len(x_sigma) - 1:
con1 = np.sum([D(x_sigma[i], element, Mp, Mn) for i in range(len(x_sigma))]) <= 0
cond2 = np.sum([D(x_sigma[i], x_sigma[ix + 1], Mp, Mn) for i in range(len(x_sigma))]) >= 0
if con1 and cond2:
k = ix
return k
def moderate_deviation_f(X, D=distance_f2, Mp=1, Mn=1, axis=0):
    '''
    Moderate-deviation aggregation (weighted-mean form) of X for deviation function D.
    '''
n = len(X)
x_sigma = np.sort(X, axis=0)
k = cut_point(D, x_sigma, Mp, Mn)
    # weight the (k+1) smallest sorted values by Mp and the remaining (n-k-1)
    # by Mn, matching the split used in moderate_deviation_eq
    f = (Mp * np.sum(x_sigma[0:k+1]) + Mn*np.sum(x_sigma[k+1:])) / ((k + 1)*Mp + (n - k - 1)*Mn)
return f
def moderate_deviation_eq(X, D=distance_f1, Mp=1, Mn=1):
    '''
    Solve the quadratic equation defined by summed `distance_f1` deviations; returns both roots.
    '''
n = len(X)
x_sigma = np.sort(X)
    k = cut_point(D, x_sigma, Mp, Mn)
a = (k+1)*Mp + (n - k-1)*Mn
b = -2*Mp*np.sum(x_sigma[0:k+1])
x_sigma_squared = np.power(x_sigma, 2)
c = Mp*np.sum(x_sigma_squared[0:k+1]) - Mn*np.sum(x_sigma_squared[k+1:])
sqr_term = np.sqrt(b*b - 4*a*c)
y1 = (-b + sqr_term) / (2*a)
y2 = (-b - sqr_term) / (2*a)
return y1, y2
def md_aggregation(X, axis=0, keepdims=True, md_function=moderate_deviation_f, Mp=1, Mn=10):
'''
Designed to use the md functions using the same interface as the rest of the numpy aggregation functions.
IT ONLY WORKS IN 3 DIMENSIONAL ARRAY (features, samples, classes)
:param X:
:param axis:
:param keepdims:
:param md_function:
:return:
'''
if axis != 0:
        X = np.swapaxes(X, 0, axis)  # bring the aggregation axis to the front
clasificadores, muestras, clases = X.shape
if keepdims:
result = np.zeros([1] +list(X.shape[1:]))
else:
result = np.zeros(X.shape[1:])
for m in range(muestras):
        # print(md_function(X[:, m, 0], Mp=Mp, Mn=Mn))
        if keepdims:
            for clase in range(clases):
                result[0, m, clase] = md_function(X[:, m, clase], Mp=Mp, Mn=Mn)
        else:
            for clase in range(clases):
                result[m, clase] = md_function(X[:, m, clase], Mp=Mp, Mn=Mn)
return result
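# --- Editor's addition: hedged usage sketch ----------------------------------
# Fuses the class scores of several classifiers with the moderate-deviation
# mean; the array shape follows the (classifiers, samples, classes) layout
# that `md_aggregation` expects.
def _example_md_aggregation():
    scores = np.random.rand(5, 10, 3)
    fused = md_aggregation(scores, axis=0, keepdims=False,
                           md_function=moderate_deviation_f, Mp=1, Mn=10)
    return fused  # shape (10, 3)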
| 24.861842 | 141 | 0.546705 |
f7244e9d2237fe01ddaeb6e8e95ca04552be563e | 3,688 | py | Python | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | metahertz/AirIAM | 212f84e1b1a51c7a614384f91b220e7f2a57a079 | [
"Apache-2.0"
] | 501 | 2020-03-04T16:00:54.000Z | 2022-03-30T17:31:10.000Z | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | rckasa/AirIAM | 5a99dc25354c1bc6525dbaf25a3afcd472f71b2f | [
"Apache-2.0"
] | 34 | 2020-03-23T08:12:18.000Z | 2022-02-13T08:50:39.000Z | airiam/terraform/entity_terraformers/IAMPolicyDocumentTransformer.py | rckasa/AirIAM | 5a99dc25354c1bc6525dbaf25a3afcd472f71b2f | [
"Apache-2.0"
] | 51 | 2020-04-16T06:43:29.000Z | 2022-03-20T14:20:24.000Z | import json
from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer
class IAMPolicyDocumentTransformer(BaseEntityTransformer):
def __init__(self, entity_json: dict, policy_name, principal_name=None):
policy_document_name = f"{policy_name}_document"
if principal_name:
policy_document_name = f"{principal_name}_{policy_document_name}"
super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json)
def _generate_hcl2_code(self, entity_json) -> str:
statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement'])
if 'Principal' in statements[0]:
statements = self.transform_assume_policy_statements(statements)
else:
statements = self.transform_execution_policy(statements)
code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{
version = "{entity_json.get('Version', '2012-10-17')}"
{statements}}}"""
return code
@staticmethod
def transform_execution_policy(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action'))
if 'Action' in statement:
action_str = f"actions = {json.dumps(actions)}"
else:
actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction'))
action_str = f"not_actions = {json.dumps(actions)}"
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{')
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
{action_str}
resources = {resources_list_str}
{condition_block}
}}
"""
return statement_block
@staticmethod
def transform_assume_policy_statements(statements):
statement_block = ""
for statement in statements:
sid_string = ""
if statement.get('Sid', '') != '':
sid_string = f"sid = \"{statement['Sid']}\"\n "
condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement)
statement_block += f""" statement {{
{sid_string}effect = "{statement['Effect']}"
actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))}
principals {{
type = "{list(statement['Principal'].keys())[0]}"
identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))}
}}
{condition_block}}}
"""
return statement_block
@staticmethod
def transform_conditions(statement):
condition_block = ""
if 'Condition' in statement:
for test, items in statement['Condition'].items():
for variable, values in items.items():
values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{')
condition_block += f"""
condition {{
test = "{test}"
variable = "{variable}"
values = {values_str}
}}
"""
return condition_block
@staticmethod
def force_list(x):
if isinstance(x, list):
return x
return [x]
def entities_to_import(self) -> list:
return []
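# --- Editor's addition: hedged usage sketch ----------------------------------
# Renders a minimal execution policy to HCL. The policy JSON is illustrative
# only, and `_generate_hcl2_code` is called directly here just to show the
# output; this assumes BaseEntityTransformer has set `_safe_name` during
# construction.
def _example_policy_document():
    policy = {"Version": "2012-10-17",
              "Statement": [{"Effect": "Allow",
                             "Action": "s3:GetObject",
                             "Resource": "arn:aws:s3:::example-bucket/*"}]}
    transformer = IAMPolicyDocumentTransformer(policy, "read_bucket")
    return transformer._generate_hcl2_code(policy)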
| 38.821053 | 138 | 0.632863 |
f7245440fe7dba32ffeb3a85b4b83af243aba25b | 290 | py | Python | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | ceuclid.py | jprzywoski/faster-python | 44252bf0a746dd862d752efbe2012a8a404ec7bf | [
"MIT"
] | null | null | null | import ctypes
from numpy.ctypeslib import ndpointer
lib = ctypes.cdll.LoadLibrary('./libdist.so')
fn = lib.dist
fn.restype = ctypes.c_double
fn.argtypes = [
ndpointer(ctypes.c_double),
ndpointer(ctypes.c_double),
ctypes.c_size_t
]
def dist(x, y):
return fn(x, y, len(x))
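# Editor's addition: hedged usage sketch. Assumes ./libdist.so exports
#   double dist(const double *x, const double *y, size_t n);
# and that both inputs are contiguous float64 arrays.
if __name__ == '__main__':
    import numpy as np
    a = np.random.rand(1000)
    b = np.random.rand(1000)
    print(dist(a, b))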
| 18.125 | 45 | 0.7 |
f7247128248055fc8b3fc7e0f99d36f794357c24 | 5,958 | py | Python | utils/evaluation.py | lippman1125/pytorch_FAN | ffc9c968478d55cb0c75c062bb8774923f961110 | [
"BSD-3-Clause"
] | 58 | 2019-03-14T20:13:10.000Z | 2022-03-17T07:59:34.000Z | utils/evaluation.py | lippman1125/pytorch_FAN | ffc9c968478d55cb0c75c062bb8774923f961110 | [
"BSD-3-Clause"
] | 7 | 2019-03-29T05:13:39.000Z | 2021-02-08T23:00:32.000Z | utils/evaluation.py | lippman1125/pytorch_FAN | ffc9c968478d55cb0c75c062bb8774923f961110 | [
"BSD-3-Clause"
] | 8 | 2019-05-29T09:05:32.000Z | 2022-03-12T17:00:02.000Z | from __future__ import absolute_import, print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from random import randint
from .misc import *
from .transforms import transform, transform_preds
__all__ = ['accuracy', 'AverageMeter']
def get_preds(scores):
''' get predictions from score maps in torch Tensor
return type: torch.LongTensor
'''
assert scores.dim() == 4, 'Score maps should be 4-dim'
# batch, chn, height, width ===> batch, chn, height*width
    # chn = number of landmarks (68)
    # each channel's score map is flattened to a height*width vector
maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)
maxval = maxval.view(scores.size(0), scores.size(1), 1)
idx = idx.view(scores.size(0), scores.size(1), 1) + 1
preds = idx.repeat(1, 1, 2).float()
# batchsize * numPoints * 2
# 0 is x coord
# 1 is y coord
# shape = batchsize, numPoints, 2
preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3) + 1
preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(2)) + 1
pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
preds *= pred_mask
return preds
def calc_dists(preds, target, normalize):
preds = preds.float()
target = target.float()
# dists = 68 x batch
dists = torch.zeros(preds.size(1), preds.size(0))
for n in range(preds.size(0)):
for c in range(preds.size(1)):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
dists[c, n] = torch.dist(preds[n, c, :], target[n, c, :]) / normalize[n]
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
if dists.ne(-1).sum() > 0:
return dists.le(thr).eq(dists.ne(-1)).sum() * 1.0 / dists.ne(-1).sum()
else:
return -1
def calc_metrics(dists, path='', category=''):
errors = torch.mean(dists, 0).view(dists.size(1))
axes1 = np.linspace(0, 1, 1000)
axes2 = np.zeros(1000)
for i in range(1000):
axes2[i] = float((errors < axes1[i]).sum()) / float(errors.size(0))
auc = round(np.sum(axes2[:70]) / .7, 2)
if path:
label = '{}({}) : {}'.format(path.split('/')[2], category, str(auc))
plt.xlim(0, 7)
plt.ylim(0, 100)
plt.yticks(np.arange(0, 110, 10))
plt.xticks(np.arange(0, 8, 1))
plt.grid()
plt.title('NME (%)', fontsize=20)
plt.xlabel('NME (%)', fontsize=16)
plt.ylabel('Test images (%)', fontsize=16)
if category:
if category in ['Easy', 'Category A']:
plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3)
if category in ['Media', 'Category B']:
plt.plot(axes1 * 100, axes2 * 100, 'r-', label=label, lw=3)
if category in ['Hard', 'Category C']:
plt.plot(axes1 * 100, axes2 * 100, 'g-', label=label, lw=3)
else:
plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3)
plt.legend(loc=4, fontsize=12)
plt.savefig(os.path.join(path + '/CED.eps'))
return auc
def _get_bboxsize(iterable):
# iterable = 68 x 2
# torch.min return values, idxs
mins = torch.min(iterable, 0)[0].view(2)
maxs = torch.max(iterable, 0)[0].view(2)
center = torch.FloatTensor((maxs[0] - (maxs[0] - mins[0]) / 2,
maxs[1] - (maxs[1] - mins[1]) / 2))
# center[1] = center[1] - ((maxs[1] - mins[1]) * 0.12)
return np.sqrt(abs(maxs[0] - mins[0]) * abs(maxs[1] - mins[1]))
def accuracy(output, target, idxs, thr=0.08):
''' Calculate accuracy according to NME, but uses ground truth heatmap rather than x,y locations
First value to be returned is accuracy calculated based on overall 'idxs'
followed by individual accuracies
'''
# preds = batch, 68, 64, 64
preds = get_preds(output)
gts = get_preds(target)
# B * 2
norm = torch.ones(preds.size(0))
# use face bbox to normalize
for i, gt in enumerate(gts):
norm[i] = _get_bboxsize(gt)
dists = calc_dists(preds, gts, norm)
acc = torch.zeros(len(idxs) + 1)
avg_acc = 0
cnt = 0
mean_dists = torch.mean(dists, 0)
acc[0] = mean_dists.le(thr).sum() * 1.0 / preds.size(0)
# for i in range(len(idxs)):
# acc[i+1] = dist_acc(dists[idxs[i]-1], thr=thr)
# if acc[i+1] >= 0:
# avg_acc = avg_acc + acc[i+1]
# cnt += 1
# if cnt != 0:
# acc[0] = avg_acc / cnt
return acc, dists
def final_preds(output, center, scale, res):
if output.size(1) == 136:
        coords = output.view((output.size(0), 68, 2))
else:
coords = get_preds(output) # float type
# output shape is batch, 68, 64, 64
# coords shape is batch, 68, 2
# pose-processing
for n in range(coords.size(0)):
for p in range(coords.size(1)):
hm = output[n][p]
px = int(math.floor(coords[n][p][0]))
py = int(math.floor(coords[n][p][1]))
if px > 1 and px < res[0] and py > 1 and py < res[1]:
diff = torch.Tensor(
[hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1] - hm[py - 2][px - 1]])
coords[n][p] += diff.sign() * .25
coords += 0.5
preds = coords.clone()
# Transform back
for i in range(coords.size(0)):
preds[i] = transform_preds(coords[i], center[i], scale[i], res)
if preds.dim() < 3:
        preds = preds.view(1, *preds.size())
return preds
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
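# --- Editor's addition: hedged usage sketch ----------------------------------
# Random heatmaps stand in for network outputs/targets; `idxs` is unused by
# the active code path in `accuracy` but is kept to match the signature.
def _example_accuracy_meter():
    output = torch.rand(2, 68, 64, 64)  # (batch, landmarks, H, W) score maps
    target = torch.rand(2, 68, 64, 64)
    acc, dists = accuracy(output, target, idxs=list(range(1, 69)))
    meter = AverageMeter()
    meter.update(float(acc[0]), n=output.size(0))
    return meter.avg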
| 31.193717 | 100 | 0.553206 |
f72476bf2e961b26c53e96e9358bb4c0a54239b7 | 8,355 | py | Python | tron/Vocab/hubCommands.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | tron/Vocab/hubCommands.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | tron/Vocab/hubCommands.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | __all__ = ['hubCommands']
import sys
import Vocab.InternalCmd as InternalCmd
from tron import Misc, g, hub
from tron.Hub.KV.KVDict import kvAsASCII
class hubCommands(InternalCmd.InternalCmd):
""" All the commands that the "hub" package provides.
The user executes these from the command window:
hub startNubs tspec
hub status
etc.
"""
def __init__(self, **argv):
argv['safeCmds'] = r'^\s*(actors|commanders|actorInfo|version|status|ping)\s*$'
argv['needsAuth'] = True
InternalCmd.InternalCmd.__init__(self, 'hub', **argv)
self.commands = {
'actors': self.actors,
'commanders': self.commanders,
'restart!': self.reallyReallyRestart,
'startNubs': self.startNubs,
'stopNubs': self.stopNubs,
'actorInfo': self.actorInfo,
'commands': self.commandInfo,
'setUsername': self.setUsername,
'status': self.status,
'loadWords': self.loadWords,
'getKeys': self.getKeys,
'listen': self.doListen,
'version': self.version,
'ping': self.status,
'relog': self.relog,
}
def version(self, cmd, finish=True):
""" Return the hub's version number. """
hub.getSetHubVersion()
vString = 'version=%s' % (g.KVs.getKV('hub', 'version', default='Unknown'))
if finish:
cmd.finish(vString)
else:
cmd.inform(vString)
def doListen(self, cmd):
""" Change what replies get sent to us. """
matched, unmatched, leftovers = cmd.match([('listen', None), ('addActors', None),
('delActors', None)])
cmdr = cmd.cmdr()
if not cmdr:
cmd.fail('debug=%s' % (Misc.qstr('cmdr=%s; cmd=%s' % (cmdr, cmd))))
return
Misc.log('doListen', 'start: %s' % (cmdr.taster))
Misc.log('doListen', 'leftovers: %s' % (leftovers))
if 'addActors' in matched:
actors = list(leftovers.keys())
Misc.log('doListen', 'addActors: %s' % (actors))
# cmd.inform('text="%s"' % (Misc.qstr("adding actors: %s" % (actors))))
cmdr.taster.addToFilter(actors, [], actors)
cmd.finish()
elif 'delActors' in matched:
actors = list(leftovers.keys())
Misc.log('doListen', 'delActors: %s' % (actors))
# cmd.inform('text="%s"' % (Misc.qstr("removing actors: %s" % (actors))))
cmdr.taster.removeFromFilter(actors, [], actors)
cmd.finish()
else:
cmd.fail('text="unknown listen command"')
Misc.log('doListen', 'finish: %s' % (cmdr.taster))
def actors(self, cmd, finish=True):
""" Return a list of the currently connected actors. """
g.actors.listSelf(cmd=cmd)
if finish:
cmd.finish('')
def commanders(self, cmd, finish=True):
""" Return a list of the currently connected commanders. """
g.commanders.listSelf(cmd=cmd)
if finish:
cmd.finish('')
def status(self, cmd, finish=True):
Misc.cfg.flush()
self.version(cmd, finish=False)
self.actors(cmd, finish=False)
self.commanders(cmd, finish=False)
if finish:
cmd.finish('')
def setUsername(self, cmd):
""" Change the username for the cmd's commander. """
args = cmd.cmd.split()
args = args[1:]
if len(args) != 1:
cmd.fail('cmdError="usage: setUsername newname"')
return
username = args[0]
cmdr = cmd.cmdr()
cmdr.setName(username)
cmd.finish('')
def stopNubs(self, cmd):
""" stop a list of nubs. """
nubs = list(cmd.argDict.keys())[1:]
if len(nubs) == 0:
cmd.fail('text="must specify one or more nubs to stop..."')
return
for nub in nubs:
try:
cmd.inform('text=%s' % (Misc.qstr('stopping nub %s' % (nub))))
hub.stopNub(nub)
except Exception as e:
cmd.warn('text=%s' % (Misc.qstr('failed to stop nub %s: %s' % (nub, e))))
cmd.finish('')
def startNubs(self, cmd):
""" (re-)start a list of nubs. """
# Flush the configuration to force a reload later. This allows to change the
# configuration or nubs during runtime without restarting tron.
Misc.cfg.flush()
nubs = list(cmd.argDict.keys())[1:]
if len(nubs) == 0:
cmd.fail('text="must specify one or more nubs to start..."')
return
for nub in nubs:
try:
cmd.inform('text=%s' % (Misc.qstr('(re-)starting nub %s' % (nub))))
hub.startNub(nub)
except Exception as e:
cmd.warn('text=%s' % (Misc.qstr('failed to start nub %s: %s' % (nub, e))))
cmd.finish('')
def actorInfo(self, cmd):
""" Get gory status about a list of actor nubs. """
# Query all actors if none are specified.
names = list(cmd.argDict.keys())[1:]
if len(names) == 0:
names = list(g.actors.keys())
for n in names:
try:
nub = g.actors[n]
nub.statusCmd(cmd, doFinish=False)
except Exception as e:
cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e))))
cmd.finish('')
def commandInfo(self, cmd):
""" Get gory status about a list of actor nubs. """
# Query all actors if none are specified.
names = list(cmd.argDict.keys())[1:]
if len(names) == 0:
names = list(g.actors.keys())
for n in names:
try:
nub = g.actors[n]
nub.listCommandsCmd(cmd, doFinish=False)
except Exception as e:
cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e))))
cmd.finish('')
def loadWords(self, cmd, finish=True):
""" (re-)load an internal vocabulary word. """
words = list(cmd.argDict.keys())[1:]
if len(words) == 0:
words = None
Misc.log('hubCmd', 'loadWords loading %s' % (words))
try:
hub.loadWords(words)
except Exception as e:
Misc.tback('hub.loadWords', e)
cmd.fail('text=%s' % (Misc.qstr(e)))
return
if finish:
cmd.finish()
def getKeys(self, cmd):
""" Return a bunch of keys for a given source.
Cmd args:
src - a key source name.
keys - 1 or more key names.
"""
words = cmd.cmd.split()
if len(words) < 3:
cmd.fail('text="usage: getKeys srcName key1 [key2 ... keyN]"')
return
src = words[1]
keys = words[2:]
matched, unmatched = g.KVs.getValues(src, keys)
Misc.log('hub.getKeys', 'matched=%s unmatched=%s' % (matched, unmatched))
for k, v in matched.items():
kvString = kvAsASCII(k, v)
cmd.inform(kvString, src='hub.%s' % (src))
if unmatched:
cmd.warn('text=%s' % (Misc.qstr('unmatched %s keys: %s' %
(src, ', '.join(unmatched)))))
cmd.finish('')
def reallyReallyRestart(self, cmd):
""" Restart the entire MC. Which among other things kills us now. """
cmd.warn('text=%s' %
(Misc.qstr('Restarting the hub now... bye, bye, and please call back soon!')))
# Give the poller a chance to flush out the warning.
g.poller.callMeIn(hub.restart, 1.0)
def relog(self, cmd):
""" Change where stderr goes to. """
args = cmd.cmd.split()
args = args[1:]
if len(args) != 1:
cmd.fail('cmdError="usage: relog filename"')
return
filename = args[0]
import os
f = open(filename, 'a', 1)
os.dup2(f.fileno(), 1)
os.dup2(f.fileno(), 2)
sys.stdout = os.fdopen(1, 'w', 1)
sys.stderr = os.fdopen(2, 'w', 1)
f.close()
cmd.finish('text="Jeebus, you done it now, whatever it was"')
| 30.830258 | 95 | 0.518971 |
f72480521f1fad6394a1656241b51fbd1c7d3230 | 14939 | py | Python | Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | ["bzip2-1.0.6"] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | #----------------------------------------------------------------------------
# Name: GridColMover.py
# Purpose: Grid Column Mover Extension
#
# Author: Gerrit van Dyk (email: gerritvd@decillion.net)
#
# Version 0.1
# Date: Nov 19, 2002
# RCS-ID: $Id$
# Licence: wxWindows license
#----------------------------------------------------------------------------
# 12/07/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 Compatibility changes
#
# 12/18/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxGridColMoveEvent -> GridColMoveEvent
# o wxGridRowMoveEvent -> GridRowMoveEvent
# o wxGridColMover -> GridColMover
# o wxGridRowMover -> GridRowMover
#
import wx
import wx.grid
#----------------------------------------------------------------------------
# event class and macros
#
# New style 12/7/03
#
wxEVT_COMMAND_GRID_COL_MOVE = wx.NewEventType()
wxEVT_COMMAND_GRID_ROW_MOVE = wx.NewEventType()
EVT_GRID_COL_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_COL_MOVE, 1)
EVT_GRID_ROW_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_ROW_MOVE, 1)
#----------------------------------------------------------------------------
class GridColMoveEvent(wx.PyCommandEvent):
def __init__(self, id, dCol, bCol):
wx.PyCommandEvent.__init__(self, id = id)
self.SetEventType(wxEVT_COMMAND_GRID_COL_MOVE)
self.moveColumn = dCol
self.beforeColumn = bCol
def GetMoveColumn(self):
return self.moveColumn
def GetBeforeColumn(self):
return self.beforeColumn
class GridRowMoveEvent(wx.PyCommandEvent):
def __init__(self, id, dRow, bRow):
wx.PyCommandEvent.__init__(self,id = id)
self.SetEventType(wxEVT_COMMAND_GRID_ROW_MOVE)
self.moveRow = dRow
self.beforeRow = bRow
def GetMoveRow(self):
return self.moveRow
def GetBeforeRow(self):
return self.beforeRow
#----------------------------------------------------------------------------
# graft new methods into the wxGrid class
def _ColToRect(self,col):
if self.GetNumberRows() > 0:
rect = self.CellToRect(0,col)
else:
rect = wx.Rect()
rect.height = self.GetColLabelSize()
rect.width = self.GetColSize(col)
for cCol in range(0,col):
rect.x += self.GetColSize(cCol)
rect.y = self.GetGridColLabelWindow().GetPosition()[1]
return rect
wx.grid.Grid.ColToRect = _ColToRect
def _RowToRect(self,row):
if self.GetNumberCols() > 0:
rect = self.CellToRect(row,0)
else:
rect = wx.Rect()
rect.width = self.GetRowLabelSize()
rect.height = self.GetRowSize(row)
for cRow in range(0,row):
rect.y += self.GetRowSize(cRow)
rect.x = self.GetGridRowLabelWindow().GetPosition()[0]
return rect
wx.grid.Grid.RowToRect = _RowToRect
#----------------------------------------------------------------------------
class ColDragWindow(wx.Window):
def __init__(self,parent,image,dragCol):
wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER)
self.image = image
self.SetSize((self.image.GetWidth(),self.image.GetHeight()))
self.ux = parent.GetScrollPixelsPerUnit()[0]
self.moveColumn = dragCol
self.Bind(wx.EVT_PAINT, self.OnPaint)
def DisplayAt(self,pos,y):
        x = self.GetPosition()[0]
if x == pos:
self.Refresh() # Need to display insertion point
else:
self.MoveXY(pos,y)
def GetMoveColumn(self):
return self.moveColumn
def _GetInsertionInfo(self):
parent = self.GetParent()
sx = parent.GetViewStart()[0] * self.ux
sx -= parent.GetRowLabelSize()
x = self.GetPosition()[0]
w = self.GetSize()[0]
sCol = parent.XToCol(x + sx)
eCol = parent.XToCol(x + w + sx)
iPos = xPos = xCol = 99999
centerPos = x + sx + (w / 2)
for col in range(sCol,eCol + 1):
cx = parent.ColToRect(col)[0]
if abs(cx - centerPos) < iPos:
iPos = abs(cx - centerPos)
xCol = col
xPos = cx
if xCol < 0 or xCol > parent.GetNumberCols():
xCol = parent.GetNumberCols()
return (xPos - sx - x,xCol)
def GetInsertionColumn(self):
return self._GetInsertionInfo()[1]
def GetInsertionPos(self):
return self._GetInsertionInfo()[0]
def OnPaint(self,evt):
dc = wx.PaintDC(self)
w,h = self.GetSize()
dc.DrawBitmap(self.image, 0,0)
dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(0,0, w,h)
iPos = self.GetInsertionPos()
dc.DrawLine(iPos,h - 10, iPos,h)
class RowDragWindow(wx.Window):
def __init__(self,parent,image,dragRow):
wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER)
self.image = image
self.SetSize((self.image.GetWidth(),self.image.GetHeight()))
self.uy = parent.GetScrollPixelsPerUnit()[1]
self.moveRow = dragRow
self.Bind(wx.EVT_PAINT, self.OnPaint)
def DisplayAt(self,x,pos):
y = self.GetPosition()[1]
if y == pos:
self.Refresh() # Need to display insertion point
else:
self.MoveXY(x,pos)
def GetMoveRow(self):
return self.moveRow
def _GetInsertionInfo(self):
parent = self.GetParent()
sy = parent.GetViewStart()[1] * self.uy
sy -= parent.GetColLabelSize()
y = self.GetPosition()[1]
h = self.GetSize()[1]
sRow = parent.YToRow(y + sy)
eRow = parent.YToRow(y + h + sy)
iPos = yPos = yRow = 99999
centerPos = y + sy + (h / 2)
for row in range(sRow,eRow + 1):
cy = parent.RowToRect(row)[1]
if abs(cy - centerPos) < iPos:
iPos = abs(cy - centerPos)
yRow = row
yPos = cy
if yRow < 0 or yRow > parent.GetNumberRows():
yRow = parent.GetNumberRows()
return (yPos - sy - y,yRow)
def GetInsertionRow(self):
return self._GetInsertionInfo()[1]
def GetInsertionPos(self):
return self._GetInsertionInfo()[0]
def OnPaint(self,evt):
dc = wx.PaintDC(self)
w,h = self.GetSize()
dc.DrawBitmap(self.image, 0,0)
dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(0,0, w,h)
iPos = self.GetInsertionPos()
dc.DrawLine(w - 10,iPos, w,iPos)
#----------------------------------------------------------------------------
class GridColMover(wx.EvtHandler):
def __init__(self,grid):
wx.EvtHandler.__init__(self)
self.grid = grid
self.lwin = grid.GetGridColLabelWindow()
self.lwin.PushEventHandler(self)
self.colWin = None
self.ux = self.grid.GetScrollPixelsPerUnit()[0]
self.startX = -10
self.cellX = 0
self.didMove = False
self.isDragging = False
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
def OnMouseMove(self,evt):
if not self.isDragging:
evt.Skip()
else:
_rlSize = self.grid.GetRowLabelSize()
if abs(self.startX - evt.X) >= 3 \
and abs(evt.X - self.lastX) >= 3:
self.lastX = evt.X
self.didMove = True
sx,y = self.grid.GetViewStart()
w,h = self.lwin.GetClientSize()
x = sx * self.ux
if (evt.X + x) < x:
x = evt.X + x
elif evt.X > w:
x += evt.X - w
                if x < 1:
                    x = 0
                else:
                    x /= self.ux
if x != sx:
if wx.Platform == '__WXMSW__':
self.colWin.Show(False)
self.grid.Scroll(x,y)
x,y = self.lwin.ClientToScreenXY(evt.X,0)
x,y = self.grid.ScreenToClientXY(x,y)
if not self.colWin.IsShown():
self.colWin.Show(True)
px = x - self.cellX
if px < 0 + _rlSize: px = 0 + _rlSize
if px > w - self.colWin.GetSize()[0] + _rlSize:
px = w - self.colWin.GetSize()[0] + _rlSize
self.colWin.DisplayAt(px,y)
return
def OnPress(self,evt):
self.startX = self.lastX = evt.X
_rlSize = self.grid.GetRowLabelSize()
sx = self.grid.GetViewStart()[0] * self.ux
sx -= _rlSize
px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
px,py = self.grid.ScreenToClientXY(px,py)
if self.grid.XToEdgeOfCol(px + sx) != wx.NOT_FOUND:
evt.Skip()
return
self.isDragging = True
self.didMove = False
col = self.grid.XToCol(px + sx)
rect = self.grid.ColToRect(col)
self.cellX = px + sx - rect.x
size = self.lwin.GetSize()
rect.y = 0
rect.x -= sx + _rlSize
rect.height = size[1]
colImg = self._CaptureImage(rect)
self.colWin = ColDragWindow(self.grid,colImg,col)
self.colWin.Show(False)
self.lwin.CaptureMouse()
evt.Skip()
def OnRelease(self,evt):
if self.isDragging:
self.lwin.ReleaseMouse()
self.colWin.Show(False)
self.isDragging = False
if not self.didMove:
px = self.lwin.ClientToScreenXY(self.startX,0)[0]
px = self.grid.ScreenToClientXY(px,0)[0]
sx = self.grid.GetViewStart()[0] * self.ux
sx -= self.grid.GetRowLabelSize()
col = self.grid.XToCol(px+sx)
if col != wx.NOT_FOUND:
self.grid.SelectCol(col,evt.ControlDown())
return
else:
bCol = self.colWin.GetInsertionColumn()
dCol = self.colWin.GetMoveColumn()
wx.PostEvent(self,
GridColMoveEvent(self.grid.GetId(), dCol, bCol))
self.colWin.Destroy()
evt.Skip()
def _CaptureImage(self,rect):
bmp = wx.EmptyBitmap(rect.width,rect.height)
memdc = wx.MemoryDC()
memdc.SelectObject(bmp)
dc = wx.WindowDC(self.lwin)
memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
memdc.SelectObject(wx.NullBitmap)
return bmp
class GridRowMover(wx.EvtHandler):
def __init__(self,grid):
wx.EvtHandler.__init__(self)
self.grid = grid
self.lwin = grid.GetGridRowLabelWindow()
self.lwin.PushEventHandler(self)
self.rowWin = None
self.uy = self.grid.GetScrollPixelsPerUnit()[1]
self.startY = -10
self.cellY = 0
self.didMove = False
self.isDragging = False
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
def OnMouseMove(self,evt):
if not self.isDragging:
evt.Skip()
else:
_clSize = self.grid.GetColLabelSize()
if abs(self.startY - evt.Y) >= 3 \
and abs(evt.Y - self.lastY) >= 3:
self.lastY = evt.Y
self.didMove = True
x,sy = self.grid.GetViewStart()
                w,h = self.lwin.GetClientSize()
y = sy * self.uy
if (evt.Y + y) < y:
y = evt.Y + y
elif evt.Y > h:
y += evt.Y - h
if y < 1:
y = 0
else:
y /= self.uy
if y != sy:
if wx.Platform == '__WXMSW__':
self.rowWin.Show(False)
self.grid.Scroll(x,y)
x,y = self.lwin.ClientToScreenXY(0,evt.Y)
x,y = self.grid.ScreenToClientXY(x,y)
if not self.rowWin.IsShown():
self.rowWin.Show(True)
py = y - self.cellY
if py < 0 + _clSize:
py = 0 + _clSize
if py > h - self.rowWin.GetSize()[1] + _clSize:
py = h - self.rowWin.GetSize()[1] + _clSize
self.rowWin.DisplayAt(x,py)
return
def OnPress(self,evt):
self.startY = self.lastY = evt.Y
_clSize = self.grid.GetColLabelSize()
sy = self.grid.GetViewStart()[1] * self.uy
sy -= _clSize
px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
px,py = self.grid.ScreenToClientXY(px,py)
if self.grid.YToEdgeOfRow(py + sy) != wx.NOT_FOUND:
evt.Skip()
return
row = self.grid.YToRow(py + sy)
if row == wx.NOT_FOUND:
evt.Skip()
return
self.isDragging = True
self.didMove = False
rect = self.grid.RowToRect(row)
self.cellY = py + sy - rect.y
size = self.lwin.GetSize()
rect.x = 0
rect.y -= sy + _clSize
rect.width = size[0]
rowImg = self._CaptureImage(rect)
self.rowWin = RowDragWindow(self.grid,rowImg,row)
self.rowWin.Show(False)
self.lwin.CaptureMouse()
evt.Skip()
def OnRelease(self,evt):
if self.isDragging:
self.lwin.ReleaseMouse()
self.rowWin.Show(False)
self.isDragging = False
if not self.didMove:
py = self.lwin.ClientToScreenXY(0,self.startY)[1]
py = self.grid.ScreenToClientXY(0,py)[1]
sy = self.grid.GetViewStart()[1] * self.uy
sy -= self.grid.GetColLabelSize()
row = self.grid.YToRow(py + sy)
if row != wx.NOT_FOUND:
self.grid.SelectRow(row,evt.ControlDown())
return
else:
bRow = self.rowWin.GetInsertionRow()
dRow = self.rowWin.GetMoveRow()
wx.PostEvent(self,
GridRowMoveEvent(self.grid.GetId(), dRow, bRow))
self.rowWin.Destroy()
evt.Skip()
def _CaptureImage(self,rect):
bmp = wx.EmptyBitmap(rect.width,rect.height)
memdc = wx.MemoryDC()
memdc.SelectObject(bmp)
dc = wx.WindowDC(self.lwin)
memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
memdc.SelectObject(wx.NullBitmap)
return bmp
#----------------------------------------------------------------------------
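# Minimal usage sketch (editorial addition, not part of the original module):
# attach both movers to a small grid and react to the move events. A real
# handler would reorder the grid's underlying table; this one only prints
# the requested move.
if __name__ == '__main__':
    class _DemoFrame(wx.Frame):
        def __init__(self):
            wx.Frame.__init__(self, None, -1, "gridmovers demo")
            self.grid = wx.grid.Grid(self, -1)
            self.grid.CreateGrid(5, 5)
            # Keep references so the pushed event handlers stay alive.
            self.colMover = GridColMover(self.grid)
            self.rowMover = GridRowMover(self.grid)
            # The move events are command events carrying the grid's id,
            # so bind on the grid itself.
            self.grid.Bind(EVT_GRID_COL_MOVE, self.OnColMove, self.grid)
            self.grid.Bind(EVT_GRID_ROW_MOVE, self.OnRowMove, self.grid)
        def OnColMove(self, evt):
            print('move column %d before column %d' %
                  (evt.GetMoveColumn(), evt.GetBeforeColumn()))
        def OnRowMove(self, evt):
            print('move row %d before row %d' %
                  (evt.GetMoveRow(), evt.GetBeforeRow()))
    app = wx.App(False)
    _DemoFrame().Show()
    app.MainLoop()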
| 30.240891 | 77 | 0.522391 |
f7248af32f72c111effbd60171246b9815ed3cb7 | 368 | py | Python | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | ["MIT"] | 3 | 2020-12-05T17:43:51.000Z | 2020-12-06T10:37:29.000Z | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | ["MIT"] | null | null | null | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | ["MIT"] | null | null | null | import importlib
import sys
from aoc_input import get_input
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Specify which file to run! [year, day]")
sys.exit()
try:
year = int(sys.argv[1])
day = int(sys.argv[2])
except ValueError:
print("Integer required!")
sys.exit()
module = importlib.import_module(f"y{year}.d{day:>02}")
module.main()
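# Expected solution layout (an assumption inferred from the import above):
# one package per year holding zero-padded day modules, each exposing a
# main() callable, e.g.
#
#   y2020/
#       __init__.py
#       d05.py        # defines main()
#
# Example invocation: python sol_runner.py 2020 5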
| 18.4 | 56 | 0.673913 |
f7248ee621042e30291d461ffdf3dcab8f265bba | 106231 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:
"""VirtualMachinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_location(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Gets all the virtual machines under the specified subscription for the specified location.
:param location: The location for which virtual machines under the subscription are queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=self.list_by_location.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'} # type: ignore
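    # Illustrative usage sketch (editorial addition, not generated code; the
    # credential, subscription id and location are placeholder assumptions):
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.compute.aio import ComputeManagementClient
    #
    #     client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     async for vm in client.virtual_machines.list_by_location("eastus"):
    #         print(, vm.vm_id)
    #
    # The later sketches in this file reuse this hypothetical "client".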
async def _capture_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineCaptureResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineCaptureResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')
request = build_capture_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._capture_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
@distributed_trace_async
async def begin_capture(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineCaptureParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]:
"""Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
to create similar VMs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Capture Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._capture_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore
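    # Illustrative usage sketch (editorial addition; resource names are
    # placeholder assumptions, and the VM must already be generalized):
    #
    #     poller = await client.virtual_machines.begin_capture(
    #         "my-rg", "my-vm",
    #         parameters=models.VirtualMachineCaptureParameters(
    #             vhd_prefix="backup",
    #             destination_container_name="vhds",
    #             overwrite_vhds=False))
    #     capture_result = await poller.result()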
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachine')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachine",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachine"]:
"""The operation to create or update a virtual machine. Please note some properties can be set
only during virtual machine creation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Create Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
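    # Illustrative usage sketch (editorial addition; "vm_model" stands in for
    # a fully populated VirtualMachine built by the caller -- an assumption):
    #
    #     poller = await client.virtual_machines.begin_create_or_update(
    #         "my-rg", "my-vm", parameters=vm_model)
    #     vm = await poller.result()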
async def _update_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineUpdate",
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VirtualMachineUpdate')
request = build_update_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.VirtualMachineUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachine"]:
"""The operation to update a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Update Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
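    # Illustrative usage sketch (editorial addition; the tag values are
    # placeholder assumptions):
    #
    #     update = models.VirtualMachineUpdate(tags={"env": "test"})
    #     poller = await client.virtual_machines.begin_update(
    #         "my-rg", "my-vm", parameters=update)
    #     vm = await poller.result()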
async def _delete_initial(
self,
resource_group_name: str,
vm_name: str,
force_deletion: Optional[bool] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
force_deletion=force_deletion,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
vm_name: str,
force_deletion: Optional[bool] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to delete a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param force_deletion: Optional parameter to force delete virtual machines.
:type force_deletion: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
force_deletion=force_deletion,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
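    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions). force_deletion requests a forced delete as documented
    # above:
    #
    #     poller = await client.virtual_machines.begin_delete(
    #         "my-rg", "my-vm", force_deletion=True)
    #     await poller.result()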
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> "_models.VirtualMachine":
"""Retrieves information about the model view or the instance view of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' retrieves a
snapshot of the runtime properties of the virtual machine that is managed by the platform and
can change outside of control plane operations. 'UserData' retrieves the UserData property as
part of the VM model view that was provided by the user during the VM Create/Update operation.
:type expand: str or ~azure.mgmt.compute.v2021_04_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
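    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions). Passing expand="instanceView" folds the runtime state
    # into the returned model:
    #
    #     vm = await client.virtual_machines.get(
    #         "my-rg", "my-vm", expand="instanceView")
    #     for status in vm.instance_view.statuses:
    #         print(status.code, status.display_status)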
@distributed_trace_async
async def instance_view(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> "_models.VirtualMachineInstanceView":
"""Retrieves information about the run-time state of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_instance_view_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'} # type: ignore
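    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions):
    #
    #     view = await client.virtual_machines.instance_view("my-rg", "my-vm")
    #     for status in view.statuses:
    #         print(status.code)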
async def _convert_to_managed_disks_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_convert_to_managed_disks_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._convert_to_managed_disks_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
@distributed_trace_async
async def begin_convert_to_managed_disks(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Converts virtual machine disks from blob-based to managed disks. Virtual machine must be
stop-deallocated before invoking this operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._convert_to_managed_disks_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
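    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions). Per the docstring the VM must be stop-deallocated first,
    # so a caller chains the two long-running operations:
    #
    #     await (await client.virtual_machines.begin_deallocate(
    #         "my-rg", "my-vm")).result()
    #     await (await client.virtual_machines.begin_convert_to_managed_disks(
    #         "my-rg", "my-vm")).result()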
async def _deallocate_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def begin_deallocate(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Shuts down the virtual machine and releases the compute resources. You are not billed for the
compute resources that this virtual machine uses.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._deallocate_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
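    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions):
    #
    #     await (await client.virtual_machines.begin_deallocate(
    #         "my-rg", "my-vm")).result()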
@distributed_trace_async
async def generalize(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""Sets the OS state of the virtual machine to generalized. It is recommended to sysprep the
virtual machine before performing this operation. :code:`<br>`For Windows, please refer to
`Create a managed image of a generalized VM in Azure
<https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource>`_.:code:`<br>`For
Linux, please refer to `How to create an image of a virtual machine or VHD
<https://docs.microsoft.com/azure/virtual-machines/linux/capture-image>`_.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generalize_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.generalize.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} # type: ignore
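    # Illustrative usage sketch (editorial addition; names are placeholder
    # assumptions). generalize is a plain coroutine rather than a poller and
    # is typically run against a stopped, sysprepped VM:
    #
    #     await client.virtual_machines.generalize("my-rg", "my-vm")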
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified resource group. Use the nextLink property in
the response to get the next page of virtual machines.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
@distributed_trace
def list_all(
self,
status_only: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineListResult"]:
"""Lists all of the virtual machines in the specified subscription. Use the nextLink property in
the response to get the next page of virtual machines.
:param status_only: statusOnly=true enables fetching run time status of all Virtual Machines in
the subscription.
:type status_only: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
status_only=status_only,
template_url=self.list_all.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
status_only=status_only,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
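    # Illustrative usage sketch (editorial addition): statusOnly is passed as
    # a string, per the parameter documentation above.
    #
    #     async for vm in client.virtual_machines.list_all(status_only="true"):
    #         print(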
@distributed_trace
def list_available_sizes(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes to which the specified virtual machine can be
resized.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.list_available_sizes.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} # type: ignore
async def _power_off_initial(
self,
resource_group_name: str,
vm_name: str,
skip_shutdown: Optional[bool] = False,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
skip_shutdown=skip_shutdown,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
@distributed_trace_async
async def begin_power_off(
self,
resource_group_name: str,
vm_name: str,
skip_shutdown: Optional[bool] = False,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to power off (stop) a virtual machine. The virtual machine can be restarted with
the same provisioned resources. You are still charged for this virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param skip_shutdown: The parameter to request non-graceful VM shutdown. True value for this
flag indicates non-graceful shutdown whereas false indicates otherwise. Default value for this
flag is false if not specified.
:type skip_shutdown: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._power_off_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
skip_shutdown=skip_shutdown,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
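    # Usage sketch (illustrative): begin_power_off returns an AsyncLROPoller, so the
    # call is awaited twice: once to start the operation, once for its result.
    # Resource names below are placeholders.
    #
    #   poller = await client.virtual_machines.begin_power_off(
    #       resource_group_name="my-rg", vm_name="my-vm", skip_shutdown=False)
    #   await poller.result()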
async def _reapply_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reapply_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._reapply_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
@distributed_trace_async
async def begin_reapply(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to reapply a virtual machine's state.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reapply_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
async def _restart_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
@distributed_trace_async
async def begin_restart(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to restart a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restart_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
@distributed_trace_async
async def begin_start(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to start a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
async def _redeploy_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_redeploy_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._redeploy_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
@distributed_trace_async
async def begin_redeploy(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Shuts down the virtual machine, moves it to a new node, and powers it back on.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._redeploy_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
async def _reimage_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'VirtualMachineReimageParameters')
else:
_json = None
request = build_reimage_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore
@distributed_trace_async
async def begin_reimage(
self,
resource_group_name: str,
vm_name: str,
parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reimages the virtual machine which has an ephemeral OS disk back to its initial state.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Reimage Virtual Machine operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineReimageParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reimage_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore
@distributed_trace_async
async def retrieve_boot_diagnostics_data(
self,
resource_group_name: str,
vm_name: str,
sas_uri_expiration_time_in_minutes: Optional[int] = None,
**kwargs: Any
) -> "_models.RetrieveBootDiagnosticsDataResult":
"""The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param sas_uri_expiration_time_in_minutes: Expiration duration in minutes for the SAS URIs with
        a value between 1 and 1440 minutes. :code:`<br>`:code:`<br>`NOTE: If not specified, SAS URIs
will be generated with a default expiration duration of 120 minutes.
:type sas_uri_expiration_time_in_minutes: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RetrieveBootDiagnosticsDataResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.RetrieveBootDiagnosticsDataResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RetrieveBootDiagnosticsDataResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_retrieve_boot_diagnostics_data_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes,
template_url=self.retrieve_boot_diagnostics_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'} # type: ignore
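    # Usage sketch (illustrative): unlike the begin_* methods, this call is a plain
    # awaitable and returns its result directly. The attribute printed below is
    # assumed from the RetrieveBootDiagnosticsDataResult model.
    #
    #   data = await client.virtual_machines.retrieve_boot_diagnostics_data(
    #       resource_group_name="my-rg", vm_name="my-vm",
    #       sas_uri_expiration_time_in_minutes=60)
    #   print(data.console_screenshot_blob_uri)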
async def _perform_maintenance_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_perform_maintenance_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._perform_maintenance_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore
@distributed_trace_async
async def begin_perform_maintenance(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""The operation to perform maintenance on a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._perform_maintenance_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore
@distributed_trace_async
async def simulate_eviction(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""The operation to simulate the eviction of spot virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_simulate_eviction_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.simulate_eviction.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'} # type: ignore
async def _assess_patches_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.VirtualMachineAssessPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineAssessPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_assess_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._assess_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore
@distributed_trace_async
async def begin_assess_patches(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]:
"""Assess patches on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineAssessPatchesResult or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineAssessPatchesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineAssessPatchesResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._assess_patches_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore
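    # Usage sketch (illustrative): the poller resolves to a
    # VirtualMachineAssessPatchesResult once the assessment completes.
    #
    #   poller = await client.virtual_machines.begin_assess_patches(
    #       resource_group_name="my-rg", vm_name="my-vm")
    #   assessment = await poller.result()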
async def _install_patches_initial(
self,
resource_group_name: str,
vm_name: str,
install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineInstallPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineInstallPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters')
request = build_install_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._install_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_install_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore
@distributed_trace_async
async def begin_install_patches(
self,
resource_group_name: str,
vm_name: str,
install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]:
"""Installs patches on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param install_patches_input: Input for InstallPatches as directly received by the API.
:type install_patches_input:
~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineInstallPatchesResult
or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstallPatchesResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._install_patches_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
install_patches_input=install_patches_input,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore
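    # Usage sketch (illustrative; the model fields shown are assumptions based on
    # the VirtualMachineInstallPatchesParameters model and may differ by version):
    #
    #   params = _models.VirtualMachineInstallPatchesParameters(
    #       maximum_duration="PT2H", reboot_setting="IfRequired")
    #   poller = await client.virtual_machines.begin_install_patches(
    #       resource_group_name="my-rg", vm_name="my-vm",
    #       install_patches_input=params)
    #   result = await poller.result()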
async def _run_command_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.RunCommandInput",
**kwargs: Any
) -> Optional["_models.RunCommandResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.RunCommandResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RunCommandInput')
request = build_run_command_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._run_command_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RunCommandResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore
@distributed_trace_async
async def begin_run_command(
self,
resource_group_name: str,
vm_name: str,
parameters: "_models.RunCommandInput",
**kwargs: Any
) -> AsyncLROPoller["_models.RunCommandResult"]:
"""Run command on the VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Run command operation.
:type parameters: ~azure.mgmt.compute.v2021_04_01.models.RunCommandInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.RunCommandResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RunCommandResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._run_command_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('RunCommandResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore
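    # Usage sketch (illustrative; "RunShellScript" is a built-in command id for
    # Linux VMs and is an assumption here, as are the resource names):
    #
    #   cmd = _models.RunCommandInput(command_id="RunShellScript",
    #                                 script=["echo hello"])
    #   poller = await client.virtual_machines.begin_run_command(
    #       resource_group_name="my-rg", vm_name="my-vm", parameters=cmd)
    #   output = await poller.result()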
| 46.167319 | 873 | 0.668468 |
f724bec965759ccd317b2b385268f2ab47cb4ab2 | 1,838 | py | Python | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | ["MIT"] | null | null | null | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | ["MIT"] | 1 | 2022-02-17T20:28:19.000Z | 2022-02-17T20:28:19.000Z | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import json
from pathlib import Path
def get_valid_file_path(file_path: str) -> Path:
"""Check if file exists and return valid Path object"""
path = Path(file_path).resolve()
if not path.is_file():
raise Exception("No file found! Please check your path and try again.")
return path
def convert_data(data: list) -> list:
"""Convert fixture to new format"""
print(f"Found {len(data)} entries, updating ... ", end='')
for item in data:
item['model'] = 'exams.exam'
fields: dict = item['fields']
fields['minute_author'] = fields.pop('author')
fields['minute_file'] = fields.pop('file')
fields['submitted_on'] = fields.pop('submitted')
fields['is_archived'] = False
print('Done!')
return data
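# Illustrative sketch of the rename performed by convert_data (values and the
# source model name are made up; only the field renames mirror the code above):
#
#   before: {"model": "...", "fields": {"author": 1, "file": "m.pdf", "submitted": "2022-01-01"}}
#   after:  {"model": "exams.exam", "fields": {"minute_author": 1, "minute_file": "m.pdf",
#                                              "submitted_on": "2022-01-01", "is_archived": False}}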
def get_valid_folder_path(folder_path: str) -> Path:
"""Check if folder exists and return valid Path object"""
path = Path(folder_path).resolve()
    if not path.is_dir():
raise Exception("No folder found! Please check your path and try again.")
return path
def main():
"""Main entry-point for script"""
source = input("Please specify a file path where the dump file can be found.\n> ")
path = get_valid_file_path(source)
data: list = json.loads(path.read_text())
data = convert_data(data)
destination = input("Please specify a folder path where the new dump file should be stored.\n> ")
path = get_valid_folder_path(destination)
file = path / 'exams.json'
if file.exists():
raise Exception("File 'exams.json' already exists! Please move or delete the existing file first.")
else:
        file.write_text(json.dumps(data, ensure_ascii=False))
print("New file 'exams.json' created!")
if __name__ == '__main__':
main()
| 31.689655 | 107 | 0.654516 |
f724c9e936f9b464bc9ef938bd84202c5c01e1e8 | 6,935 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import ApplicationGatewaysOperations
from ._operations import ApplicationSecurityGroupsOperations
from ._operations import AvailableDelegationsOperations
from ._operations import AvailableResourceGroupDelegationsOperations
from ._operations import AzureFirewallsOperations
from ._operations import AzureFirewallFqdnTagsOperations
from ._operations import NetworkManagementClientOperationsMixin
from ._operations import DdosProtectionPlansOperations
from ._operations import AvailableEndpointServicesOperations
from ._operations import ExpressRouteCircuitAuthorizationsOperations
from ._operations import ExpressRouteCircuitPeeringsOperations
from ._operations import ExpressRouteCircuitConnectionsOperations
from ._operations import ExpressRouteCircuitsOperations
from ._operations import ExpressRouteServiceProvidersOperations
from ._operations import ExpressRouteCrossConnectionsOperations
from ._operations import ExpressRouteCrossConnectionPeeringsOperations
from ._operations import ExpressRouteGatewaysOperations
from ._operations import ExpressRouteConnectionsOperations
from ._operations import ExpressRoutePortsLocationsOperations
from ._operations import ExpressRoutePortsOperations
from ._operations import ExpressRouteLinksOperations
from ._operations import InterfaceEndpointsOperations
from ._operations import LoadBalancersOperations
from ._operations import LoadBalancerBackendAddressPoolsOperations
from ._operations import LoadBalancerFrontendIPConfigurationsOperations
from ._operations import InboundNatRulesOperations
from ._operations import LoadBalancerLoadBalancingRulesOperations
from ._operations import LoadBalancerOutboundRulesOperations
from ._operations import LoadBalancerNetworkInterfacesOperations
from ._operations import LoadBalancerProbesOperations
from ._operations import NetworkInterfacesOperations
from ._operations import NetworkInterfaceIPConfigurationsOperations
from ._operations import NetworkInterfaceLoadBalancersOperations
from ._operations import NetworkInterfaceTapConfigurationsOperations
from ._operations import NetworkProfilesOperations
from ._operations import NetworkSecurityGroupsOperations
from ._operations import SecurityRulesOperations
from ._operations import DefaultSecurityRulesOperations
from ._operations import NetworkWatchersOperations
from ._operations import PacketCapturesOperations
from ._operations import ConnectionMonitorsOperations
from ._operations import Operations
from ._operations import PublicIPAddressesOperations
from ._operations import PublicIPPrefixesOperations
from ._operations import RouteFiltersOperations
from ._operations import RouteFilterRulesOperations
from ._operations import RouteTablesOperations
from ._operations import RoutesOperations
from ._operations import BgpServiceCommunitiesOperations
from ._operations import ServiceEndpointPoliciesOperations
from ._operations import ServiceEndpointPolicyDefinitionsOperations
from ._operations import UsagesOperations
from ._operations import VirtualNetworksOperations
from ._operations import SubnetsOperations
from ._operations import VirtualNetworkPeeringsOperations
from ._operations import VirtualNetworkTapsOperations
from ._operations import VirtualNetworkGatewaysOperations
from ._operations import VirtualNetworkGatewayConnectionsOperations
from ._operations import LocalNetworkGatewaysOperations
from ._operations import VirtualWansOperations
from ._operations import VpnSitesOperations
from ._operations import VpnSitesConfigurationOperations
from ._operations import VirtualHubsOperations
from ._operations import HubVirtualNetworkConnectionsOperations
from ._operations import VpnGatewaysOperations
from ._operations import VpnConnectionsOperations
from ._operations import P2SVpnServerConfigurationsOperations
from ._operations import P2SVpnGatewaysOperations
__all__ = [
'ApplicationGatewaysOperations',
'ApplicationSecurityGroupsOperations',
'AvailableDelegationsOperations',
'AvailableResourceGroupDelegationsOperations',
'AzureFirewallsOperations',
'AzureFirewallFqdnTagsOperations',
'NetworkManagementClientOperationsMixin',
'DdosProtectionPlansOperations',
'AvailableEndpointServicesOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitConnectionsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'ExpressRouteCrossConnectionsOperations',
'ExpressRouteCrossConnectionPeeringsOperations',
'ExpressRouteGatewaysOperations',
'ExpressRouteConnectionsOperations',
'ExpressRoutePortsLocationsOperations',
'ExpressRoutePortsOperations',
'ExpressRouteLinksOperations',
'InterfaceEndpointsOperations',
'LoadBalancersOperations',
'LoadBalancerBackendAddressPoolsOperations',
'LoadBalancerFrontendIPConfigurationsOperations',
'InboundNatRulesOperations',
'LoadBalancerLoadBalancingRulesOperations',
'LoadBalancerOutboundRulesOperations',
'LoadBalancerNetworkInterfacesOperations',
'LoadBalancerProbesOperations',
'NetworkInterfacesOperations',
'NetworkInterfaceIPConfigurationsOperations',
'NetworkInterfaceLoadBalancersOperations',
'NetworkInterfaceTapConfigurationsOperations',
'NetworkProfilesOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'DefaultSecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'ConnectionMonitorsOperations',
'Operations',
'PublicIPAddressesOperations',
'PublicIPPrefixesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'ServiceEndpointPoliciesOperations',
'ServiceEndpointPolicyDefinitionsOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkTapsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
'VirtualWansOperations',
'VpnSitesOperations',
'VpnSitesConfigurationOperations',
'VirtualHubsOperations',
'HubVirtualNetworkConnectionsOperations',
'VpnGatewaysOperations',
'VpnConnectionsOperations',
'P2SVpnServerConfigurationsOperations',
'P2SVpnGatewaysOperations',
]
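# Usage sketch (not part of this generated module): these operation classes are
# normally reached as attributes of NetworkManagementClient rather than imported
# directly. The attribute name below is derived from the class name and assumes
# azure-identity is available for authentication.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for vnet in client.virtual_networks.list_all():
#       print(vnet.name)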
| 46.858108 | 94 | 0.839366 |
f724ded074f8fa3a1a1d5041388c8593fb112856 | 924 | py | Python | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | ["MIT"] | 2 | 2021-07-05T12:00:39.000Z | 2021-07-05T12:00:49.000Z | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | ["MIT"] | null | null | null | cogs/slashes.py | mallusrgreatv2/PyHDISCORD | e414976441cbdb3a57b2c545ab164810bebe2e4b | ["MIT"] | null | null | null |
import discord
from discord.ext import commands
import discord_slash
from discord_slash import cog_ext
class Slashes(commands.Cog):
def __init__(self, client) -> None:
self.client: commands.Bot = client
@commands.Cog.listener()
async def on_ready(self):
print(f"[ {self.__class__.__name__} Cog Loaded ]")
    @cog_ext.cog_slash(name="ping", guild_ids=[853316413649190912], description="Bot's latency")
    async def ping(self, ctx):
        # client.latency is in seconds; convert to milliseconds before rounding
        await ctx.send("Pong! {}ms".format(round(self.client.latency * 1000)))
@cog_ext.cog_slash(name="say", description="say something with the bot", guild_ids=[853316413649190912])
    async def say(self, ctx: discord_slash.SlashContext, text: str):
if '@' in text:
await ctx.send("no")
return
await ctx.send(text)
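# Loading sketch (assumes discord.py 1.x with discord-py-slash-command, and that
# this file lives in a cogs/ package):
#
#   bot = commands.Bot(command_prefix='!')
#   slash = discord_slash.SlashCommand(bot, sync_commands=True)  # required once for slash commands
#   bot.load_extension('cogs.slashes')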
def setup(client):
    client.add_cog(Slashes(client))
| 35.538462 | 108 | 0.676407 |
f7254891c728997635a95c7943f2f2e7d783a797 | 14,518 | py | Python | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/syncrep/test_basic.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | ["PostgreSQL", "Apache-2.0"] | 9 | 2018-04-20T03:31:01.000Z | 2020-05-13T14:10:53.000Z | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/syncrep/test_basic.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | ["PostgreSQL", "Apache-2.0"] | 36 | 2017-09-21T09:12:27.000Z | 2020-06-17T16:40:48.000Z | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/syncrep/test_basic.py | rodel-talampas/gpdb | 9c955e350334abbd922102f289f782697eb52069 | ["PostgreSQL", "Apache-2.0"] | 32 | 2017-08-31T12:50:52.000Z | 2022-03-01T07:34:53.000Z |
#!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tinctest import logger
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
from gppylib.db import dbconn
from mpp.gpdb.tests.storage.walrepl.run import StandbyRunMixin
from mpp.gpdb.tests.storage import walrepl
from mpp.gpdb.tests.storage.walrepl.lib.walcomm import *
from mpp.gpdb.tests.storage.walrepl.lib import PgControlData
from gppylib.commands.base import Command
import os
import re
import select
import signal
import subprocess
import time
import sys
class syncrep(StandbyRunMixin, MPPTestCase):
def generate_trigger_file(self, content):
filename = 'wal_rcv_test'
self.assertTrue(content is not None)
filepath = os.path.join(self.standby.datadir, filename)
with open(filepath, 'wb') as f:
f.write(content)
def wait_stdout(self, proc, timeout):
rlist = [proc.stdout.fileno()]
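        # select() returns as soon as the child's stdout becomes readable; an
        # empty ready-list after `timeout` seconds means psql is still blocked.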
(rout, _, _) = select.select(rlist, [], [], timeout)
return len(rout) > 0
def set_guc(self, guc_name, guc_value):
logger.info('Configuring ' + guc_name +' ...')
cmd = Command("gpconfig " + guc_name,
"gpconfig -c " + guc_name + " -v " + guc_value)
cmd.run()
self.assertEqual(cmd.get_results().rc, 0, str(cmd))
logger.info('gpstop -u to reload config files...')
cmd = Command("gpstop -u",
"gpstop -u")
cmd.run()
self.assertEqual(cmd.get_results().rc, 0, str(cmd))
def test_syncrep(self):
# 1. Initiate the Standby
# 2. Once the WAL receiver starts, signal it to suspend post xlog flush
# but before sending the ack.
# 3. Now execute a transaction and commit it. The backend is expected
# be blocked.
# 4. Resume the WALReceiver and see the transaction passed and its
# results are visible.
# cleanup
PSQL.run_sql_command('DROP table if exists foo')
# 1. create standby and start
res = self.standby.create()
self.assertEqual(res, 0)
res = self.standby.start()
self.assertTrue(res.wasSuccessful())
# wait for the walreceiver to start
num_walsender = self.wait_for_walsender()
self.assertEqual(num_walsender, 1)
# 2. Once the WAL receiver starts, signal it to suspend post xlog flush
# but before sending the ack.
proc = subprocess.Popen(['ps', '-ef'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
search = "wal receiver process"
for line in stdout.split('\n'):
if (line.find(search) > 0):
split_line = re.split(r'\s+', line.strip())
break
self.assertTrue(len(split_line) > 0)
wal_rcv_pid = int(split_line[1])
logger.info('Suspending WAL Receiver(' + str(wal_rcv_pid) +')...')
self.generate_trigger_file('wait_before_send_ack')
os.kill(wal_rcv_pid, signal.SIGUSR2)
# 3. Now execute a transaction and commit it. The backend is expected
        #    to be blocked.
logger.info('Create table foo...')
# we use subprocess since we expect it'll be blocked.
proc = subprocess.Popen(['psql', '-c', 'create table foo (a int)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
readable = self.wait_stdout(proc, 5.0)
self.assertFalse(readable, 'psql did not block')
# 4. Resume the WALReceiver and see the transaction passed and its
# results are visible.
logger.info('Resume the WAL Receiver...')
self.generate_trigger_file('resume')
os.kill(wal_rcv_pid, signal.SIGUSR2)
readable = self.wait_stdout(proc, 5.0)
self.assertTrue(readable, 'psql still blocks')
proc.communicate()
logger.info('No blocked backend found!')
logger.info('Verifying if table exists ? ...')
PSQL(sql_cmd='select * from foo').run(validateAfter=True)
logger.info('Pass')
def test_unblock_while_catchup_out_of_range(self):
"""
This test verifies if a backend gets blocked in case
the WAL sender is still in catchup mode.
"""
with WalClient("replication=true") as client:
(sysid, tli, xpos) = client.identify_system()
sql_startup = "SELECT count(*) FROM pg_stat_replication where state = 'startup'"
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
curs = dbconn.execSQL(conn, sql_startup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1, "No WAL sender in startup phase found")
logger.info('WAL sender is alive and now is in startup phase...')
# Generate enough xlog in WAL sender startup phase. None of the sql statements
# should get blocked. If blocked we have some issue.
# Checkpointing causes full page writes on updates/inserts. Hence helps
# xlog generation.
i = 0
logger.info('Running a bunch of SQLs to generate enough xlog to maintain catchup phase...')
while (i < 10):
PSQL.run_sql_command('DROP TABLE IF EXISTS foo; CREATE TABLE foo(a int, b int); CHECKPOINT;')
i = i + 1
logger.info('Pass - Database does not block if WAL sender is alive and in startup phase')
logger.info('Creating some xlog seg files to simulate catchup out-of-range..')
i = 0
while(i < 3):
PSQL.run_sql_command('select pg_switch_xlog();select pg_switch_xlog();checkpoint;')
i = i + 1
xpos_ptr = XLogRecPtr.from_string(xpos)
client.start_replication(xpos_ptr)
while True:
msg = client.receive(1000)
if isinstance(msg, WalMessageData):
header = msg.header
                    # walsender must still be in catchup phase as a lot of xlog remains to be sent
sql_catchup = "SELECT count(*) FROM pg_stat_replication where state = 'catchup'"
sql_table_present = "SELECT count(*) from pg_class where relname = 'foo'"
sql_bkd_count = ("SELECT count(*) from pg_stat_activity where waiting ='t' and waiting_reason = 'replication'")
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
curs = dbconn.execSQL(conn, sql_catchup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1, "No Catchup WAL sender found")
logger.info('WAL sender is alive and now is in catchup phase...')
logger.info('In catchup phase, run some sql...')
                    PSQL.run_sql_command('DROP TABLE IF EXISTS foo; CREATE TABLE foo(a int, b int);',
                                         dbname='postgres')
while (i < 5):
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
# verify if the previous backend is blocked
curs = dbconn.execSQL(conn, sql_bkd_count)
results = curs.fetchall()
if (int(results[0][0]) > 0):
self.assertTrue(0, "Previous backend was blocked ...")
i = i + 1
logger.info('Create table is NOT blocked...')
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
# verify if WAL sender is still in catchup phase
curs = dbconn.execSQL(conn, sql_catchup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1,
"WAL sender catchup phase over before verification")
logger.info('WAL sender is alive and still in catchup phase...')
with dbconn.connect(dbconn.DbURL(dbname='postgres'), utility=True) as conn:
# verify if WAL sender is still in catchup phase
curs = dbconn.execSQL(conn, sql_table_present)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1, "Table foo not found")
# sync replication needs a reply otherwise backend blocks
client.reply(header.walEnd, header.walEnd, header.walEnd)
# success, should get some 'w' message
logger.info ("Pass - Database does not block if WAL sender is alive and "
"the catchup is out-of-range")
break
elif isinstance(msg, WalMessageNoData):
# could be timeout
client.reply(xpos_ptr, xpos_ptr, xpos_ptr)
else:
raise StandardError(msg.errmsg)
def test_block_while_catchup_within_range(self):
"""
        This test verifies that a backend gets blocked while
        the WAL sender is in catchup mode and within the catchup range.
"""
with WalClient("replication=true") as client:
(sysid, tli, xpos) = client.identify_system()
# Set the guc to > 1 so that we can verify the test
# using less amount of xlog
self.set_guc('repl_catchup_within_range', '3')
# Generate enough xlog in WAL sender startup phase. None of the sql statements
# should get blocked. If blocked we have some issue.
# Checkpointing causes full page writes on updates/inserts. Hence helps
# xlog generation.
i = 0
logger.info('Running a bunch of SQLs to generate enough xlog to maintain catchup phase...')
while (i < 10):
PSQL.run_sql_command('DROP TABLE IF EXISTS foo; CREATE TABLE foo(a int, b int); CHECKPOINT;')
i = i + 1
xpos_ptr = XLogRecPtr.from_string(xpos)
client.start_replication(xpos_ptr)
while True:
msg = client.receive(1000)
if isinstance(msg, WalMessageData):
header = msg.header
                    # walsender must still be in catchup phase as a lot of xlog remains to be sent
sql_catchup = "SELECT count(*) FROM pg_stat_replication where state = 'catchup'"
sql_table_present = "SELECT count(*) from pg_class where relname = 'foo'"
sql_bkd_count = ("SELECT count(*) from pg_stat_activity where waiting ='t' and waiting_reason = 'replication'")
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
curs = dbconn.execSQL(conn, sql_catchup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1, "No Catchup WAL sender found")
logger.info('WAL sender is alive and now is in catchup phase...')
logger.info('In catchup phase, create table...')
subprocess.Popen(['psql', '-c',
'DROP TABLE IF EXISTS raghav; create table raghav (a int);'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
# verify if WAL sender is still in catchup phase
curs = dbconn.execSQL(conn, sql_catchup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1,
"WAL sender catchup phase over before verification")
logger.info('WAL sender is alive, still in catchup phase ..')
while (i < 5):
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
# verify if the previous backend is blocked
curs = dbconn.execSQL(conn, sql_bkd_count)
results = curs.fetchall()
if (int(results[0][0]) == 1):
                                break
if (i == 4):
self.assertTrue(0, "Previous backend not blocked ...")
i = i + 1
logger.info('But, create table is blocked...')
with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
# verify if WAL sender is still in catchup phase
curs = dbconn.execSQL(conn, sql_catchup)
results = curs.fetchall()
self.assertEqual(int(results[0][0]), 1,
"WAL sender catchup phase over before verification")
logger.info('WAL sender is alive, in catchup phase and backend is blocked...')
# sync replication needs a reply otherwise backend blocks
client.reply(header.walEnd, header.walEnd, header.walEnd)
# success, should get some 'w' message
logger.info ("Pass - Backends block if WAL sender is alive and the catchup is within-range")
break
elif isinstance(msg, WalMessageNoData):
# could be timeout
client.reply(xpos_ptr, xpos_ptr, xpos_ptr)
else:
raise StandardError(msg.errmsg)
logger.info ("Pass")
self.set_guc('repl_catchup_within_range', '1')
| 44.533742 | 131 | 0.564403 |
f7256728eb65c78928992820c0d53c79800f694d | 483 | py | Python | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | app/api/migrations/0002_auto_20210201_1602.py | ingjavierpinilla/magentrack-test | 4b5ee34aafbe85c4f536ceafd5efdc9271a26008 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-01 16:02
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 1, 16, 2, 48, 685488, tzinfo=utc)),
),
]
| 23 | 109 | 0.625259 |
f7257ab79e200ce2c0c75e0ae6d7b38cf586e521 | 4,649 | py | Python | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | ee/clickhouse/queries/paths/path_event_query.py | thinhnguyenuit/posthog | 4758e66790485587d29a617174158d07341342f8 | [
"MIT"
] | null | null | null | from typing import Any, Dict, Tuple
from ee.clickhouse.models.property import get_property_string_expr
from ee.clickhouse.queries.event_query import ClickhouseEventQuery
from posthog.constants import AUTOCAPTURE_EVENT, PAGEVIEW_EVENT, SCREEN_EVENT
from posthog.models.filters.path_filter import PathFilter
class PathEventQuery(ClickhouseEventQuery):
FUNNEL_PERSONS_ALIAS = "funnel_persons"
_filter: PathFilter
def __init__(
self,
filter: PathFilter,
team_id: int,
round_interval=False,
should_join_distinct_ids=False,
should_join_persons=False,
**kwargs,
) -> None:
super().__init__(filter, team_id, round_interval, should_join_distinct_ids, should_join_persons, **kwargs)
def get_query(self) -> Tuple[str, Dict[str, Any]]:
# TODO: ColumnOptimizer with options like self._filter.include_pageviews, self._filter.include_screenviews,
funnel_paths_timestamp = ""
funnel_paths_join = ""
funnel_paths_filter = ""
if self._filter.funnel_paths:
funnel_paths_timestamp = f"{self.FUNNEL_PERSONS_ALIAS}.timestamp as min_timestamp"
funnel_paths_join = f"JOIN {self.FUNNEL_PERSONS_ALIAS} ON {self.FUNNEL_PERSONS_ALIAS}.person_id = {self.DISTINCT_ID_TABLE_ALIAS}.person_id"
funnel_paths_filter = f"AND {self.EVENT_TABLE_ALIAS}.timestamp >= min_timestamp"
_fields = [
f"{self.EVENT_TABLE_ALIAS}.timestamp AS timestamp",
(
f"if(event = '{SCREEN_EVENT}', {self._get_screen_name_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{PAGEVIEW_EVENT}', {self._get_current_url_parsing()}, "
f"if({self.EVENT_TABLE_ALIAS}.event = '{AUTOCAPTURE_EVENT}', concat('autocapture:', {self.EVENT_TABLE_ALIAS}.elements_chain), "
f"{self.EVENT_TABLE_ALIAS}.event))) AS path_item"
),
f"{self.DISTINCT_ID_TABLE_ALIAS}.person_id as person_id" if self._should_join_distinct_ids else "",
funnel_paths_timestamp,
]
_fields = list(filter(None, _fields))
date_query, date_params = self._get_date_filter()
self.params.update(date_params)
prop_filters = self._filter.properties
prop_query, prop_params = self._get_props(prop_filters)
self.params.update(prop_params)
event_query, event_params = self._get_event_query()
self.params.update(event_params)
query = f"""
SELECT {','.join(_fields)} FROM events {self.EVENT_TABLE_ALIAS}
{self._get_disintct_id_query()}
{self._get_person_query()}
{funnel_paths_join}
WHERE team_id = %(team_id)s
{event_query}
{date_query}
{prop_query}
{funnel_paths_filter}
ORDER BY {self.DISTINCT_ID_TABLE_ALIAS}.person_id, {self.EVENT_TABLE_ALIAS}.timestamp
"""
return query, self.params
def _determine_should_join_distinct_ids(self) -> None:
self._should_join_distinct_ids = True
def _get_current_url_parsing(self):
path_type, _ = get_property_string_expr("events", "$current_url", "'$current_url'", "properties")
return f"if(length({path_type}) > 1, trim( TRAILING '/' FROM {path_type}), {path_type})"
def _get_screen_name_parsing(self):
path_type, _ = get_property_string_expr("events", "$screen_name", "'$screen_name'", "properties")
return path_type
def _get_event_query(self) -> Tuple[str, Dict[str, Any]]:
params: Dict[str, Any] = {}
conditions = []
or_conditions = []
if self._filter.include_pageviews:
or_conditions.append(f"event = '{PAGEVIEW_EVENT}'")
if self._filter.include_screenviews:
or_conditions.append(f"event = '{SCREEN_EVENT}'")
if self._filter.include_autocaptures:
or_conditions.append(f"event = '{AUTOCAPTURE_EVENT}'")
if self._filter.include_all_custom_events:
or_conditions.append(f"NOT event LIKE '$%%'")
if self._filter.custom_events:
or_conditions.append(f"event IN %(custom_events)s")
params["custom_events"] = self._filter.custom_events
if or_conditions:
conditions.append(f"({' OR '.join(or_conditions)})")
if self._filter.exclude_events:
conditions.append(f"NOT event IN %(exclude_events)s")
params["exclude_events"] = self._filter.exclude_events
if conditions:
return f" AND {' AND '.join(conditions)}", params
return "", {}
| 39.735043 | 151 | 0.655195 |
f7259ef31d09ee215158684c34454fabb4e5926d | 614 | py | Python | aws-auth0-auth/helloWorld.py | skarlekar/ms-auth-tutorials | 0de172817e54533be93700de19028cfa8757861f | [
"MIT"
] | null | null | null | aws-auth0-auth/helloWorld.py | skarlekar/ms-auth-tutorials | 0de172817e54533be93700de19028cfa8757861f | [
"MIT"
] | 1 | 2021-06-01T21:41:36.000Z | 2021-06-01T21:41:36.000Z | aws-auth0-auth/helloWorld.py | skarlekar/ms-auth-tutorials | 0de172817e54533be93700de19028cfa8757861f | [
"MIT"
] | 1 | 2017-10-26T15:08:40.000Z | 2017-10-26T15:08:40.000Z | """Simple helloWorld service."""
import json
def sayHello(event, context):
"""Return a message in the response body."""
print('Event is: {}'.format(json.dumps(event)))
body = {
"message": "Hello! Your Auth0 authorized function executed successfully!"
}
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
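# With the API Gateway LAMBDA-PROXY integration the dict above maps directly
# onto the HTTP response: "statusCode" sets the status and "body" the payload.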
# Use this code if you don't use the http event with the LAMBDA-PROXY
# integration
"""
return {
"message": "Go Serverless v1.0! Your function executed successfully!",
"event": event
}
"""
| 22.740741 | 81 | 0.599349 |
f725d191d7ee26a6a4fe4a6ea65ea74b004d9957 | 531 | py | Python | contest/pythonist3/validating-credit-card-number/validating-credit-card-number.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | 4 | 2017-01-18T17:51:58.000Z | 2019-10-20T12:14:37.000Z | contest/pythonist3/validating-credit-card-number/validating-credit-card-number.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | null | null | null | contest/pythonist3/validating-credit-card-number/validating-credit-card-number.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
] | 8 | 2016-03-14T17:16:59.000Z | 2021-06-26T10:11:33.000Z | # -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2016-05-13 12:50:43
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2016-05-13 12:50:54
import re
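# Pattern notes:
#   ^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$ -- starts with 4, 5 or 6, then four
#       groups of four digits; the backreference \1 forces one consistent
#       separator ('-' everywhere or nowhere).
#   (\d)\1{3,} -- four or more consecutive repeats of the same digit, checked
#       after the hyphens have been stripped.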
for i in range(int(raw_input())):
S = raw_input().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$',S)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}',processed_string)
print 'Invalid' if final_match else 'Valid'
else:
print 'Invalid' | 35.4 | 67 | 0.59887 |
f7260ece4a1e3fc3b43d89b2b456333299b82c9d | 2,817 | py | Python | Q/questionnaire/serializers/serializers_ontologies.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | Q/questionnaire/serializers/serializers_ontologies.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | 477 | 2015-01-07T18:22:27.000Z | 2017-07-17T15:05:48.000Z | Q/questionnaire/serializers/serializers_ontologies.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | ####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.exceptions import ValidationError as RestValidationError
from rest_framework import serializers
from uuid import UUID as generate_uuid
from Q.questionnaire.serializers.serializers_base import QListSerializer, QSerializer, QVersionSerializerField
from Q.questionnaire.models.models_ontologies import QOntology
from Q.questionnaire.q_utils import serialize_model_to_dict
from Q.questionnaire.q_constants import *
class QOntologySerializer(QSerializer):
class Meta:
model = QOntology
fields = (
'id',
'name',
'version',
'documentation',
'file',
'title',
"url",
'created',
'modified',
'ontology_type',
'is_registered',
'is_active',
'key',
'document_types',
)
# there is no need to explicitly add QUniqueTogetherValidator
# b/c that is done automatically in "QSerializer.get_unique_together_validators()"
# validators = [
# QUniqueTogetherValidator(
# queryset=QModelCustomization.objects.all(),
# # fields=('name', 'version'),
# )
# ]
version = QVersionSerializerField()
title = serializers.SerializerMethodField() # method_name="get_title"
document_types = serializers.SerializerMethodField(method_name="get_supported_document_types")
def get_title(self, obj):
return str(obj)
def get_supported_document_types(self, obj):
"""
returns the model_proxies of the current ontology that can be used to create documents
ie: those w/ the stereotype "document" and that are listed in SUPPORTED_DOCUMENTS
:param obj:
:return:
"""
supported_document_model_proxies = obj.model_proxies.filter(
is_document=True,
name__iregex=r'(' + '|'.join(["^{0}$".format(sd) for sd in SUPPORTED_DOCUMENTS["CIM2"]]) + ')',
).order_by("name")
return [
serialize_model_to_dict(
model_proxy,
include={
"title": str(model_proxy),
"name": model_proxy.name.lower()
},
exclude=["guid", "created", "modified", "ontology"]
)
for model_proxy in supported_document_model_proxies
]
| 34.777778 | 115 | 0.611999 |
f7262a2f0da63f591723f9cdf91c2bae40d81f7d | 19,587 | py | Python | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2019-03-08T19:59:05.000Z | 2020-09-27T03:18:37.000Z | import os
import pytest
import numpy as np
from pandas.compat import zip
from pandas import (Series, isna, to_datetime, DatetimeIndex,
Timestamp, Interval, IntervalIndex, Categorical,
cut, qcut, date_range)
import pandas.util.testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.core.reshape.tile as tmod
class TestCut(object):
def test_simple(self):
data = np.ones(5, dtype='int64')
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected,
check_dtype=False)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95,
7.325, 9.7]))
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed='left')
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95,
7.325, 9.7095]))
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_bins_from_intervalindex(self):
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(np.append(c.codes, -1),
categories=c.categories,
ordered=True)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
# doc example
# make sure we preserve the bins
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes,
np.array([1, 1, 2], dtype='int8'))
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])
def test_wrong_num_labels(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0, 1, 10],
labels=['foo', 'bar', 'baz'])
def test_cut_corner(self):
# h3h
pytest.raises(ValueError, cut, [], 2)
pytest.raises(ValueError, cut, [1, 2, 3], 0.5)
def test_cut_out_of_range_more(self):
# #1511
s = Series([0, -1, 0, 1, -3], name='x')
ind = cut(s, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')
tm.assert_series_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True)
ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1])
tm.assert_index_equal(result.categories, ex_levels)
result, bins = cut(arr, 4, retbins=True, right=False)
ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3],
closed='left')
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor(self):
s = Series(np.random.randn(100), name='foo')
factor = cut(s, 4)
assert factor.name == 'foo'
def test_label_precision(self):
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36,
0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
def test_na_handling(self):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4)
result_arr = np.asarray(result)
ex_arr = np.where(isna(arr), np.nan, result_arr)
tm.assert_almost_equal(result_arr, ex_arr)
result = cut(arr, 4, labels=False)
ex_result = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, ex_result)
def test_inf_handling(self):
data = np.arange(6)
data_ser = Series(data, dtype='int64')
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_qcut(self):
arr = np.random.randn(1000)
        # We store the bins as an Index that has been rounded,
        # so comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds(self):
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles(self):
arr = np.random.randn(100)
factor = qcut(arr, [0, .25, .5, .75, 1.])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same(self):
tm.assert_raises_regex(ValueError, "edges.*unique", qcut,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
def test_cut_pass_labels(self):
arr = [50, 5, 10, 15, 20, 30, 70]
bins = [0, 25, 50, 100]
labels = ['Small', 'Medium', 'Large']
result = cut(arr, bins, labels=labels)
exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'],
categories=labels,
ordered=True)
tm.assert_categorical_equal(result, exp)
result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2],
labels))
exp = Categorical.from_codes([1] + 4 * [0] + [1, 2], labels)
tm.assert_categorical_equal(result, exp)
# issue 16459
labels = ['Good', 'Medium', 'Bad']
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels,
ordered=True))
tm.assert_categorical_equal(result, exp)
def test_qcut_include_lowest(self):
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9)])
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas(self):
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index(self):
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_round_frac(self):
# it works
result = cut(np.arange(11.), 2)
result = cut(np.arange(11.) / 1e10, 2)
# #1979, negative numbers
result = tmod._round_frac(-117.9998, precision=3)
assert result == -118
result = tmod._round_frac(117.9998, precision=3)
assert result == 118
result = tmod._round_frac(117.9998, precision=2)
assert result == 118
result = tmod._round_frac(0.000123456, precision=2)
assert result == 0.00012
def test_qcut_binning_issues(self):
# #1978, 1979
path = os.path.join(tm.get_data_path(), 'cut_data.csv')
arr = np.loadtxt(path)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
assert sp < sn
assert ep < en
assert ep <= sn
def test_cut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = cut(s, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take(
[0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_qcut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(s, [0, 0.333, 0.666, 1])
exp_levels = np.array([Interval(-0.001, 2.664),
Interval(2.664, 5.328), Interval(5.328, 8)])
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(
CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_series_retbins(self):
# GH 8589
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.003, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
result, bins = qcut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.001, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_qcut_duplicates_bin(self):
# GH 7751
values = [0, 0, 0, 0, 1, 2, 3]
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
result = qcut(values, 3, duplicates='drop')
tm.assert_index_equal(result.categories, expected)
pytest.raises(ValueError, qcut, values, 3)
pytest.raises(ValueError, qcut, values, 3, duplicates='raise')
# invalid
pytest.raises(ValueError, qcut, values, 3, duplicates='foo')
def test_single_quantile(self):
# issue 15431
expected = Series([0, 0])
s = Series([9., 9.])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0),
Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0),
Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0., 0.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0),
Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_single_bin(self):
# issue 14652
expected = Series([0, 0])
s = Series([9., 9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
# issue 15428
expected = Series([0, 0])
s = Series([0., 0.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([0])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
def test_datetime_cut(self):
# GH 14714
# testing for time data to be present as series
data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03']))
result, bins = cut(data, 3, retbins=True)
expected = (
Series(IntervalIndex([
Interval(Timestamp('2012-12-31 23:57:07.200000'),
Timestamp('2013-01-01 16:00:00')),
Interval(Timestamp('2013-01-01 16:00:00'),
Timestamp('2013-01-02 08:00:00')),
Interval(Timestamp('2013-01-02 08:00:00'),
Timestamp('2013-01-03 00:00:00'))]))
.astype(CDT(ordered=True)))
tm.assert_series_equal(result, expected)
# testing for time data to be present as list
data = [np.datetime64('2013-01-01'), np.datetime64('2013-01-02'),
np.datetime64('2013-01-03')]
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
# testing for time data to be present as ndarray
data = np.array([np.datetime64('2013-01-01'),
np.datetime64('2013-01-02'),
np.datetime64('2013-01-03')])
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
# testing for time data to be present as datetime index
data = DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03'])
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
def test_datetime_bin(self):
data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]
bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']
expected = (
Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))
.astype(CDT(ordered=True)))
for conv in [Timestamp, Timestamp, np.datetime64]:
bins = [conv(v) for v in bin_data]
result = cut(data, bins=bins)
tm.assert_series_equal(Series(result), expected)
bin_pydatetime = [Timestamp(v).to_pydatetime() for v in bin_data]
result = cut(data, bins=bin_pydatetime)
tm.assert_series_equal(Series(result), expected)
bins = to_datetime(bin_data)
        result = cut(data, bins=bins)
tm.assert_series_equal(Series(result), expected)
def test_datetime_nan(self):
def f():
cut(date_range('20130101', periods=3), bins=[0, 2, 4])
pytest.raises(ValueError, f)
result = cut(date_range('20130102', periods=5),
bins=date_range('20130101', periods=2))
mask = result.categories.isna()
tm.assert_numpy_array_equal(mask, np.array([False]))
mask = result.isna()
tm.assert_numpy_array_equal(
mask, np.array([False, True, True, True, True]))
@pytest.mark.parametrize(
"array_1_writeable, array_2_writeable",
[(True, True), (True, False), (False, False)])
def test_cut_read_only(self, array_1_writeable, array_2_writeable):
# issue 18773
array_1 = np.arange(0, 100, 10)
array_1.flags.writeable = array_1_writeable
array_2 = np.arange(0, 100, 10)
array_2.flags.writeable = array_2_writeable
hundred_elements = np.arange(100)
tm.assert_categorical_equal(cut(hundred_elements, array_1),
cut(hundred_elements, array_2))
| 37.026465 | 78 | 0.56512 |
f7263b5df5fb3177603ae56acb9d953605d88e9b | 1,670 | py | Python | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 2 | 2015-08-25T02:51:47.000Z | 2019-11-03T20:00:16.000Z | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 9 | 2015-09-19T20:51:14.000Z | 2015-09-28T07:06:50.000Z | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 1 | 2020-04-22T21:24:36.000Z | 2020-04-22T21:24:36.000Z | def radsort(unslist):
"""Returns a sorted list. Accepts only a list containing positive
integers."""
# find max for iterative solution
maxval = max(unslist)
ntimes = len(str(maxval))
slist = unslist[:]
for n in range(ntimes):
# Making radix bins
bins = [[] for _ in range(10)]
# Place each list item in appropriate bin
for i, item in enumerate(slist):
inspecting = slist[i]
digval = _get_nth_digit(inspecting, n)
bins[digval].append(inspecting)
slist = []
# Flatten bins to list
for bin in bins:
slist.extend(bin)
return slist
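# Illustrative: radsort([170, 45, 75, 90, 802, 24, 2, 66])
# returns [2, 24, 45, 66, 75, 90, 170, 802]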
def _get_nth_digit(num, n):
"""For a positive integer, get the value at the nth digit;
indexing starts at 0"""
return ((num % (10 ** (n + 1))) - (num % (10 ** n))) // 10 ** n
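# Illustrative behaviour: _get_nth_digit(3704, 0) == 4 (ones place) and
# _get_nth_digit(3704, 2) == 7 (hundreds place).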
if __name__ == "__main__":
"""Test time performance for best and worst cases"""
import time
size = 1000
# Best case: when all numbers in the list have the same number of digits.
good_list = range(size + 1)
start = time.time()
for i in range(1000):
radsort(good_list)
stop = time.time()
best_time = (stop - start)
# Worst case: When there is one very large outlier.
bad_list = [1 for _ in range(size)] + [10**10]
start = time.time()
for i in range(1000):
radsort(bad_list)
stop = time.time()
worst_time = (stop - start)
print "Best case is {} times better than worst for n=1000\n".format(
worst_time/best_time)
print "Best case: {0:.{1}f} ms\nWorst case: {2:.{3}f} ms".format(
best_time, 5, worst_time, 5)
| 27.377049 | 77 | 0.58982 |
f7264b4fcfd7aafc1c81e31c2b3afdfb0672a9ba | 1,144 | py | Python | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 17 | 2019-03-29T18:30:36.000Z | 2021-10-17T15:38:22.000Z | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 1 | 2019-04-22T22:40:30.000Z | 2019-04-24T21:45:07.000Z | code/nn.py | ctallec/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 5 | 2019-04-29T16:26:18.000Z | 2020-01-23T07:17:49.000Z | """Some nn utilities."""
import torch
from abstract import ParametricFunction
def copy_buffer(net: ParametricFunction, target_net: ParametricFunction):
"""Copy all buffers from net to target_net."""
with torch.no_grad():
for target_buf, buf in zip(target_net.buffers(), net.buffers()): # type: ignore
target_buf.copy_(buf)
def soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):
"""Soft update of the parameters of target_net with those of net.
Precisely
theta_targetnet <- tau * theta_targetnet + (1 - tau) * theta_net
"""
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.add_(1 - tau, param - target_param)
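# Minimal usage sketch (assumes net and target_net are two structurally
# identical ParametricFunction modules):
#   hard_update(net, target_net)            # start from identical weights
#   ...optimise net for one step...
#   soft_update(net, target_net, tau=0.99)  # target slowly tracks net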
def hard_update(net: ParametricFunction, target_net: ParametricFunction):
"""Hard update (i.e. copy) of the parameters of target_net with those of net."""
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.copy_(param)
| 40.857143 | 87 | 0.701049 |
f7265d7477ff3fba1b5e7f80d15d88b7c11ed07e | 1,092 | py | Python | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2020-04-12T01:23:25.000Z | 2021-05-22T13:46:00.000Z | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | null | null | null | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2016-12-30T23:57:44.000Z | 2021-05-22T13:50:21.000Z | #!/usr/bin/env python
"""
Mark the start and end of the prompt with Final term (iterm2) escape sequences.
See: https://iterm2.com/finalterm.html
"""
from __future__ import unicode_literals
from prompt_toolkit import prompt
from prompt_toolkit.token import Token
import sys
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a' # command_status is the command status, 0-255
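# Marker semantics (per the FinalTerm spec): A opens the prompt, B closes it
# where command entry begins, C marks the start of command output, and
# D;<status> its end together with the exit code, which lets terminals such as
# iTerm2 jump between prompts and track command status.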
def get_prompt_tokens(cli):
# Generate the tokens for the prompt.
# Important: use the `ZeroWidthEscape` token only if you are sure that
# writing this as raw text to the output will not introduce any
# cursor movements.
return [
(Token.ZeroWidthEscape, BEFORE_PROMPT),
(Token, 'Say something: # '),
(Token.ZeroWidthEscape, AFTER_PROMPT),
]
if __name__ == '__main__':
answer = prompt(get_prompt_tokens=get_prompt_tokens)
sys.stdout.write(BEFORE_OUTPUT)
print('You said: %s' % answer)
sys.stdout.write(AFTER_OUTPUT.format(command_status=0))
| 29.513514 | 92 | 0.701465 |
f726841edd23cffe106d88311ba375ae4ca2b996 | 7,722 | py | Python | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | 1 | 2020-03-19T13:58:33.000Z | 2020-03-19T13:58:33.000Z | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class HFT(Recommender):
"""Hidden Factors and Hidden Topics
Parameters
----------
name: string, default: 'HFT'
The name of the recommender model.
k: int, optional, default: 10
The dimension of the latent factors.
max_iter: int, optional, default: 50
Maximum number of iterations for EM.
grad_iter: int, optional, default: 50
Maximum number of iterations for L-BFGS.
lambda_text: float, default: 0.1
Weight of corpus likelihood in objective function.
l2_reg: float, default: 0.001
Regularization for user item latent factors.
vocab_size: int, optional, default: 8000
Size of vocabulary for review text.
init_params: dictionary, optional, default: None
List of initial parameters, e.g., init_params = {'alpha': alpha, 'beta_u': beta_u,
        'beta_i': beta_i, 'gamma_u': gamma_u, 'gamma_i': gamma_i}
alpha: float
Model offset, optional initialization via init_params.
    beta_u: ndarray, shape (n_user, 1)
        User biases, optional initialization via init_params.
    beta_i: ndarray, shape (n_item, 1)
        Item biases, optional initialization via init_params.
gamma_u: ndarray, shape (n_users,k)
The user latent factors, optional initialization via init_params.
    gamma_i: ndarray, shape (n_items,k)
The item latent factors, optional initialization via init_params.
trainable: boolean, optional, default: True
When False, the model will not be re-trained, and input of pre-trained parameters are required.
verbose: boolean, optional, default: True
When True, some running logs are displayed.
seed: int, optional, default: None
Random seed for weight initialization.
References
----------
Julian McAuley, Jure Leskovec. "Hidden Factors and Hidden Topics: Understanding Rating Dimensions with Review Text"
RecSys '13 Proceedings of the 7th ACM conference on Recommender systems Pages 165-172
"""
def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50,
lambda_text=0.1, l2_reg=0.001, vocab_size=8000,
init_params=None, trainable=True, verbose=True, seed=None):
super().__init__(name=name, trainable=trainable, verbose=verbose)
self.k = k
self.lambda_text = lambda_text
self.l2_reg = l2_reg
self.grad_iter = grad_iter
self.name = name
self.max_iter = max_iter
self.verbose = verbose
self.init_params = {} if not init_params else init_params
self.seed = seed
self.vocab_size = vocab_size
def fit(self, train_set, val_set=None):
"""Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object
"""
Recommender.fit(self, train_set, val_set)
from ...utils.init_utils import normal
self.n_item = self.train_set.num_items
self.n_user = self.train_set.num_users
self.alpha = self.init_params.get('alpha', train_set.global_mean)
self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))
self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))
self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))
self.gamma_i = self.init_params.get('gamma_i', normal((self.n_item, self.k), std=0.01, random_state=self.seed))
if self.trainable:
self._fit_hft()
return self
@staticmethod
def _build_data(csr_mat):
index_list = []
rating_list = []
for i in range(csr_mat.shape[0]):
j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]
index_list.append(csr_mat.indices[j:k])
rating_list.append(csr_mat.data[j:k])
return index_list, rating_list
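    # Illustrative: for a CSR matrix [[5, 0, 3], [0, 4, 0]] this returns the
    # per-row column indices [[0, 2], [1]] and matching values [[5, 3], [4]].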
def _fit_hft(self):
from .hft import Model
from tqdm import trange
# document data
bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)
documents, _ = self._build_data(bow_mat) # bag of word feature
# Rating data
user_data = self._build_data(self.train_set.matrix)
item_data = self._build_data(self.train_set.matrix.T.tocsr())
model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,
gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,
lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)
model.init_count(docs=documents)
# training
loop = trange(self.max_iter, disable=not self.verbose)
for _ in loop:
model.assign_word_topics(docs=documents)
loss = model.update_params(rating_data=(user_data, item_data))
loop.set_postfix(loss=loss)
self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()
if self.verbose:
print('Learning completed!')
def score(self, user_idx, item_idx=None):
"""Predict the scores/ratings of a user for an item.
Parameters
----------
user_idx: int, required
The index of the user for whom to perform score prediction.
item_idx: int, optional, default: None
The index of the item for that to perform score prediction.
If None, scores for all known items will be returned.
Returns
-------
res : A scalar or a Numpy array
Relative scores that the user gives to the item or to all known items
"""
if item_idx is None:
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(
self.gamma_u[user_idx, :])
return known_item_scores
else:
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(
self.gamma_u[user_idx, :])
return user_pred
| 38.41791 | 119 | 0.642191 |
f726902376e280ba863a7c19c43b900218daf48a | 4,130 | py | Python | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignPromotionactivityCustomerReceiveModel import AlipayMarketingCampaignPromotionactivityCustomerReceiveModel
class AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignPromotionactivityCustomerReceiveModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignPromotionactivityCustomerReceiveModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.promotionactivity.customer.receive'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 28.482759 | 155 | 0.658111 |
f72693b16d34b944f5bb4a1349f76575267e7ffa | 1,136 | py | Python | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | from dagstermill.io_managers import OutputNotebookIOManager
from dagster import io_manager
from .fixed_s3_pickle_io_manager import s3_client
class S3OutputNotebookIOManager(OutputNotebookIOManager):
"""Defines an IOManager that will store dagstermill output notebooks on s3"""
def _get_key(self, context) -> str:
return "notebooks/" + "_".join(context.get_run_scoped_output_identifier())
def load_input(self, context) -> bytes:
key = self._get_key(context.upstream_output)
bucket = context.resources.s3_bucket
context.log.info("loading from: s3_bucket[%s], s3_key[%s]", bucket, key)
return s3_client().get_object(Bucket=bucket, Key=key)["Body"].read()
def handle_output(self, context, obj: bytes):
key = self._get_key(context)
bucket = context.resources.s3_bucket
context.log.info("storing to: s3_bucket[%s], s3_key[%s]", bucket, key)
s3_client().put_object(Bucket=bucket, Key=key, Body=obj)
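# Keys are typically "notebooks/<run_id>_<step_key>_<output_name>" (the joined
# run-scoped output identifier), so each executed notebook gets a unique key.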
@io_manager(required_resource_keys={"s3_bucket"})
def s3_notebook_io_manager(_) -> OutputNotebookIOManager:
return S3OutputNotebookIOManager()
| 37.866667 | 82 | 0.727113 |
f726991caedc24166bb6ed9a085571aa0555465e | 4,333 | py | Python | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class InternalHockeyAppCutoverStatusResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
not_requested = "not_requested"
requested = "requested"
in_progress = "in_progress"
completed = "completed"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'string',
'status': 'string'
}
attribute_map = {
'id': 'id',
'status': 'status'
}
def __init__(self, id=None, status=None): # noqa: E501
"""InternalHockeyAppCutoverStatusResponse - a model defined in Swagger""" # noqa: E501
self._id = None
self._status = None
self.discriminator = None
self.id = id
if status is not None:
self.status = status
@property
def id(self):
"""Gets the id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
The ID of the app # noqa: E501
:return: The id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:rtype: string
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InternalHockeyAppCutoverStatusResponse.
The ID of the app # noqa: E501
:param id: The id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:type: string
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def status(self):
"""Gets the status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
Does the HockeyApp app have crashes from within the last 90 days? # noqa: E501
:return: The status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:rtype: string
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this InternalHockeyAppCutoverStatusResponse.
Does the HockeyApp app have crashes from within the last 90 days? # noqa: E501
:param status: The status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:type: string
"""
        allowed_values = ["not_requested", "requested", "in_progress", "completed"]  # noqa: E501
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )
        self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InternalHockeyAppCutoverStatusResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.080537 | 95 | 0.587122 |
f7269969627b886f2d9ff179c1f78a4abf30f3d0 | 1,426 | py | Python | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | 3 | 2021-03-22T14:13:56.000Z | 2022-03-01T03:06:22.000Z | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | class TrieNode:
def __init__(self):
self.children = {}
self.isWord = False
class Trie:
def __init__(self):
self.root = TrieNode()
def put(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
tmp = None
try:
tmp = current.children[child]
except KeyError:
tmp = TrieNode()
current.children[child] = tmp
current = tmp
current.isWord = True
def contains(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
try:
current = current.children[child]
except KeyError:
return False
return current.isWord
def remove(self, word):
self._removeRecursive(self.root, word, 0)
    def _removeRecursive(self, current, word, depth):
        if current is None:
            return None
        if depth == len(word):
            # Reached the end of the word: unmark it.
            current.isWord = False
        else:
            child = word[depth]
            if child in current.children:
                # Prune the child only if the recursive call says it is removable.
                node = self._removeRecursive(current.children[child], word, depth + 1)
                if node is not None:
                    del current.children[child]
        # A node may be pruned once it no longer marks a word and has no children.
        if not current.isWord and not current.children:
            return current
        return None
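# Quick self-check (an illustrative demo, not part of the original module):
if __name__ == "__main__":
    trie = Trie()
    trie.put("car")
    trie.put("card")
    assert trie.contains("car") and trie.contains("card")
    trie.remove("card")
    assert trie.contains("car") and not trie.contains("card")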
| 26.90566 | 80 | 0.497896 |
f726a169158c8afc5ef59a42f7606019f51270fd | 7,006 | py | Python | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.sets.rl_launcher import disco_experiment
if __name__ == "__main__":
variant = dict(
env_class=PickAndPlaceEnv,
env_kwargs=dict(
# Environment dynamics
action_scale=1.0,
boundary_dist=4,
ball_radius=1.5,
object_radius=1.,
ball_visual_radius=1.5,
object_visual_radius=1.,
min_grab_distance=1.,
walls=None,
# Rewards
action_l2norm_penalty=0,
reward_type="dense",
success_threshold=0.60,
# Reset settings
fixed_goal=None,
# Visualization settings
images_are_rgb=True,
render_dt_msec=0,
render_onscreen=False,
render_size=84,
show_goal=False,
goal_samplers=None,
goal_sampling_mode='random',
num_presampled_goals=10000,
object_reward_only=False,
init_position_strategy='random',
num_objects=1,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
use_automatic_entropy_tuning=True,
reward_scale='auto_normalize_by_max_magnitude',
),
max_path_length=100,
algo_kwargs=dict(
batch_size=128,
num_epochs=501,
num_eval_steps_per_epoch=3000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
),
# max_path_length=2,
# algo_kwargs=dict(
# batch_size=5,
# num_epochs=1,
# num_eval_steps_per_epoch=2*20,
# num_expl_steps_per_train_loop=2*20,
# num_trains_per_train_loop=10,
# min_num_steps_before_training=10,
# ),
replay_buffer_kwargs=dict(
fraction_future_context=0.0,
fraction_distribution_context=0.8,
max_size=int(1e6),
),
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
pad_color=50,
subpad_length=1,
pad_length=1,
num_columns_per_rollout=2,
num_imgs=8,
# rows=2,
# columns=9,
),
renderer_kwargs=dict(
# create_image_format='HWC',
# output_image_format='CWH',
output_image_format='CHW',
# flatten_image=True,
# normalize_image=False,
),
create_vae_kwargs=dict(
latent_dim=128,
encoder_cnn_kwargs=dict(
kernel_sizes=[5, 3, 3],
n_channels=[16, 32, 64],
strides=[3, 2, 2],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
normalization_type='layer',
),
encoder_mlp_kwargs=dict(
hidden_sizes=[],
),
decoder_dcnn_kwargs=dict(
kernel_sizes=[3, 3, 6],
n_channels=[32, 16, 3],
strides=[2, 2, 3],
paddings=[0, 0, 0],
),
decoder_mlp_kwargs=dict(
hidden_sizes=[256, 256],
),
use_fancy_architecture=True,
decoder_distribution='gaussian_learned_global_scalar_variance',
),
vae_trainer_kwargs=dict(
vae_lr=1e-3,
vae_visualization_config=dict(
num_recons=5,
num_samples=20,
# debug_period=50,
debug_period=20,
unnormalize_images=True,
image_format='CHW',
),
beta=1,
set_loss_weight=0,
),
data_loader_kwargs=dict(
batch_size=128,
),
vae_algo_kwargs=dict(
num_iters=501,
num_epochs_per_iter=1,
progress_csv_file_name='vae_progress.csv',
),
generate_set_for_vae_pretraining_kwargs=dict(
num_sets=3,
num_samples_per_set=128,
),
generate_set_for_rl_kwargs=dict(
num_sets=3,
num_samples_per_set=128,
# save_to_filename='3sets128samples_2objs.pickle',
saved_filename='/global/scratch/vitchyr/doodad-log-since-07-10-2020/manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle',
),
num_ungrouped_images=12800,
reward_fn_kwargs=dict(
drop_log_det_term=True,
sqrt_reward=True,
),
rig=False,
rig_goal_setter_kwargs=dict(
use_random_goal=True,
),
use_ground_truth_reward=True,
)
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 3
mode = 'sss'
exp_prefix = 'exp2-oracle-pygame-latent-reward-1-obj'
search_space = {
'vae_algo_kwargs.num_iters': [501],
# 'algo_kwargs.num_epochs': [1],
'observation_key': [
'state_observation',
],
'use_ground_truth_reward': [
False,
],
'use_onehot_set_embedding': [
True,
],
'use_dummy_model': [
False,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = list(sweeper.iterate_hyperparameters())
for _ in range(n_seeds):
for exp_id, variant in enumerate(variants):
if mode == 'local':
variant['vae_algo_kwargs']['num_iters'] = 0
variant['generate_set_for_rl_kwargs']['saved_filename'] = (
'manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle'
)
variant['algo_kwargs'] = dict(
batch_size=5,
num_epochs=1,
num_eval_steps_per_epoch=2*20,
num_expl_steps_per_train_loop=2*20,
num_trains_per_train_loop=10,
min_num_steps_before_training=10,
)
variant['max_path_length'] = 2
run_experiment(
disco_experiment,
exp_name=exp_prefix,
num_exps_per_instance=2,
mode=mode,
variant=variant,
# slurm_config_name='cpu',
use_gpu=True,
# gpu_id=1,
)
| 32.137615 | 148 | 0.529403 |
f726b366c9cf2b7cd4cfde6038b4f205fcd52e43 | 1,036 | py | Python | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | 1 | 2020-09-29T01:36:33.000Z | 2020-09-29T01:36:33.000Z | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | vyperlogix/zlib/zlibCompressor.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | import gzip, zlib, base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def decompress_zlib(s):
    # base64-decode, then inflate; wbits=15 expects a standard zlib header.
    # (base64.decodestring was removed in Python 3.9; decodebytes replaces it.)
    return zlib.decompress(base64.decodebytes(s), 15)
def zlib_compress(s):
    # Deflate at maximum compression (level 9), then base64-encode to ASCII.
    return base64.encodebytes(zlib.compress(s, 9))
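if __name__ == "__main__":
    # Round-trip self-check (illustrative; both helpers operate on bytes).
    packed = zlib_compress(b"hello world")
    assert decompress_zlib(packed) == b"hello world"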
| 31.393939 | 70 | 0.779923 |
f726da9544773e11f11ee7b9f04bc69fd7f46c4b | 8,615 | py | Python | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 3 | 2021-08-28T10:55:12.000Z | 2021-12-01T20:42:38.000Z | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | null | null | null | EOD_api/test_EOD_api.py | webclinic017/time-series-pipeline | 5ac418b91e395a48cba397f95d25d221adfff9bd | [
"MIT"
] | 1 | 2021-09-26T16:07:24.000Z | 2021-09-26T16:07:24.000Z | import os
import re
import datetime
import unittest
from io import StringIO
from unittest.mock import patch
import pandas as pd
import EOD_api as eod
TOKEN = os.environ["EOD_TOKEN"]
def date_parser(string):
date_pattern = re.compile("([0-9]{4}-[0-9]{2}-[0-9]{2})[ ]", re.VERBOSE)
return date_pattern.sub(r"\1T", string)
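# e.g. date_parser("2020-10-13 00:00:00") -> "2020-10-13T00:00:00", i.e. the
# ISO-8601 form that datetime.fromisoformat in the tests below expects.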
class TestGetEod(unittest.TestCase):
# @classmethod
# def setUp(cls):
# pass
# def tearDown(cls):
# pass
def test_idempotent__addtickers(self):
d1 = eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
).add_tickers(["MSFT.US"])
d2 = (
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="5m"
)
.add_tickers(["MSFT.US"])
.add_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_idempotent_truncate_dates(self):
d1 = eod.Fundamental(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).truncate_dates("2020-10-14", "2020-10-16")
d2 = (
eod.Fundamental(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17")
.truncate_dates("2020-10-14", "2020-10-16")
.truncate_dates("2020-10-14", "2020-10-16")
)
self.assertEqual(d1, d2)
def test_idempotent_remove_tickers(self):
d1 = eod.Fundamental(
["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17"
).remove_tickers(["MSFT.US"])
d2 = (
eod.Fundamental(["AAPL.US", "MSFT.US"], TOKEN, "2020-10-13", "2020-10-17")
.remove_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_add_remove(self):
d1 = eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
d2 = (
eod.OhlcvIntraday(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", "1m")
.add_tickers(["MSFT.US"])
.remove_tickers(["MSFT.US"])
)
self.assertEqual(d1, d2)
def test_remove_all_tickers(self):
with self.assertRaises(Exception):
eod.Ohlcv(["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17").remove_tickers(
["AAPL.US"]
).retrieve_data()
def test_misspelled_input(self):
with self.assertRaises(Exception):
eod.OhlcvIntraday(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17", intraday_frec="Daoly"
)
def test_ohlcv_data_format_hasnt_changed(
self,
    ):  # TODO: change this from "before formatting" to "after formatting"
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
url = "https://eodhistoricaldata.com/api/eod/AAPL.US?api_token={}&from=2020-10-13&to=2020-10-17&period=d".format(
TOKEN
)
actual = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
expected = pd.read_csv(
url,
usecols=[
"Date",
"Volume",
"Open",
"Close",
"High",
"Low",
"Adjusted_close",
],
)
pd.testing.assert_frame_equal(actual, expected, rtol=5e-3)
def test_index_formatting(self):
expected_aapl = pd.read_csv(
StringIO(
"""
Date Open High Low Close Adjusted_close Volume
2020-10-13 125.27 125.390 119.65 121.10 120.7110 262330500.0
2020-10-14 121.00 123.030 119.62 121.19 120.8008 151062297.0
2020-10-15 118.72 121.200 118.15 120.71 120.3223 112559203.0
2020-10-16 121.28 121.548 118.81 119.02 118.6377 115393797.0
275 NaN NaN NaN NaN NaN NaN
"""
),
sep="\\s+",
)
expected_aapl_formatted = pd.read_csv(
StringIO(
date_parser(
"""
Stock Date Open High Low Close Adjusted_close Volume
AAPL.US 2020-10-13 00:00:00+00:00 125.27 125.390 119.65 121.10 120.7110 262330500.0
AAPL.US 2020-10-14 00:00:00+00:00 121.00 123.030 119.62 121.19 120.8008 151062297.0
AAPL.US 2020-10-15 00:00:00+00:00 118.72 121.200 118.15 120.71 120.3223 112559203.0
AAPL.US 2020-10-16 00:00:00+00:00 121.28 121.548 118.81 119.02 118.6377 115393797.0
"""
)
),
sep="\\s+",
index_col=[0, 1],
converters={"Date": lambda col: datetime.datetime.fromisoformat(col)},
)
with patch.object(pd, "read_csv") as mock_read:
mock_read.autospec = True
mock_read.return_value = expected_aapl
formatted_mock = eod.Ohlcv(
["AAPL.US"], TOKEN, "2020-10-13", "2020-10-17"
).retrieve_data()
pd.testing.assert_frame_equal(
formatted_mock, expected_aapl_formatted, rtol=5e-3
)
# TODO? Write more tests:
# Check that the data is concated/merged/joined properly, particularly when the indexes come with Nans
# Check except clauses
# Check duplicate df values
# Assert errors with wrong args
# etc
# expected_ohlcv_concatted = pd.read_csv( StringIO( date_parser( """
# Stock Date Gmtoffset Datetime Open High Low Close Volume Returns
# BP.LSE 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# BP.LSE 2020-10-14 00:00:00+00:00 0.0 2020-10-13 15:25:00 213.649993 214.000000 213.550003 213.856994 1210380.0 -0.001601
# BP.LSE 2020-10-15 00:00:00+00:00 0.0 2020-10-14 15:25:00 213.000000 213.149993 212.600006 212.649993 1182246.0 0.019660
# BP.LSE 2020-10-16 00:00:00+00:00 0.0 2020-10-15 15:25:00 207.149993 207.199996 206.500000 206.850006 1626720.0 -0.013826
# AAPL.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# AAPL.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 121.139999 121.279998 121.029998 121.050003 4585723.0 0.003648
# AAPL.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 121.580001 121.709999 121.139999 121.180000 3420583.0 0.015419
# AAPL.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 120.790000 120.849998 120.580001 120.699996 3436603.0 -0.003550
# MSFT.US 2020-10-13 00:00:00+00:00 NaN NaN NaN NaN NaN NaN NaN NaN
# MSFT.US 2020-10-14 00:00:00+00:00 0.0 2020-10-13 19:55:00 223.320007 223.389999 222.750000 222.830001 1457493.0 0.000651
# MSFT.US 2020-10-15 00:00:00+00:00 0.0 2020-10-14 19:55:00 221.199996 221.414993 220.600006 220.759994 1122912.0 0.012377
# MSFT.US 2020-10-16 00:00:00+00:00 0.0 2020-10-15 19:55:00 219.639999 219.880004 219.490005 219.660003 1201342.0 -0.003900
# """ ) ), sep="\\s+", index_col=[0,1,2], converters = {'Date' : lambda col: datetime.datetime.fromisoformat( col ) \
# , 'Datetime' : lambda col: pd.to_datetime(col, format='%Y-%m-%dT%H:%M:%S', utc=True) } )
if __name__ == "__main__":
unittest.main()
| 43.075 | 165 | 0.51863 |
f727619381755861c088ab5d8fb34a9eb7540f17 | 341 | py | Python | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/27.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
        x = 0
        while x < len(nums):
            if nums[x] == val:
                # pop() shifts the remaining items left, so do not advance x.
                nums.pop(x)
            else:
                x += 1
        return len(nums)
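# Illustrative usage (not part of the LeetCode class template):
#   nums = [3, 2, 2, 3]
#   k = Solution().removeElement(nums, 3)   # k == 2, nums[:k] == [2, 2]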
f727cc1948a85ac6d72771c8c995e728612019c7 | 4,358 | py | Python | src/tequila/quantumchemistry/__init__.py | naomicurnow/tequila | 739a76222005558d348a428cf2ce7cb5dfe290de | [
"MIT"
] | 1 | 2021-01-11T18:40:47.000Z | 2021-01-11T18:40:47.000Z | src/tequila/quantumchemistry/__init__.py | kiminh/tequila | 464085265e125222c63e65446861e9c0a2428bab | [
"MIT"
] | null | null | null | src/tequila/quantumchemistry/__init__.py | kiminh/tequila | 464085265e125222c63e65446861e9c0a2428bab | [
"MIT"
] | null | null | null | import typing
from .qc_base import ParametersQC, QuantumChemistryBase
SUPPORTED_QCHEMISTRY_BACKENDS = ["base", "psi4"]
INSTALLED_QCHEMISTRY_BACKENDS = {"base": QuantumChemistryBase}
try:
from .psi4_interface import QuantumChemistryPsi4
INSTALLED_QCHEMISTRY_BACKENDS["psi4"] = QuantumChemistryPsi4
except ImportError:
pass
def show_available_modules():
print("Available QuantumChemistry Modules:")
for k in INSTALLED_QCHEMISTRY_BACKENDS.keys():
print(k)
def show_supported_modules():
print(SUPPORTED_QCHEMISTRY_BACKENDS)
def Molecule(geometry: str,
basis_set: str = None,
transformation: typing.Union[str, typing.Callable] = None,
backend: str = None,
guess_wfn=None,
*args,
**kwargs) -> QuantumChemistryBase:
"""
Parameters
----------
geometry
molecular geometry as string or as filename (needs to be in xyz format with .xyz ending)
basis_set
quantum chemistry basis set (sto-3g, cc-pvdz, etc)
transformation
The Fermion to Qubit Transformation (jordan-wigner, bravyi-kitaev, bravyi-kitaev-tree and whatever OpenFermion supports)
backend
quantum chemistry backend (psi4, pyscf)
guess_wfn
pass down a psi4 guess wavefunction to start the scf cycle from
can also be a filename leading to a stored wavefunction
args
kwargs
Returns
-------
    The tequila molecule object (a QuantumChemistryBase instance from the chosen backend)
"""
keyvals = {}
for k, v in kwargs.items():
if k in ParametersQC.__dict__.keys():
keyvals[k] = v
parameters = ParametersQC(geometry=geometry, basis_set=basis_set, multiplicity=1, **keyvals)
if backend is None:
if "psi4" in INSTALLED_QCHEMISTRY_BACKENDS:
backend = "psi4"
elif "pyscf" in INSTALLED_QCHEMISTRY_BACKENDS:
backend = "pyscf"
else:
requirements = [key in kwargs for key in ["one_body_integrals", "two_body_integrals", "nuclear_repulsion", "n_orbitals"]]
if not all(requirements):
raise Exception("No quantum chemistry backends installed on your system\n"
"To use the base functionality you need to pass the following tensors via keyword\n"
"one_body_integrals, two_body_integrals, nuclear_repulsion, n_orbitals\n")
else:
backend = "base"
if backend not in SUPPORTED_QCHEMISTRY_BACKENDS:
raise Exception(str(backend) + " is not (yet) supported by tequila")
if backend not in INSTALLED_QCHEMISTRY_BACKENDS:
raise Exception(str(backend) + " was not found on your system")
if guess_wfn is not None and backend != 'psi4':
raise Exception("guess_wfn only works for psi4")
if basis_set is None and backend != "base":
raise Exception("no basis_set provided for backend={}".format(backend))
elif basis_set is None:
basis_set = "custom"
parameters.basis_set=basis_set
return INSTALLED_QCHEMISTRY_BACKENDS[backend.lower()](parameters=parameters, transformation=transformation, guess_wfn=guess_wfn, *args, **kwargs)
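# Usage sketch (geometry and basis set are illustrative; requires a quantum
# chemistry backend such as psi4 to be installed):
#   mol = Molecule(geometry="h 0.0 0.0 0.0\nh 0.0 0.0 0.7", basis_set="sto-3g")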
def MoleculeFromOpenFermion(molecule,
transformation: typing.Union[str, typing.Callable] = None,
backend: str = None,
*args,
**kwargs) -> QuantumChemistryBase:
"""
Initialize a tequila Molecule directly from an openfermion molecule object
Parameters
----------
molecule
The openfermion molecule
transformation
The Fermion to Qubit Transformation (jordan-wigner, bravyi-kitaev, bravyi-kitaev-tree and whatever OpenFermion supports)
backend
The quantum chemistry backend, can be None in this case
Returns
-------
The tequila molecule
"""
if backend is None:
return QuantumChemistryBase.from_openfermion(molecule=molecule, transformation=transformation, *args, **kwargs)
else:
        return INSTALLED_QCHEMISTRY_BACKENDS[backend].from_openfermion(molecule=molecule, transformation=transformation,
                                                                       *args, **kwargs)
| 37.568966 | 149 | 0.652593 |
f72814a675df3867ed79d00435689d65ca7e9ffb | 1,041 | py | Python | autoio-interfaces/chemkin_io/tests/test__species_write_read.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | null | null | null | autoio-interfaces/chemkin_io/tests/test__species_write_read.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | 1 | 2022-02-15T19:35:14.000Z | 2022-02-15T19:35:14.000Z | autoio-interfaces/chemkin_io/tests/test__species_write_read.py | lpratalimaffei/autoio | 57be6e4882af1841153c19e7353e2531e64ce47f | [
"Apache-2.0"
] | 13 | 2020-06-24T05:21:11.000Z | 2021-05-05T19:58:30.000Z | """ tests chemkin_io.writer.mechanism.species_block
"""
from chemkin_io.writer.mechanism import species_block as writer
from chemkin_io.parser.species import names as parser
SPC_IDENT_DCT = {
'O': {'smiles': 'smiles_1',
'inchi': 'inchi_1',
'charge': '',
'mult': '',
'sens': ''},
'H': {'smiles': 'smiles_2',
'inchi': 'inchi_2',
'charge': '',
'mult': '',
'sens': ''}
}
SPC_NAMES_STRING_1 = (
'SPECIES \n\nO ! SMILES: smiles_1 ' +
'InChi: inchi_1 \nH ! SMILES: smiles_2 ' +
'InChi: inchi_2 \n\nEND \n\n\n'
)
SPC_NAMES_STRING_2 = 'OH \nHO2 \nC3H8 \nN2O'
SPC_NAMES_TUPLE = ('OH', 'HO2', 'C3H8', 'N2O')
def test__write_spc_names():
""" Tests the species names writing
"""
spc_str = writer(SPC_IDENT_DCT)
assert spc_str == SPC_NAMES_STRING_1
def test__read_spc_names():
""" Tests the parsing of species names
"""
spc_tuple = parser(SPC_NAMES_STRING_2)
assert spc_tuple == SPC_NAMES_TUPLE
| 26.025 | 63 | 0.588857 |
f72839c01680fa5e8dca84f89e02ed7c86a3f02b | 6,728 | py | Python | hw4/p2_Parsons_Ross.py | rp779/Python-COP-4045 | 2feabafef4a3ee04d593a35aa77f45b5d25d3754 | [
"MIT"
] | null | null | null | hw4/p2_Parsons_Ross.py | rp779/Python-COP-4045 | 2feabafef4a3ee04d593a35aa77f45b5d25d3754 | [
"MIT"
] | null | null | null | hw4/p2_Parsons_Ross.py | rp779/Python-COP-4045 | 2feabafef4a3ee04d593a35aa77f45b5d25d3754 | [
"MIT"
] | null | null | null | # Problem 2
# @author: Ross
import sys # sys.exit()
import testif # testif module
import turtle # Part A
def draw_leaf_straight(length, level):
"""PART A: The draw_leaf_straight() function takes two arguments (length and level) and returns a graphic that depicts a leaf drawn in turtle graphics. """
    if level <= 0:  # base case
return
else: # recursive case
turtle.forward(length)
draw_leaf_straight(0.6*length, level-1) # draws all middle branches
turtle.left(45)
draw_leaf_straight(0.6*length, level-1) # draws all left branches
turtle.right(90)
        draw_leaf_straight(0.6*length, level-1)  # draws all right branches
turtle.left(45)
turtle.backward(length)
return
def strB(n, base=10):
""" PART B: strB converts n (which is in base 10) to any base between 2 and 26. This is done by checking a string containing 26 items, for the 26 possible bases the user can convert to. n is divided by base using integer division (//) and the remainder is collected (the remainder will always be less than the base) by searching in alpha_num_str. """
class BadBase(Exception):
""" BadBase is a subclass of the exception class. This class is used to raise an exception if a user enters a base that is not between 2 and 26, BadBase will be raised. """
pass
try:
if base < 2 or base > 26:
raise BadBase
except BadBase:
print('Base must be between 2 and 26. Exiting...')
sys.exit()
else:
# a string representation to allow for conversion to any base between 2 and 26.
alpha_num_str = '0123456789ABCDEFGHIJKLMNOPQ'
# base case - if the number is less than the base, just look for the number in alpha_num_str and return it.
if n < base:
return alpha_num_str[n]
else:
            # recursive case - repeatedly divide by base, collecting each
            # remainder, until the quotient drops below the base; the remainders
            # read in reverse order are the digits. Here each remainder
            # (n % base) is looked up in alpha_num_str and appended after the
            # recursive result, which produces exactly that reverse order.
return strB(n // base, base) + alpha_num_str[n % base]
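# Worked example: strB(100, base=2) collects remainders 0, 0, 1, 0, 0, 1 for
# n = 100, 50, 25, 12, 6, 3 on the way down, hits the base case at 1 ('1'),
# then unwinds to '1' + '1' + '0' + '0' + '1' + '0' + '0' == '1100100'.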
def Cnk_m(n, k):
""" PART C: Cnk_m returns a function that tests whether the n-choose-k values have been calculated yet and stores them in a dictionary so they can be returned instead re-calculated. """
# a dictionary that stores n-choose-k values e.g. { (n,k): value, (n,k):value } the key is a tuple - (n,k)
cache = dict()
def memoization_step(n, k):
""" inner function that runs recursively. """
        if (n, k) not in cache:  # first check whether this (n, k) pair has already been cached.
if k == 0 or k == n: # base case
return 1
else: # recursive step. (n,k) have not been calculated and stored in the cache, so calculate them and store them in cache
cache[(n, k)] = memoization_step(
n-1, k-1) + memoization_step(n-1, k)
# if (n, k) have been calculated, simply return their value.
return cache[(n, k)]
return memoization_step(n, k)
def make_pairs(seq1, seq2, merge_list, accumulator=0):
""" PART D: make_pairs() takes in two sequences (seq1, seq2) and returns a list of tuples. Each tuple contains a value from seq1 whose index matches the value in seq2. The "accumulator" argument is set to a default value of zero. On each recursive call the accumulator is incremented by 1. merge_list is passed in as an argument because the list is mutable. """
# Get the smaller sequence
smaller = seq1 if len(seq1) <= len(seq2) else seq2
if accumulator == len(smaller): # base case
return merge_list
else: # recursive case
# append values from seq1 whose index matches the index in seq2.
merge_list.append((seq1[accumulator], seq2[accumulator]))
accumulator += 1
return make_pairs(seq1, seq2, merge_list, accumulator)
def main():
# Testing functionality of Part A: draw_leaf()
turtle.left(90)
turtle.speed(10)
draw_leaf_straight(120, 6)
turtle.done()
# Unit tests for Part B: strB()
testif.testif(strB(100, base=2) == '1100100',
'Test 1: 100 -> base 2', 'PASSED: 100 converted to base 2 = 1100100', 'FAILED')
testif.testif(strB(123456789, base=26) == 'AA44A1',
'Test 2: 123456789 -> base 26', 'PASSED: 123456789 converted to base 26 = AA44A1', 'FAILED')
testif.testif(strB(1234, base=10) == '1234',
'Test 3: 1234 -> base 10', 'PASSED: 1234 converted to base 10 = 1234', 'FAILED')
testif.testif(strB(100, base=16) == '64',
'Test 4: 100 -> base 16', 'PASSED: 100 converted to base 16 = 64', 'FAILED')
# Unit tests for Part C: Cnk_m()
testif.testif(Cnk_m(10, 3) == 120, 'Test 1: n-choose-k : n=10, k=3',
"PASSED: 10-choose-3 is 120", 'FAILED')
testif.testif(Cnk_m(39, 12) == 3910797436, 'Test 2: n-choose-k : n=39, k=12',
"PASSED: 39-choose-12 is 3910797436", 'FAILED')
testif.testif(Cnk_m(20, 4) == 4845, 'Test 3: n-choose-k : n=20, k=4',
"PASSED: 20-choose-4 is 4845", 'FAILED')
testif.testif(Cnk_m(15, 8) == 6435, 'Test 4: n-choose-k : n=15, k=8',
"PASSED: 15-choose-8 is 6435", 'FAILED')
# Unit tests for Part D: make_pairs()
testif.testif(make_pairs([1, 2, 3], [4, 5, 6], []) == [(1, 4), (2, 5), (3, 6)],
'Test 1: make_pairs : seq1=[1,2,3], seq2=[4,5,6]', "PASSED: make_pairs([1,2,3], [4,5,6]) = [(1,4),(2,5),(3,6)]", 'FAILED')
testif.testif(make_pairs([2, 5, 8, 11], [4, 5, 6], []) == [(2, 4), (5, 5), (8, 6)],
'Test 2: make_pairs : seq1=[2,5,8,11], seq2=[4,5,6]', "PASSED: make_pairs([1,2,3], [4,5,6]) == [(1,4),(2,5),(3,6)]", 'FAILED')
testif.testif(make_pairs([], [99, 17, 4], []) == [
], 'Test 3: make_pairs : seq1=[], seq2=[99,17,4]', "PASSED: make_pairs([], [99,17,4]) == []", 'FAILED')
testif.testif(make_pairs([0, 3, 4, 9, 4, 5], [7, 8, 33], []) == [(0, 7), (3, 8), (4, 33)],
'Test 4: make_pairs: seq1 = [0,3,4,9,4,5] seq2 = [7,8,33]', "PASSED: make_pairs([1,2,3], [4,5,6]) == [(1,4),(2,5),(3,6)]", 'FAILED')
testif.testif(make_pairs([10, 11, 20], [2, 4, 6, 0], []) == [(10, 2), (11, 4), (20, 6)],
'Test 5: make_pairs : seq1=[10,11,20], seq2=[2,4,6,0]', "PASSED: make_pairs([10,11,20], [2,4,6,0]) == [(10,2),(11,4),(20,6)", 'FAILED')
main()
| 50.969697 | 410 | 0.609394 |
f728659e152b0ab0ac0b69c92e35428aa12d16bd | 1,546 | py | Python | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | 8 | 2020-09-05T16:04:31.000Z | 2022-02-27T09:57:51.000Z | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | """
Given the root of a binary tree, return the length of the diameter of the tree.
The diameter of a binary tree is the length of the longest path between any two nodes in a tree.
This path may or may not pass through the root.
The length of a path between two nodes is represented by the number of edges between them.
Example 1:
Input: root = [1,2,3,4,5]
Output: 3
Explanation: 3 is the length of the path [4,2,1,3] or [5,2,1,3].
Example 2:
Input: root = [1,2]
Output: 1
Constraints:
The number of nodes in the tree is in the range [1, 104].
-100 <= Node.val <= 100
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def diameterOfBinaryTree(self, root):
diameter = 0
def longest_path(node):
if not node:
return 0
nonlocal diameter
# recursively find the longest path in
# both left child and right child
left_path = longest_path(node.left)
right_path = longest_path(node.right)
# update the diameter if left_path plus right_path is larger
diameter = max(diameter, left_path + right_path)
# return the longest one between left_path and right_path;
# remember to add 1 for the path connecting the node and its parent
return max(left_path, right_path) + 1
longest_path(root)
return diameter
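# Quick self-check (an illustrative demo using Example 1 above):
if __name__ == "__main__":
    # Tree [1,2,3,4,5]:  1 -> (2 -> (4, 5), 3)
    root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
    assert Solution().diameterOfBinaryTree(root) == 3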
| 25.766667 | 97 | 0.641656 |
f728735a0a2cd2a637b30db6ca8659076398b7a8 | 4,417 | py | Python | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | from gym.envs.mujoco import HalfCheetahEnv
import argparse
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from envs.navigation2d.navigation2d import Navigation2d
from rlkit.envs.mujoco.ant import AntEnv
from rlkit.envs.mujoco.half_cheetah import HalfCheetahEnv
def experiment(variant, args):
expl_env, eval_env = get_env(str(args.env))
# expl_env = NormalizedBoxEnv(HalfCheetahEnv())
# eval_env = NormalizedBoxEnv(HalfCheetahEnv())
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
M = variant['layer_size']
qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
hidden_sizes=[M, M],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
policy,
)
replay_buffer = EnvReplayBuffer(
variant['replay_buffer_size'],
expl_env,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
def get_env(name):
if name == 'test':
expl_env, eval_env = Navigation2d(), Navigation2d()
# expl_env.set_random_start_state(True)
# eval_env.set_random_start_state(True)
return NormalizedBoxEnv(expl_env), NormalizedBoxEnv(eval_env)
elif name == 'Ant':
return NormalizedBoxEnv(AntEnv(expose_all_qpos=False)), NormalizedBoxEnv(AntEnv(expose_all_qpos=True))
elif name == 'Half-cheetah':
return NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=False)), NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=False))
return NormalizedBoxEnv(gym.make(name)), NormalizedBoxEnv(gym.make(name))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('env', type=str,
help='environment')
args = parser.parse_args()
# noinspection PyTypeChecker
variant = dict(
algorithm="SAC",
version="normal",
layer_size=128,
replay_buffer_size=int(1E6),
algorithm_kwargs=dict(
num_epochs=3000,
num_eval_steps_per_epoch=2000,
num_trains_per_train_loop=200,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=200,
batch_size=128,
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
)
setup_logger('SAC' + '_' + args.env, variant=variant, snapshot_mode="gap_and_last",
snapshot_gap=100, )
# ptu.set_gpu_mode(True) # optionally set the GPU (default=False)
experiment(variant, args)
| 32.962687 | 127 | 0.672628 |
f7289065c4d52fe80d6531156b36dfd941d57e04 | 2,152 | py | Python | migrations/versions/0004_notification_stats_date.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0004_notification_stats_date.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0004_notification_stats_date.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """empty message
Revision ID: 0004_notification_stats_date
Revises: 0003_add_service_history
Create Date: 2016-04-20 13:59:01.132535
"""
# revision identifiers, used by Alembic.
revision = "0004_notification_stats_date"
down_revision = "0003_add_service_history"
import sqlalchemy as sa
from alembic import op
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("uix_service_to_day", "notification_statistics")
op.alter_column("notification_statistics", "day", new_column_name="day_string")
op.add_column("notification_statistics", sa.Column("day", sa.Date(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_date(day_string, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.create_index(
op.f("ix_notification_statistics_day"),
"notification_statistics",
["day"],
unique=False,
)
op.drop_column("notification_statistics", "day_string")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_notification_statistics_day"), table_name="notification_statistics")
op.drop_constraint("uix_service_to_day", "notification_statistics")
op.alter_column("notification_statistics", "day", new_column_name="day_date")
op.add_column("notification_statistics", sa.Column("day", sa.String(), nullable=True))
op.get_bind()
op.execute(
"UPDATE notification_statistics ns1 SET day = (SELECT to_char(day_date, 'YYYY-MM-DD') FROM notification_statistics ns2 WHERE ns1.id = ns2.id)"
)
op.alter_column("notification_statistics", "day", nullable=False)
op.drop_column("notification_statistics", "day_date")
op.create_unique_constraint("uix_service_to_day", "notification_statistics", columns=["service_id", "day"])
### end Alembic commands ###
| 36.474576 | 152 | 0.72816 |
f728c2249a621aec123829f2600362674d968847 | 2,941 | py | Python | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 23 | 2021-01-19T11:50:57.000Z | 2021-12-12T17:20:22.000Z | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 1 | 2021-04-06T14:35:03.000Z | 2021-06-20T08:56:15.000Z | experiments/utils.py | chandar-lab/IIRC | ae6ffcfc0a42274bcda66e2288e09118604620e4 | [
"MIT"
] | 8 | 2021-01-05T10:49:19.000Z | 2021-12-12T17:20:38.000Z | import numpy as np
import torch.nn as nn
import json
def log(epoch, task_id, log_dict, logbook):
log_dict["message"] = f"task_{task_id}_metrics"
log_dict["task_id"] = task_id
log_dict["task_epoch"] = epoch
log_dict["step"] = epoch
logbook.write_metric(log_dict)
def log_task(task_id, log_dict, logbook):
log_dict["message"] = f"incremental_metrics"
log_dict["task_id"] = task_id
log_dict["step"] = task_id
logbook.write_metric(log_dict)
def pad_random_crop(tensor_img, per_direction_padding=0):
pad_left = pad_right = pad_top = pad_bottom = per_direction_padding
tensor_width = tensor_img.shape[-1]
tensor_height = tensor_img.shape[-2]
tensor_img = nn.functional.pad(tensor_img,
[pad_left, pad_right, pad_top, pad_bottom])
    # +1 because np.random.randint excludes the upper bound: valid crop starts
    # run from 0 through the total padding. This also avoids randint(0, 0)
    # raising when per_direction_padding == 0.
    start_index_width = np.random.randint(0, pad_left + pad_right + 1)
    start_index_height = np.random.randint(0, pad_top + pad_bottom + 1)
end_index_width = start_index_width + tensor_width
end_index_height = start_index_height + tensor_height
return tensor_img[..., start_index_height:end_index_height, start_index_width:end_index_width]
def random_horizontal_flip(tensor_img, flip_prop=0.5):
do_flip = np.random.random() >= (1 - flip_prop)
if do_flip:
return tensor_img.flip((-1))
else:
return tensor_img
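# Usage sketch (shapes illustrative): a CHW image tensor padded by 4 px per
# side, randomly cropped back to its original 32x32, then randomly flipped:
#   img = torch.rand(3, 32, 32)
#   img = pad_random_crop(img, per_direction_padding=4)
#   img = random_horizontal_flip(img, flip_prop=0.5)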
def remove_extra_logs(cur_task_id, cur_epoch, file):
logs_to_keep = []
remove_task_summary = False
with open(file, 'r') as logs_file:
for line in logs_file:
json_line = json.loads(line)
if not (json_line['logbook_type'] == "metric"):
logs_to_keep.append(json_line)
elif json_line["task_id"] < cur_task_id:
logs_to_keep.append(json_line)
elif json_line["task_id"] == cur_task_id:
if "task_epoch" in json_line.keys() and json_line["task_epoch"] < cur_epoch:
logs_to_keep.append(json_line)
elif "task_epoch" in json_line.keys() and json_line["task_epoch"] >= cur_epoch:
remove_task_summary = True
elif not remove_task_summary:
logs_to_keep.append(json_line)
with open(file, 'w') as logs_file:
for json_line in logs_to_keep:
logs_file.write(json.dumps(json_line))
logs_file.write("\n")
def extend_list(input_, output_length):
if isinstance(input_, int):
output = [input_ for _ in range(output_length)]
elif hasattr(input_, '__iter__'):
if len(input_) < output_length:
output = input_
output.extend([input_[-1] for _ in range(output_length - len(input_))])
elif len(input_) > output_length:
output = input_[:output_length]
else:
output = input_
else:
raise TypeError("Neither an integer nor an iterable was provided")
return output | 36.7625 | 98 | 0.652159 |
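# Examples of the padding/truncation behaviour (illustrative):
#   extend_list(3, 4)         -> [3, 3, 3, 3]
#   extend_list([1, 2], 4)    -> [1, 2, 2, 2]  (note: extends the input list in place)
#   extend_list([1, 2, 3], 2) -> [1, 2]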
f728c391f0d3f70e7cfa1e9837dfcc22ca3a34d2 | 3,369 | py | Python | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | 15 | 2017-09-19T05:40:42.000Z | 2021-04-23T00:59:24.000Z | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | null | null | null | tests/PyPoE/poe/test_patchserver.py | Openarl/PyPoE | ab5377e3b16f1920d4d9ada443e1e9059715f0fb | [
"MIT"
] | 3 | 2018-02-14T00:02:09.000Z | 2020-07-26T15:18:55.000Z | """
Tests for PyPoE.poe.patchserver
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | tests/PyPoE/poe/test_patchserver.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: f728c391f0d3f70e7cfa1e9837dfcc22ca3a34d2 $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Tests for patchserver.py
Agreement
===============================================================================
See PyPoE/LICENSE
TODO
===============================================================================
Testing on live data is difficult, since we can't verify it was downloaded
correctly as the contents of the files may change. Perhaps find a good
candidate for testing.
"""
# =============================================================================
# Imports
# =============================================================================
# Python
import os
import re
from urllib.error import HTTPError
from tempfile import TemporaryDirectory
# 3rd-party
import pytest
# self
from PyPoE.poe import patchserver
# =============================================================================
# Setup
# =============================================================================
_TEST_URL = 'Data/Wordlists.dat'
_re_version = re.compile(r'[\d]+\.[\d]+\.[\d]+\.[\d]+', re.UNICODE)
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture(scope='module')
def patch():
return patchserver.Patch()
# =============================================================================
# Tests
# =============================================================================
class TestPatch(object):
def test_dst_file(self, patch):
with TemporaryDirectory() as temp:
patch.download(
file_path=_TEST_URL,
dst_file=os.path.join(temp, 'test.txt'),
)
def test_dst_dir(self, patch):
with TemporaryDirectory() as temp:
patch.download(
file_path=_TEST_URL,
dst_dir=temp,
)
def test_missing_dst_error(self, patch):
with pytest.raises(ValueError):
patch.download(
file_path=_TEST_URL,
)
def test_file_not_found(self, patch):
with pytest.raises(HTTPError):
patch.download_raw(
file_path='THIS_SHOULD_NOT_EXIST.FILE',
)
def test_version(self, patch):
assert _re_version.match(patch.version) is not None, 'patch.version ' \
'result is expected to match the x.x.x.x format'
| 33.356436 | 122 | 0.344613 |
f728d6871fb41255044213a71f7eb015371377f1 | 4,096 | py | Python | tempest/api/compute/admin/test_security_groups.py | vmahuli/tempest | f70319f5eda72b8c8a913ae1002ec531324e4116 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/admin/test_security_groups.py | vmahuli/tempest | f70319f5eda72b8c8a913ae1002ec531324e4116 | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/admin/test_security_groups.py | vmahuli/tempest | f70319f5eda72b8c8a913ae1002ec531324e4116 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setUpClass(cls):
super(SecurityGroupsTestAdminJSON, cls).setUpClass()
cls.adm_client = cls.os_adm.security_groups_client
cls.client = cls.security_groups_client
def _delete_security_group(self, securitygroup_id, admin=True):
if admin:
resp, _ = self.adm_client.delete_security_group(securitygroup_id)
else:
resp, _ = self.client.delete_security_group(securitygroup_id)
self.assertEqual(202, resp.status)
@testtools.skipIf(CONF.service_available.neutron,
"Skipped because neutron do not support all_tenants"
"search filter.")
@test.attr(type='smoke')
def test_list_security_groups_list_all_tenants_filter(self):
# Admin can list security groups of all tenants
# List of all security groups created
security_group_list = []
# Create two security groups for a non-admin tenant
for i in range(2):
name = data_utils.rand_name('securitygroup-')
description = data_utils.rand_name('description-')
resp, securitygroup = (self.client
.create_security_group(name, description))
self.assertEqual(200, resp.status)
self.addCleanup(self._delete_security_group,
securitygroup['id'], admin=False)
security_group_list.append(securitygroup)
client_tenant_id = securitygroup['tenant_id']
# Create two security groups for admin tenant
for i in range(2):
name = data_utils.rand_name('securitygroup-')
description = data_utils.rand_name('description-')
resp, adm_securitygroup = (self.adm_client
.create_security_group(name,
description))
self.assertEqual(200, resp.status)
self.addCleanup(self._delete_security_group,
adm_securitygroup['id'])
security_group_list.append(adm_securitygroup)
# Fetch all security groups based on 'all_tenants' search filter
param = {'all_tenants': 'true'}
resp, fetched_list = self.adm_client.list_security_groups(params=param)
self.assertEqual(200, resp.status)
        sec_group_id_list = [sg['id'] for sg in fetched_list]
# Now check if all created Security Groups are present in fetched list
for sec_group in security_group_list:
self.assertIn(sec_group['id'], sec_group_id_list)
# Fetch all security groups for non-admin user with 'all_tenants'
# search filter
resp, fetched_list = self.client.list_security_groups(params=param)
self.assertEqual(200, resp.status)
# Now check if all created Security Groups are present in fetched list
for sec_group in fetched_list:
self.assertEqual(sec_group['tenant_id'], client_tenant_id,
"Failed to get all security groups for "
"non admin user.")
class SecurityGroupsTestAdminXML(SecurityGroupsTestAdminJSON):
_interface = 'xml'
| 42.666667 | 79 | 0.654541 |
f728fdc393576c7d300b95276d9f3b1aeee7cd65 | 16,567 | py | Python | toolium/driver_wrapper.py | Telefonica/toolium | 3921cf94164ae1a2cd27d94197f0b145f2498541 | [
"Apache-2.0"
] | 94 | 2016-02-15T11:32:36.000Z | 2022-02-14T12:31:42.000Z | toolium/driver_wrapper.py | Telefonica/toolium | 3921cf94164ae1a2cd27d94197f0b145f2498541 | [
"Apache-2.0"
] | 225 | 2016-03-18T16:14:21.000Z | 2022-03-30T10:21:26.000Z | toolium/driver_wrapper.py | Telefonica/toolium | 3921cf94164ae1a2cd27d94197f0b145f2498541 | [
"Apache-2.0"
] | 65 | 2016-05-12T13:23:56.000Z | 2022-02-16T08:33:18.000Z | # -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging.config
import os
import screeninfo
from toolium.config_driver import ConfigDriver
from toolium.config_parser import ExtendedConfigParser
from toolium.driver_wrappers_pool import DriverWrappersPool
from toolium.utils.driver_utils import Utils
from toolium.utils.path_utils import get_valid_filename
class DriverWrapper(object):
"""Wrapper with the webdriver and the configuration needed to execute tests
:type driver: selenium.webdriver.remote.webdriver.WebDriver or appium.webdriver.webdriver.WebDriver
:type config: toolium.config_parser.ExtendedConfigParser or configparser.ConfigParser
:type utils: toolium.utils.driver_utils.Utils
:type app_strings: dict
:type session_id: str
:type remote_node: str
:type remote_node_video_enabled: bool
:type logger: logging.Logger
:type config_properties_filenames: str
:type config_log_filename: str
:type output_log_filename: str
:type visual_baseline_directory: str
:type baseline_name: str
"""
driver = None #: webdriver instance
config = ExtendedConfigParser() #: driver configuration
utils = None #: test utils instance
app_strings = None #: mobile application strings
session_id = None #: remote webdriver session id
server_type = None #: remote server type
remote_node = None #: remote grid node
remote_node_video_enabled = False #: True if the remote grid node has the video recorder enabled
logger = None #: logger instance
# Configuration and output files
config_properties_filenames = None #: configuration filenames separated by commas
config_log_filename = None #: configuration log file
output_log_filename = None #: output log file
visual_baseline_directory = None #: folder with the baseline images
baseline_name = None #: baseline name
def __init__(self):
if not DriverWrappersPool.is_empty():
# Copy config object and other properties from default driver
default_wrapper = DriverWrappersPool.get_default_wrapper()
self.config = default_wrapper.config.deepcopy()
self.logger = default_wrapper.logger
self.config_properties_filenames = default_wrapper.config_properties_filenames
self.config_log_filename = default_wrapper.config_log_filename
self.output_log_filename = default_wrapper.output_log_filename
self.visual_baseline_directory = default_wrapper.visual_baseline_directory
self.baseline_name = default_wrapper.baseline_name
# Create utils instance and add wrapper to the pool
self.utils = Utils(self)
DriverWrappersPool.add_wrapper(self)
def configure_logger(self, tc_config_log_filename=None, tc_output_log_filename=None):
"""Configure selenium instance logger
:param tc_config_log_filename: test case specific logging config file
:param tc_output_log_filename: test case specific output logger file
"""
# Get config logger filename
config_log_filename = DriverWrappersPool.get_configured_value('Config_log_filename', tc_config_log_filename,
'logging.conf')
config_log_filename = os.path.join(DriverWrappersPool.config_directory, config_log_filename)
# Configure logger only if logging filename has changed
if self.config_log_filename != config_log_filename:
# Get output logger filename
output_log_filename = DriverWrappersPool.get_configured_value('Output_log_filename', tc_output_log_filename,
'toolium.log')
output_log_filename = os.path.join(DriverWrappersPool.output_directory, output_log_filename)
output_log_filename = output_log_filename.replace('\\', '\\\\')
try:
logging.config.fileConfig(config_log_filename, {'logfilename': output_log_filename}, False)
except Exception as exc:
print("[WARN] Error reading logging config file '{}': {}".format(config_log_filename, exc))
self.config_log_filename = config_log_filename
self.output_log_filename = output_log_filename
self.logger = logging.getLogger(__name__)
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None):
"""Configure selenium instance properties
:param tc_config_prop_filenames: test case specific properties filenames
:param behave_properties: dict with behave user data properties
"""
prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames,
'properties.cfg;local-properties.cfg')
prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in
prop_filenames.split(';')]
prop_filenames = ';'.join(prop_filenames)
# Configure config only if properties filename has changed
if self.config_properties_filenames != prop_filenames:
# Initialize the config object
self.config = ExtendedConfigParser.get_config_from_file(prop_filenames)
self.config_properties_filenames = prop_filenames
# Override properties with system properties
self.config.update_properties(os.environ)
# Override properties with behave userdata properties
if behave_properties:
self.config.update_properties(behave_properties)
# Modify config properties before driver creation
self.finalize_properties_configuration()
def finalize_properties_configuration(self):
# Override method if config properties (self.config object) need custom modifications before driver creation
pass
def configure_visual_baseline(self):
"""Configure baseline directory"""
# Get baseline name and translate config variables
baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}')
baseline_name = self.config.translate_config_variables(baseline_name)
# Configure baseline directory if baseline name has changed
if self.baseline_name != baseline_name:
self.baseline_name = baseline_name
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
get_valid_filename(baseline_name))
def update_visual_baseline(self):
"""Configure baseline directory after driver is created"""
# Update baseline with real platformVersion value
if '{PlatformVersion}' in self.baseline_name:
try:
platform_version = self.driver.desired_capabilities['platformVersion']
except KeyError:
platform_version = None
self.baseline_name = self.baseline_name.replace('{PlatformVersion}', str(platform_version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with real version value
if '{Version}' in self.baseline_name:
try:
splitted_version = self.driver.desired_capabilities['version'].split('.')
version = '.'.join(splitted_version[:2])
except KeyError:
version = None
self.baseline_name = self.baseline_name.replace('{Version}', str(version))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
# Update baseline with remote node value
if '{RemoteNode}' in self.baseline_name:
self.baseline_name = self.baseline_name.replace('{RemoteNode}', str(self.remote_node))
self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
self.baseline_name)
def configure(self, tc_config_files, is_selenium_test=True, behave_properties=None):
"""Configure initial selenium instance using logging and properties files for Selenium or Appium tests
:param tc_config_files: test case specific config files
:param is_selenium_test: true if test is a selenium or appium test case
:param behave_properties: dict with behave user data properties
"""
# Configure config and output directories
DriverWrappersPool.configure_common_directories(tc_config_files)
# Configure logger
self.configure_logger(tc_config_files.config_log_filename, tc_config_files.output_log_filename)
# Initialize the config object
self.configure_properties(tc_config_files.config_properties_filenames, behave_properties)
# Configure visual directories
if is_selenium_test:
driver_info = self.config.get('Driver', 'type')
DriverWrappersPool.configure_visual_directories(driver_info)
self.configure_visual_baseline()
def connect(self, maximize=True):
"""Set up the selenium driver and connect to the server
:param maximize: True if the driver should be maximized
:returns: selenium driver
"""
if not self.config.get('Driver', 'type') or self.config.get('Driver', 'type') in ['api', 'no_driver']:
return None
self.driver = ConfigDriver(self.config, self.utils).create_driver()
# Save session id and remote node to download video after the test execution
self.session_id = self.driver.session_id
self.server_type, self.remote_node = self.utils.get_remote_node()
if self.server_type == 'grid':
self.remote_node_video_enabled = self.utils.is_remote_video_enabled(self.remote_node)
else:
self.remote_node_video_enabled = True if self.server_type in ['ggr', 'selenoid'] else False
# Save app_strings in mobile tests
if self.is_mobile_test() and not self.is_web_test() and self.config.getboolean_optional('Driver',
'appium_app_strings'):
self.app_strings = self.driver.app_strings()
if self.is_maximizable():
# Bounds and screen
bounds_x, bounds_y = self.get_config_window_bounds()
self.driver.set_window_position(bounds_x, bounds_y)
self.logger.debug('Window bounds: %s x %s', bounds_x, bounds_y)
# Maximize browser
if maximize:
# Set window size or maximize
window_width = self.config.get_optional('Driver', 'window_width')
window_height = self.config.get_optional('Driver', 'window_height')
if window_width and window_height:
self.driver.set_window_size(window_width, window_height)
else:
self.driver.maximize_window()
# Log window size
window_size = self.utils.get_window_size()
self.logger.debug('Window size: %s x %s', window_size['width'], window_size['height'])
# Update baseline
self.update_visual_baseline()
# Discard previous logcat logs
self.utils.discard_logcat_logs()
# Set implicitly wait timeout
self.utils.set_implicitly_wait()
return self.driver
def get_config_window_bounds(self):
"""Reads bounds from config and, if monitor is specified, modify the values to match with the specified monitor
:return: coords X and Y where set the browser window.
"""
bounds_x = int(self.config.get_optional('Driver', 'bounds_x') or 0)
bounds_y = int(self.config.get_optional('Driver', 'bounds_y') or 0)
monitor_index = int(self.config.get_optional('Driver', 'monitor') or -1)
if monitor_index > -1:
try:
monitor = screeninfo.get_monitors()[monitor_index]
bounds_x += monitor.x
bounds_y += monitor.y
except NotImplementedError:
self.logger.warning('Current environment doesn\'t support get_monitors')
return bounds_x, bounds_y
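    # Worked example (illustrative values, not from any real config): with
    # bounds_x = 10, bounds_y = 20 and monitor = 1, and a second monitor whose
    # origin screeninfo reports at (1920, 0), the window is positioned at
    # (1930, 20); with monitor = -1 the raw bounds (10, 20) are used as-is.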
def is_android_test(self):
"""Check if actual test must be executed in an Android mobile
:returns: True if test must be executed in an Android mobile
"""
return self.utils.get_driver_name() == 'android'
def is_ios_test(self):
"""Check if actual test must be executed in an iOS mobile
:returns: True if test must be executed in an iOS mobile
"""
return self.utils.get_driver_name() in ('ios', 'iphone')
def is_mobile_test(self):
"""Check if actual test must be executed in a mobile
:returns: True if test must be executed in a mobile
"""
return self.is_android_test() or self.is_ios_test()
def is_web_test(self):
"""Check if actual test must be executed in a browser
:returns: True if test must be executed in a browser
"""
appium_browser_name = self.config.get_optional('AppiumCapabilities', 'browserName')
return not self.is_mobile_test() or appium_browser_name not in (None, '')
def is_android_web_test(self):
"""Check if actual test must be executed in a browser of an Android mobile
:returns: True if test must be executed in a browser of an Android mobile
"""
return self.is_android_test() and self.is_web_test()
def is_ios_web_test(self):
"""Check if actual test must be executed in a browser of an iOS mobile
:returns: True if test must be executed in a browser of an iOS mobile
"""
return self.is_ios_test() and self.is_web_test()
def is_maximizable(self):
"""Check if the browser is maximizable
:returns: True if the browser is maximizable
"""
return not self.is_mobile_test()
def should_reuse_driver(self, scope, test_passed, context=None):
"""Check if the driver should be reused
:param scope: execution scope (function, module, class or session)
:param test_passed: True if the test has passed
:param context: behave context
:returns: True if the driver should be reused
"""
reuse_driver = self.config.getboolean_optional('Driver', 'reuse_driver')
reuse_driver_session = self.config.getboolean_optional('Driver', 'reuse_driver_session')
restart_driver_after_failure = (self.config.getboolean_optional('Driver', 'restart_driver_after_failure') or
self.config.getboolean_optional('Driver', 'restart_driver_fail'))
if context and scope == 'function':
reuse_driver = reuse_driver or (hasattr(context, 'reuse_driver_from_tags')
and context.reuse_driver_from_tags)
return (((reuse_driver and scope == 'function') or (reuse_driver_session and scope != 'session'))
and (test_passed or not restart_driver_after_failure))
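    # Decision sketch (restating the rules above; the flag names are the real
    # config options): the driver survives a test only when
    #   (reuse_driver and scope == 'function')
    #       or (reuse_driver_session and scope != 'session')
    # and, additionally, the test passed or restart_driver_after_failure is off.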
def get_driver_platform(self):
"""
Get driver platform where tests are running
:return: platform name
"""
platform = ''
if 'platform' in self.driver.desired_capabilities:
platform = self.driver.desired_capabilities['platform']
elif 'platformName' in self.driver.desired_capabilities:
platform = self.driver.desired_capabilities['platformName']
return platform
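# Usage sketch (hypothetical lifecycle; the wrapper class name and
# tc_config_files come from the surrounding test framework, not this excerpt):
#
#   wrapper = DriverWrapper()
#   wrapper.configure(tc_config_files)   # logging + properties + visual dirs
#   driver = wrapper.connect()           # None for 'api' / 'no_driver' types
#   ...run the test...
#   platform = wrapper.get_driver_platform()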
| 46.536517 | 120 | 0.669584 |
f72945b47d67acc2ed74ec645c6eaf015a73bd05 | 4,214 | py | Python | nicos_mlz/sans1/setups/tisane_multifg.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/sans1/setups/tisane_multifg.py | ess-dmsc/nicos | 755d61d403ff7123f804c45fc80c7ff4d762993b | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/sans1/setups/tisane_multifg.py | mlz-ictrl/nicos | a6de0bc194ba42e3dc04a033713b41b5499ba8e1 | ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z |
description = 'frequency counter, fg1 and fg2'
excludes = ['frequency']
# group = 'lowlevel'
tango_base = 'tango://sans1hw.sans1.frm2:10000/sans1/tisane'
ARMING_STRING_FC = (
':FUNC "FREQ";'
':CALC:AVER 1;'
':CALC:SMO:RESP FAST;'
':CALC:SMO 1;'
':INP:COUP AC;'
':INP:FILT 0;'
':INP:IMP +1.00000000E+006;'
':INP:LEV -1.80000000E+000;'
':INP:LEV:REL +50;'
':INP:LEV:AUTO 0;'
':INP:NREJ 1;'
':INP:PROB +1;'
':INP:RANG +5.00000000E+000;'
':INP:SLOP POS;'
':MMEM:CDIR "INT:\\";'
':OUTP:POL NORM;'
':OUTP 0;'
':SAMP:COUN +1;'
':FREQ:GATE:POL NEG;'
':FREQ:GATE:SOUR TIME;'
':FREQ:GATE:TIME +1.00000000000000E-001;'
':TRIG:COUN +1;'
':TRIG:DEL +0.00000000000000E+000;'
':TRIG:SLOP NEG;'
':TRIG:SOUR IMM;'
)
ARMING_STRING = (
'*RST;'
':SOUR1:FUNC:SHAP SQU;'
':SOUR1:FREQ 5;'
':SOUR1:VOLT 2.4;'
':SOUR1:VOLT:UNIT VPP;'
':SOUR1:VOLT:OFFS 1.3;'
':SOUR1:FUNCtion:SQU:DCYCle 50;'
':SOUR1:AM:STATe OFF;'
':SOUR1:SWEep:STATe OFF;'
':SOUR1:BURSt:MODE TRIG;'
':OUTP1:LOAD 50;'
':OUTP1:POL NORM;'
':TRIG1:SOUR EXT;'
':SOUR1:BURSt:NCYCles 9.9E37;'
':SOUR2:FUNC:SHAP SQU;'
':SOUR2:FREQ 10;'
':SOUR2:VOLT 5;'
':SOUR2:VOLT:UNIT VPP;'
':SOUR2:VOLT:OFFS 1.3;'
':SOUR2:FUNCtion:SQU:DCYCle 50;'
':SOUR2:AM:STATe OFF;'
':SOUR2:SWEep:STATe OFF;'
':SOUR2:BURSt:MODE TRIG;'
':OUTP2:LOAD 50;'
':OUTP2:POL NORM;'
':TRIG2:SOUR EXT;'
':SOUR2:BURSt:NCYCles 9.9E37;'
':SOUR1:BURSt:STATe ON;'
':SOUR2:BURSt:STATe ON;'
':OUTP1 ON;'
':OUTP2 ON;'
)
OFF_STRING = (
':OUTP1 OFF;'
':OUTP2 OFF;'
':SOUR1:BURSt:STATe OFF;'
':SOUR2:BURSt:STATe OFF;'
)
devices = dict(
tisane_fc = device('nicos.devices.entangle.Sensor',
description = "Frequency counter for chopper signal",
tangodevice = "%s/fc1_frequency" % tango_base,
unit = "Hz",
pollinterval = 1,
fmtstr = '%.6f',
),
tisane_fc_trigger = device('nicos_mlz.devices.io_trigger.Trigger',
description = "String blasting device",
tangodevice = "%s/fc1_io" % tango_base,
lowlevel = True,
safesetting = 'idle',
strings = {'idle' : '',
'arm' : ARMING_STRING_FC,
}
),
# tisane_fg1_sample = device('nicos_mlz.sans1.devices.tisane.Burst',
# description = "Signal-generator for sample tisane signal",
# tangodevice = "%s_multifg/ch1_burst" % tango_base,
# frequency = 1000,
# amplitude = 2.5,
# offset = 1.3,
# shape = 'square',
# duty = 50,
# mapping = dict(On = 1, Off = 0),
# ),
# tisane_fg2_det = device('nicos_mlz.sans1.devices.tisane.Burst',
# description = "Signal-generator for detector tisane signal",
# tangodevice = "%s_multifg/ch2_burst" % tango_base,
# frequency = 1000,
# amplitude = 5.0,
# offset = 1.3,
# shape = 'square',
# duty = 50,
# mapping = dict(On = 1, Off = 0),
# ),
tisane_fg_multi = device('nicos_mlz.devices.io_trigger.Trigger',
description = "String blasting device",
tangodevice = "%s_multifg/io" % tango_base,
safesetting = 'idle',
strings = {'idle' : '',
'arm' : ARMING_STRING,
'off' : OFF_STRING,
}
),
)
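# Usage sketch (assumed NICOS session commands; not part of this setup file):
#
#   maw(tisane_fg_multi, 'arm')  # blast ARMING_STRING: burst mode, outputs on
#   maw(tisane_fg_multi, 'off')  # blast OFF_STRING: both outputs off again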
| 32.921875 | 71 | 0.44803 |
f72951655290a417050aa4a7bd1e7df075ef3d8e | 2,724 | py | Python | cloudify_nsx/security/group_dynamic_member.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | ["Apache-2.0"] | 2 | 2017-03-08T21:44:54.000Z | 2019-01-16T06:00:16.000Z | cloudify_nsx/security/group_dynamic_member.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | ["Apache-2.0"] | 3 | 2017-01-26T13:26:31.000Z | 2017-02-03T14:51:45.000Z | cloudify_nsx/security/group_dynamic_member.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | ["Apache-2.0"] | 5 | 2016-12-28T15:26:02.000Z | 2017-01-30T08:46:10.000Z |
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_security_group as nsx_security_group
import cloudify_nsx.library.nsx_common as common
@operation
def create(**kwargs):
kwargs = common.get_properties_update(
'dynamic_member', "security_group_id", kwargs,
target_relationship="cloudify.nsx.relationships.contained_in",
target_property="resource_id"
)
validation_rules = {
"security_group_id": {
"required": True
},
# dynamic member definition
"dynamic_set": {
"required": True
}
}
use_existing, dynamic_member = common.get_properties_and_validate(
'dynamic_member', kwargs, validation_rules
)
resource_id = ctx.instance.runtime_properties.get('resource_id')
if resource_id:
ctx.logger.info("Reused %s" % resource_id)
return
# credentials
client_session = common.nsx_login(kwargs)
resource_id = nsx_security_group.set_dynamic_member(
client_session,
dynamic_member['security_group_id'],
dynamic_member['dynamic_set']
)
ctx.instance.runtime_properties['resource_id'] = resource_id
ctx.logger.info("created %s" % (
resource_id
))
@operation
def delete(**kwargs):
use_existing, dynamic_member = common.get_properties(
'dynamic_member', kwargs
)
if use_existing:
common.remove_properties('dynamic_member')
ctx.logger.info("Used existed")
return
resource_id = ctx.instance.runtime_properties.get('resource_id')
if not resource_id:
common.remove_properties('dynamic_member')
ctx.logger.info("Not fully created, skip")
return
# credentials
client_session = common.nsx_login(kwargs)
common.attempt_with_rerun(
nsx_security_group.del_dynamic_member,
client_session=client_session,
security_group_id=resource_id
)
ctx.logger.info("delete %s" % resource_id)
common.remove_properties('dynamic_member')
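# Illustrative node properties consumed by create() (hypothetical values; the
# authoritative schema is the plugin's node-type definition):
#
#   dynamic_member:
#     security_group_id: securitygroup-10
#     dynamic_set:
#       - dynamic_criteria:
#           key: VM.NAME
#           criteria: contains
#           value: web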
| 29.608696 | 79 | 0.694934 |
f7295938f7a5c8eb813df1e080d84cb28c749497 | 1,628 | py | Python | rally/cli/commands/info.py | LorenzoBianconi/rally | 2bbd7ee590cca048fb4ad6a8eefc484989979ff8 | ["Apache-2.0"] | 1 | 2021-09-29T02:16:09.000Z | 2021-09-29T02:16:09.000Z | rally/cli/commands/info.py | noah8713/rally-ovs | 2434787c2cf4ca267108966c4ddc55ded3c333d9 | ["Apache-2.0"] | 1 | 2020-07-14T11:29:31.000Z | 2020-07-14T11:29:31.000Z | rally/cli/commands/info.py | noah8713/rally-ovs | 2434787c2cf4ca267108966c4ddc55ded3c333d9 | ["Apache-2.0"] | 1 | 2020-07-02T01:33:48.000Z | 2020-07-02T01:33:48.000Z |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from rally.cli import cliutils
from rally.cli.commands import plugin
class InfoCommands(object):
"""[Deprecated since 0.1.1] Allows you to get quick doc of rally entities.
"""
@cliutils.args("--query", dest="query", type=str, help="Search query.")
def find(self, query):
"""Search for an entity that matches the query and print info about it.
:param query: search query.
"""
print("This command was deprecated, and will be removed in 0.2.0 use:")
print("rally plugin show %s" % query)
plugin.PluginCommands().show(query)
return 1
def list(self):
"""List main entities in Rally for which rally info find works.
Lists task scenario groups, deploy engines and server providers.
"""
print("This command was deprecated, and will be removed in 0.2.0 use:")
print("rally plugin list")
plugin.PluginCommands().list()
return 1
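    # Example (hypothetical CLI session): both deprecated entry points simply
    # forward to the `rally plugin` command:
    #
    #   $ rally info find Dummy.dummy   ->  rally plugin show Dummy.dummy
    #   $ rally info list               ->  rally plugin list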
| 33.22449 | 79 | 0.675676 |
f7297e07cf922dfa791b82c22a538dc4f2b6e22c | 1,588 | py | Python | misc/zkbreaker.py | hubo1016/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | ["Apache-2.0"] | 252 | 2015-11-17T14:21:50.000Z | 2022-03-11T10:19:47.000Z | misc/zkbreaker.py | SarahZarei/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | ["Apache-2.0"] | 23 | 2018-01-09T13:28:52.000Z | 2019-12-12T06:11:44.000Z | misc/zkbreaker.py | SarahZarei/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | ["Apache-2.0"] | 37 | 2016-08-03T04:42:22.000Z | 2021-12-30T16:57:10.000Z |
'''
Created on 2016/10/25
:author: hubo
'''
from vlcp.config import config
from vlcp.protocol.zookeeper import ZooKeeper
import vlcp.protocol.zookeeper
from random import random
from vlcp.event.core import syscall_clearqueue
from logging import getLogger
_logger = getLogger(__name__)
@config('protocol.zookeeper')
class BreakingZooKeeper(ZooKeeper):
'''
This evil protocol breaks ZooKeeper connection from time to time to validate your client
and service code
'''
_default_senddrop = 0.001
_default_receivedrop = 0.01
async def _senddata(self, connection, data, container, priority = 0):
if random() < self.senddrop:
_logger.warning("Oops, I break a connection when sending")
await connection.reset(True)
return await ZooKeeper._senddata(self, connection, data, container, priority)
async def requests(self, connection, requests, container, callback=None, priority = 0):
def evil_callback(request, response):
if random() < self.receivedrop:
_logger.warning("Oops, I break a connection when receiving")
connection.subroutine(connection.reset(True), False)
connection.subroutine(connection.syscall_noreturn(syscall_clearqueue(connection.scheduler.queue[('message', connection)])))
if callback:
callback(request, response)
return await ZooKeeper.requests(self, connection, requests, container, evil_callback, priority)
def patch_zookeeper():
vlcp.protocol.zookeeper.ZooKeeper = BreakingZooKeeper
| 34.521739 | 139 | 0.707809 |
f729b7bba4aa0803df14326f38b9ee5b1d94ee72 | 287 | py | Python | dev/ideas/cython/conversion.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | dev/ideas/cython/conversion.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | dev/ideas/cython/conversion.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | from brian2.codegen.runtime.cython_rt.extension_manager import cython_extension_manager
code = '''
def f(ns):
#cdef int n = <int> ns['n']
cdef int n = ns['n']
print n
'''
ns = {
'n':3,
}
mod = cython_extension_manager.create_extension(code)
mod.f(ns) | 19.133333 | 88 | 0.620209 |
f729beec659e8400f9ebf74b21ae35139ed63557 | 6,042 | py | Python | Pyrex/Distutils/build_ext.py | jwilk/Pyrex | 83dfbae1261788933472e3f9c501ad74c61a37c5 | [
"Apache-2.0"
] | 5 | 2019-05-26T20:48:36.000Z | 2021-07-09T01:38:38.000Z | Pyrex/Distutils/build_ext.py | jwilk/Pyrex | 83dfbae1261788933472e3f9c501ad74c61a37c5 | [
"Apache-2.0"
] | null | null | null | Pyrex/Distutils/build_ext.py | jwilk/Pyrex | 83dfbae1261788933472e3f9c501ad74c61a37c5 | [
"Apache-2.0"
] | 1 | 2022-02-10T07:14:58.000Z | 2022-02-10T07:14:58.000Z | """Pyrex.Distutils.build_ext
Implements a version of the Distutils 'build_ext' command, for
building Pyrex extension modules."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id:$"
import sys, os, string, re
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer, newer_group
from distutils import log
from distutils.dir_util import mkpath
try:
from Pyrex.Compiler.Main \
import CompilationOptions, \
default_options as pyrex_default_options, \
compile as pyrex_compile
from Pyrex.Compiler.Errors import PyrexError
except ImportError:
PyrexError = None
from distutils.command import build_ext as _build_ext
extension_name_re = _build_ext.extension_name_re
show_compilers = _build_ext.show_compilers
class build_ext(_build_ext.build_ext):
description = "build C/C++ and Pyrex extensions (compile/link to build directory)"
sep_by = _build_ext.build_ext.sep_by
user_options = _build_ext.build_ext.user_options
boolean_options = _build_ext.build_ext.boolean_options
help_options = _build_ext.build_ext.help_options
# Add the pyrex specific data.
user_options.extend([
('pyrex-cplus', None,
"generate C++ source files"),
('pyrex-create-listing', None,
"write errors to a listing file"),
('pyrex-include-dirs=', None,
"path to the Pyrex include files" + sep_by),
('pyrex-c-in-temp', None,
"put generated C files in temp directory"),
('pyrex-gen-pxi', None,
"generate .pxi file for public declarations"),
])
boolean_options.extend([
'pyrex-cplus', 'pyrex-create-listing', 'pyrex-c-in-temp'
])
def initialize_options(self):
_build_ext.build_ext.initialize_options(self)
self.pyrex_cplus = 0
self.pyrex_create_listing = 0
self.pyrex_include_dirs = None
self.pyrex_c_in_temp = 0
self.pyrex_gen_pxi = 0
def finalize_options (self):
_build_ext.build_ext.finalize_options(self)
if self.pyrex_include_dirs is None:
self.pyrex_include_dirs = []
elif type(self.pyrex_include_dirs) is StringType:
self.pyrex_include_dirs = \
string.split(self.pyrex_include_dirs, os.pathsep)
# finalize_options ()
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
ext.sources = self.pyrex_sources(ext.sources, ext)
self.build_extension(ext)
def pyrex_sources(self, sources, extension):
"""
Walk the list of source files in 'sources', looking for Pyrex
source (.pyx) files. Run Pyrex on all that are found, and return
a modified 'sources' list with Pyrex source files replaced by the
generated C (or C++) files.
"""
if PyrexError == None:
raise DistutilsPlatformError, \
("Pyrex does not appear to be installed "
"on platform '%s'") % os.name
new_sources = []
pyrex_sources = []
pyrex_targets = {}
# Setup create_list and cplus from the extension options if
# Pyrex.Distutils.extension.Extension is used, otherwise just
# use what was parsed from the command-line or the configuration file.
# cplus will also be set to true is extension.language is equal to
# 'C++' or 'c++'.
#try:
# create_listing = self.pyrex_create_listing or \
# extension.pyrex_create_listing
# cplus = self.pyrex_cplus or \
# extension.pyrex_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
#except AttributeError:
# create_listing = self.pyrex_create_listing
# cplus = self.pyrex_cplus or \
# (extension.language != None and \
# extension.language.lower() == 'c++')
create_listing = self.pyrex_create_listing or \
getattr(extension, 'pyrex_create_listing', 0)
cplus = self.pyrex_cplus or getattr(extension, 'pyrex_cplus', 0) or \
(extension.language and extension.language.lower() == 'c++')
pyrex_gen_pxi = self.pyrex_gen_pxi or getattr(extension, 'pyrex_gen_pxi', 0)
# Set up the include_path for the Pyres compiler:
# 1. Start with the command line option.
# 2. Add in any (unique) paths from the extension
# pyrex_include_dirs (if Pyrex.Distutils.extension is used).
# 3. Add in any (unique) paths from the extension include_dirs
includes = self.pyrex_include_dirs
try:
for i in extension.pyrex_include_dirs:
if not i in includes:
includes.append(i)
except AttributeError:
pass
for i in extension.include_dirs:
if not i in includes:
includes.append(i)
# Set the target_ext to '.c'. Pyrex will change this to '.cpp' if
# needed.
if cplus:
target_ext = '.cpp'
else:
target_ext = '.c'
# Decide whether to drop the generated C files into the temp dir
# or the source tree.
if not self.inplace and (self.pyrex_c_in_temp
or getattr(extension, 'pyrex_c_in_temp', 0)):
target_dir = os.path.join(self.build_temp, "pyrex")
else:
target_dir = ""
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".pyx": # Pyrex source file
new_sources.append(os.path.join(target_dir, base + target_ext))
pyrex_sources.append(source)
pyrex_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not pyrex_sources:
return new_sources
for source in pyrex_sources:
target = pyrex_targets[source]
# source_time = os.stat(source).st_mtime
# try:
# target_time = os.stat(target).st_mtime
# newer = source_time > target_time
# except EnvironmentError:
# newer = 1
# if newer:
if self.force or newer(source, target):
log.info("pyrexc %s --> %s", source, target)
self.mkpath(os.path.dirname(target))
options = CompilationOptions(pyrex_default_options,
use_listing_file = create_listing,
include_path = includes,
output_file = target,
cplus = cplus,
generate_pxi = pyrex_gen_pxi)
result = pyrex_compile(source, options=options)
return new_sources
# pyrex_sources ()
# class build_ext
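# Usage sketch (the standard Pyrex-era setup.py pattern; module names are
# illustrative):
#
#   from distutils.core import setup
#   from distutils.extension import Extension
#   from Pyrex.Distutils import build_ext
#
#   setup(name='demo',
#         ext_modules=[Extension('demo', ['demo.pyx'])],
#         cmdclass={'build_ext': build_ext})
#
# Running `python setup.py build_ext` then compiles demo.pyx to C via
# pyrex_sources() before the normal distutils C build runs.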
| 30.984615 | 83 | 0.721119 |
f729ef60b68c40e555be435faed97dbc1fd4116d | 12,824 | py | Python | owtf/plugin/plugin_params.py | alienus/owtf | b6d81fac83c324c2b8c6fe2a974c036881c1fcd0 | ["BSD-3-Clause"] | null | null | null | owtf/plugin/plugin_params.py | alienus/owtf | b6d81fac83c324c2b8c6fe2a974c036881c1fcd0 | ["BSD-3-Clause"] | null | null | null | owtf/plugin/plugin_params.py | alienus/owtf | b6d81fac83c324c2b8c6fe2a974c036881c1fcd0 | ["BSD-3-Clause"] | null | null | null |
"""
owtf.plugin.plugin_params.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Manage parameters to the plugins
"""
import logging
from collections import defaultdict
from owtf.config import config_handler
from owtf.db.database import get_scoped_session
from owtf.managers.error import add_error
from owtf.utils.error import abort_framework
from owtf.utils.logger import logger
from owtf.utils.strings import merge_dicts
class PluginParams(object):
def __init__(self, options):
self.init = False
self.no_args = []
self.logger = logger
self.session = get_scoped_session()
self.logger.setup_logging()
def process_args(self):
"""Process args
:return: True if run is successful
:rtype: `bool`
"""
self.args = defaultdict(list)
for arg in self.raw_args:
if 'O' == arg:
continue
chunks = arg.split('=')
if len(chunks) < 2:
add_error(self.session, "USER ERROR: %s arguments should be in NAME=VALUE format" % str(chunks), 'user')
return False
arg_name = chunks[0]
try:
arg_val = arg.replace(arg_name, '')[1:]
except ValueError:
add_error(self.session, "USER ERROR: %s arguments should be in NAME=VALUE format" % str(arg_name), 'user')
return False
self.args[arg_name] = arg_val
return True
def list_args(self, args, mandatory=True):
"""List of available arguments
:param args: Args
:type args: `dict`
:param mandatory: True/false if mandatory to set
:type mandatory: `bool`
:return: None
:rtype: None
"""
logging.info("") # Newline
if mandatory:
logging.info("mandatory parameters:")
else:
logging.info("Optional parameters:")
for arg_name, arg_description in list(args.items()):
if arg_description is None:
arg_description = ""
logging.info("- %s%s%s" % (arg_name, (30 - len(arg_name)) * '_', arg_description.replace('\n', "\n")))
def get_args_example(self, full_args_list):
"""Arguments for an example plugin
:param full_args_list: Full list of args
:type full_args_list: `dict`
:return: Padded example
:rtype: `str`
"""
args_str = []
        for key, value in list(merge_dicts(full_args_list['Mandatory'], full_args_list['Optional']).items()):
args_str.append(key)
pad = '=? '
return pad.join(args_str) + pad
def show_param_info(self, full_args_list, plugin):
"""Show parameter info for a plugin
:param full_args_list: Full args list
:type full_args_list: `dict`
:param plugin: Plugin
:type plugin: `dict`
:return: None
:rtype: None
"""
logging.info("\nInformation for %s" % self.show_plugin(plugin))
logging.info("\nDescription: %s" % str(full_args_list['Description']))
        self.list_args(full_args_list['Mandatory'], True)
if len(full_args_list['Optional']) > 0:
self.list_args(full_args_list['Optional'], False)
logging.info("\nUsage: %s\n" % self.get_args_example(full_args_list))
abort_framework("User is only viewing options, exiting")
def show_plugin(self, plugin):
"""Show plugin info
:param plugin: Plugin dict
:type plugin: `dict`
:return: Formatted plugin string
:rtype: `str`
"""
return "plugin: {0}/{1}".format(plugin['type'], plugin['file'])
def default_arg_from_config(self, args, arg_name, settings_list):
"""Get default args from config
:param args: args list
:type args: `dict`
:param arg_name: Name of arg to fetch
:type arg_name: `str`
:param settings_list: List of settings
:type settings_list: `list`
:return: True if run is successful
:rtype: `bool`
"""
default_order_str = " (Default order is: %s)" % str(settings_list)
for setting in settings_list:
if config_handler.is_set(setting): # argument is set in config
args[arg_name] = config_handler.get_val(setting)
logging.info("default not passed '%s' to '%s'%s" % (arg_name, str(args[arg_name]), default_order_str))
return True
logging.info("Could not default not passed: '%s'%s" % (arg_name, default_order_str))
return False
def get_arg_list(self, session, arg_list, plugin, mandatory=True):
"""Get args list
:param arg_list: available args
:type arg_list: `dict`
:param plugin: Plugin info
:type plugin: `dict`
:param mandatory: Mandatory to list?
:type mandatory: `bool`
:return: available args for plugins
:rtype: `dict`
"""
if not self.init:
self.init = True
if not self.process_args(): # Process Passed arguments the first time only
return self.ret_arg_error({}, plugin) # Abort processing (invalid data)
args = {}
for arg_name in arg_list:
if arg_name not in self.args:
config_default_order = ["%s_%s_%s" % (plugin['code'], plugin['type'], arg_name),
'%s_%s' % (plugin['code'], arg_name), arg_name]
default = self.default_arg_from_config(args, arg_name, config_default_order)
if default or mandatory is False:
# The Parameter has been defaulted, must skip loop to avoid assignment at the bottom or
# argument is optional = ok to skip
continue
add_error(session, "USER ERROR: %s requires argument: '%s'" % (self.show_plugin(plugin), arg_name),
'user')
return self.ret_arg_error({}, plugin) # Abort processing (invalid data)
args[arg_name] = self.args[arg_name]
return args
def get_arg_error(self, plugin):
"""Set arg error
:param plugin: Plugin dict
:type plugin: `dict`
:return: Argument error for a plugin
:rtype: `bool`
"""
return plugin['argError']
def set_arg_error(self, plugin, error=True):
"""Set arg error for a plugin
:param plugin: Plugin dict
:type plugin: `dict`
:param error: Error or not
:type error: `bool`
:return: None
:rtype: None
"""
plugin['argError'] = error
def ret_arg_error(self, return_val, plugin):
"""Returns the arg error for a plugin
:param return_val: The return value
:type return_val: `bools`
:param plugin: Plugin dict
:type plugin: `dict`
:return: return val
:rtype: `str`
"""
self.set_arg_error(plugin, True)
return return_val
def check_arg_list(self, full_args_list, plugin):
"""Check args list for a plugin
:param full_args_list: Full args list
:type full_args_list: `dict`
:param plugin: Plugin dict
:type plugin: `dict`
:return: True if run successful
:rtype: `bool`
"""
if ('Mandatory' not in full_args_list) or ('Optional' not in full_args_list):
add_error("OWTF PLUGIN BUG: %s requires declared Mandatory and Optional arguments" %
self.show_plugin(plugin))
return self.ret_arg_error(True, plugin)
if 'Description' not in full_args_list:
add_error("OWTF PLUGIN BUG: %s requires a Description" % self.show_plugin(plugin))
return self.ret_arg_error(False, plugin)
return True
def set_args_basic(self, all_args, plugin):
"""Set basic required args
:param all_args: All available args
:type all_args: `dict`
:param plugin: Plugin dict
:type plugin: `dict`
:return: Replaced args list
:rtype: `list`
"""
if not all_args:
return self.no_args
args_str = []
for arg_name, arg_val in list(all_args.items()):
args_str.append(arg_name + "=" + str(self.args[arg_name]))
all_args[arg_name] = arg_val
plugin['args'] = ' '.join(args_str) # Record arguments in plugin dictionary
return [all_args]
def set_config(self, args):
"""Set config for args
:param args: Args to override
:type args: `dict`
:return: None
:rtype: None
"""
for arg_name, arg_val in list(args.items()):
logging.info("Overriding configuration setting '_%s' with value %s.." % (arg_name, str(arg_val)))
config_handler.set_general_val('string', '_%s' % arg_name, arg_val) # Pre-pend "_" to avoid naming collisions
def get_permutations(self, args):
"""Get permutations from args
:param args: Available args
:type args: `dict`
:return: List of permutations
:rtype: `defaultdict`
"""
permutations = defaultdict(list)
if 'REPEAT_DELIM' not in args:
return permutations # No permutations
separator = args['REPEAT_DELIM']
for arg_name, arg_val in list(args.items()):
if arg_name == 'REPEAT_DELIM':
continue # The repeat delimiter is not considered a permutation: It's the permutation delimiter :)
chunks = arg_val.split(separator)
if len(chunks) > 1:
permutations[arg_name] = chunks
return permutations
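    # Worked example (illustrative): with
    #   args = {'REPEAT_DELIM': ',', 'PORT': '80,443', 'HOST': 'a'}
    # get_permutations returns {'PORT': ['80', '443']}; set_permutation then
    # expands the single argument set into
    #   [{..., 'PORT': '80'}, {..., 'PORT': '443'}]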
def set_permutation(self, arg_name, permutations, permutation_list):
"""Add a particular permutation for an arg
:param arg_name: Arg to replace
:type arg_name: `str`
:param permutations: List of permutations
:type permutations: `list`
:param permutation_list: Permutation list
:type permutation_list: `list`
:return: None
:rtype: None
"""
for i in range(0, len(permutation_list)):
count = 0
for perm in permutations:
perm_args = permutation_list[i].copy() # 1st copy by value original arguments
perm_args[arg_name] = perm # 2nd override argument with permutation
if count == 0: # Modify 1st existing record with permutation
permutation_list[i] = perm_args
else:
permutation_list.append(perm_args) # 3rd store each subsequent permutation as a different set of args
count += 1
def set_args(self, all_args, plugin):
"""Set args from all args for a plugin
:param all_args: All available args
:type all_args: `dict`
:param plugin: Plugin
:type plugin: `dict`
:return: List of permutations
:rtype: `list`
"""
arg_list = self.set_args_basic(all_args, plugin)
if not arg_list:
return arg_list # Nothing to do
args = arg_list[0]
permutation_list = [args]
for arg_name, permutations in list(self.get_permutations(args).items()):
self.set_permutation(arg_name, permutations, permutation_list)
if not permutation_list:
return arg_list # No permutations, return original arguments
return permutation_list
def get_args(self, session, full_args_list, plugin):
"""Get args from a full list for a plugin
:param full_args_list: available args
:type full_args_list: `dict`
:param plugin: Plugin
:type plugin: `dict`
:return: None
:rtype: None
"""
self.set_arg_error(plugin, False)
if not self.check_arg_list(full_args_list, plugin):
return self.no_args
if 'O' in self.raw_args: # To view available options
self.show_param_info(full_args_list, plugin)
return self.no_args # Abort processing, just showing options
mandatory = self.get_arg_list(session, full_args_list['Mandatory'], plugin, True)
optional = self.get_arg_list(session, full_args_list['Optional'], plugin, False)
if self.get_arg_error(plugin):
logging.info("")
logging.warn("ERROR: Aborting argument processing, please correct the errors above and try again")
logging.info("")
return self.no_args # Error processing arguments, must abort processing
all_args = merge_dicts(mandatory, optional)
return self.set_args(all_args, plugin)
plugin_params = PluginParams(options={})
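# Usage sketch (hypothetical plugin-side call): a plugin declares its argument
# schema and asks the shared instance to resolve CLI-style NAME=VALUE args:
#
#   arg_sets = plugin_params.get_args(session, {
#       'Description': 'Probe a host',
#       'Mandatory': {'RHOST': 'Target host'},
#       'Optional': {'RPORT': 'Target port'},
#   }, plugin)
#   for args in arg_sets:  # one dict per REPEAT_DELIM permutation
#       run_plugin(args)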
| 37.497076 | 122 | 0.592171 |
f729f12176ee792d4c983261f3ac3717876984d3 | 1,212 | py | Python | eta/t/test_parser.py | lewismj/eta | a07a9c078f8e2c9e166febea0ee61351c25caaa8 | ["BSD-2-Clause"] | null | null | null | eta/t/test_parser.py | lewismj/eta | a07a9c078f8e2c9e166febea0ee61351c25caaa8 | ["BSD-2-Clause"] | null | null | null | eta/t/test_parser.py | lewismj/eta | a07a9c078f8e2c9e166febea0ee61351c25caaa8 | ["BSD-2-Clause"] | null | null | null |
"""
Basic unit tests for the parser class.
"""
import unittest
from eta.parser import parser
from eta.types import Symbol
from lark.visitors import VisitError
class ParserTest(unittest.TestCase):
def test_basic_expressions(self):
try:
parser.parse("(defun (foo x y) (+ x y))")
parser.parse("(+ 21 35 12 7)")
parser.parse("(/ 10 2)")
parser.parse("(defun (len xs) "
"(if (== xs nil) "
" (0) (+ 1 (len (tail xs)))))")
except VisitError:
self.fail("Failed to parse basic expressions.")
def test_expression_structure_basic(self):
ast = parser.parse("(+ 1 2)")
self.assertEqual(1, len(ast))
expression = ast[0]
self.assertEqual(Symbol("+"), expression[0])
self.assertEqual(1, expression[1])
self.assertEqual(2, expression[2])
def test_parsed_as_number(self):
ast = parser.parse("(-1)")
self.assertEqual(1, len(ast))
def make_suite():
    # makeSuite's second argument is the test-method-name prefix; the default
    # 'test' is what actually matches the test_* methods defined above.
    return unittest.makeSuite(ParserTest)
if __name__ == '__main__':
suite = make_suite()
runner = unittest.TextTestRunner()
runner.run(suite)
| 27.545455 | 59 | 0.584158 |
f72a1413848ce1eae8308309d1a389688e408204 | 7,059 | py | Python | tests/test_util.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | ["Apache-2.0"] | null | null | null | tests/test_util.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | ["Apache-2.0"] | null | null | null | tests/test_util.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
from hashlib import sha1
from textwrap import dedent
from pex.common import safe_mkdir, safe_open, temporary_dir, touch
from pex.compatibility import to_bytes
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.typing import TYPE_CHECKING, cast
from pex.util import CacheHelper, DistributionHelper, iter_pth_paths, named_temporary_file
try:
from unittest import mock
except ImportError:
import mock # type: ignore[no-redef,import]
if TYPE_CHECKING:
from typing import Any, Dict, List
def test_access_zipped_assets():
# type: (...) -> None
pex_third_party_asset_dir = DistributionHelper.access_zipped_assets("pex", "third_party")
resources = os.listdir(pex_third_party_asset_dir)
assert (
len(resources) > 0
), "The pex.third_party package should contain at least an __init__.py file."
resources.remove("__init__.py")
for path in resources:
assert path in (
"__init__.pyc",
"__init__.pyo",
"__pycache__",
), "Expected only __init__.py (and its compilations) in the pex.third_party package."
def test_hash():
# type: () -> None
empty_hash_digest = sha1().hexdigest()
with named_temporary_file() as fp:
fp.flush()
assert empty_hash_digest == CacheHelper.hash(fp.name)
with named_temporary_file() as fp:
string = b"asdf" * 1024 * sha1().block_size + b"extra padding"
fp.write(string)
fp.flush()
assert sha1(string).hexdigest() == CacheHelper.hash(fp.name)
with named_temporary_file() as fp:
empty_hash = sha1()
fp.write(b"asdf")
fp.flush()
hash_output = CacheHelper.hash(fp.name, digest=empty_hash)
assert hash_output == empty_hash.hexdigest()
def test_dir_hash():
# type: () -> None
with temporary_dir() as tmp_dir:
safe_mkdir(os.path.join(tmp_dir, "a", "b"))
with safe_open(os.path.join(tmp_dir, "c", "d", "e.py"), "w") as fp:
fp.write("contents1")
with safe_open(os.path.join(tmp_dir, "f.py"), "w") as fp:
fp.write("contents2")
hash1 = CacheHelper.dir_hash(tmp_dir)
os.rename(os.path.join(tmp_dir, "c"), os.path.join(tmp_dir, "c-renamed"))
assert hash1 != CacheHelper.dir_hash(tmp_dir)
os.rename(os.path.join(tmp_dir, "c-renamed"), os.path.join(tmp_dir, "c"))
assert hash1 == CacheHelper.dir_hash(tmp_dir)
touch(os.path.join(tmp_dir, "c", "d", "e.pyc"))
assert hash1 == CacheHelper.dir_hash(tmp_dir)
touch(os.path.join(tmp_dir, "c", "d", "e.pyc.123456789"))
assert hash1 == CacheHelper.dir_hash(tmp_dir)
pycache_dir = os.path.join(tmp_dir, "__pycache__")
safe_mkdir(pycache_dir)
touch(os.path.join(pycache_dir, "f.pyc"))
assert hash1 == CacheHelper.dir_hash(tmp_dir)
touch(os.path.join(pycache_dir, "f.pyc.123456789"))
assert hash1 == CacheHelper.dir_hash(tmp_dir)
touch(os.path.join(pycache_dir, "f.py"))
assert hash1 == CacheHelper.dir_hash(
tmp_dir
), "All content under __pycache__ directories should be ignored."
try:
import __builtin__ as python_builtins # type: ignore[import]
except ImportError:
import builtins as python_builtins # type: ignore[no-redef]
def assert_access_zipped_assets(distribution_helper_import):
# type: (str) -> bytes
test_executable = dedent(
"""
import os
{distribution_helper_import}
temp_dir = DistributionHelper.access_zipped_assets('my_package', 'submodule')
with open(os.path.join(temp_dir, 'mod.py'), 'r') as fp:
for line in fp:
print(line)
""".format(
distribution_helper_import=distribution_helper_import
)
)
with temporary_dir() as td1, temporary_dir() as td2:
pb = PEXBuilder(path=td1)
with open(os.path.join(td1, "exe.py"), "w") as fp:
fp.write(test_executable)
pb.set_executable(fp.name)
submodule = os.path.join(td1, "my_package", "submodule")
safe_mkdir(submodule)
mod_path = os.path.join(submodule, "mod.py")
with open(mod_path, "w") as fp:
fp.write("accessed")
pb.add_source(fp.name, "my_package/submodule/mod.py")
pb.add_source(None, "my_package/__init__.py")
pb.add_source(None, "my_package/submodule/__init__.py")
pex = os.path.join(td2, "app.pex")
pb.build(pex)
process = PEX(pex, interpreter=pb.interpreter).run(
blocking=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = process.communicate()
assert process.returncode == 0
assert b"accessed\n" == stdout
return cast(bytes, stderr)
def test_access_zipped_assets_integration():
# type: () -> None
stderr = assert_access_zipped_assets("from pex.util import DistributionHelper")
assert b"" == stderr.strip()
def test_named_temporary_file():
# type: () -> None
with named_temporary_file() as fp:
name = fp.name
fp.write(b"hi")
fp.flush()
assert os.path.exists(name)
with open(name) as new_fp:
assert new_fp.read() == "hi"
assert not os.path.exists(name)
@mock.patch("os.path.exists", autospec=True, spec_set=True)
def test_iter_pth_paths(mock_exists):
# type: (Any) -> None
# Ensure path checking always returns True for dummy paths.
mock_exists.return_value = True
with temporary_dir() as tmpdir:
in_tmp = lambda f: os.path.join(tmpdir, f)
PTH_TEST_MAPPING = {
# A mapping of .pth file content -> expected paths.
"/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python\n": [
"/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
],
"relative_path\nrelative_path2\n\nrelative_path3": [
in_tmp("relative_path"),
in_tmp("relative_path2"),
in_tmp("relative_path3"),
],
"duplicate_path\nduplicate_path": [in_tmp("duplicate_path")],
"randompath\nimport nosuchmodule\n": [in_tmp("randompath")],
"import sys\nfoo\n/bar/baz": [in_tmp("foo"), "/bar/baz"],
"import nosuchmodule\nfoo": [],
"import nosuchmodule\n": [],
"import bad)syntax\n": [],
} # type: Dict[str, List[str]]
for i, pth_content in enumerate(PTH_TEST_MAPPING):
pth_tmp_path = os.path.abspath(os.path.join(tmpdir, "test%s.pth" % i))
with open(pth_tmp_path, "wb") as f:
f.write(to_bytes(pth_content))
assert sorted(PTH_TEST_MAPPING[pth_content]) == sorted(
list(iter_pth_paths(pth_tmp_path))
)
| 35.832487 | 93 | 0.632243 |
f72a14cfa2d25b558ea211b3bf08c71152c5cdcc | 8,959 | py | Python | src/sage/knots/knot.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null | src/sage/knots/knot.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null | src/sage/knots/knot.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null |
r"""
Knots
AUTHORS:
- Miguel Angel Marco Buzunariz
- Amit Jamadagni
"""
#*****************************************************************************
# Copyright (C) 2014 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.knots.link import Link
from sage.rings.integer import Integer
from sage.rings.finite_rings.integer_mod import Mod
class Knot(Link):
"""
A knot.
    A knot is defined as an embedding of the circle `\mathbb{S}^1` in the
3-dimensional sphere `\mathbb{S}^3`, considered up to ambient isotopy.
They represent the physical idea of a knotted rope, but with the
particularity that the rope is closed. That is, the ends of the rope
are joined.
.. SEEALSO::
:class:`Link`
INPUT:
- ``data`` -- see :class:`Link` for the allowable inputs
- ``check`` -- optional, default ``True``. If ``True``, make sure
that the data define a knot, not a link
EXAMPLES:
We construct the knot `8_{14}` and compute some invariants::
sage: B = BraidGroup(4)
sage: K = Knot(B([1,1,1,2,-1,2,-3,2,-3]))
.. PLOT::
:width: 300 px
B = BraidGroup(4)
K = Knot(B([1,1,1,2,-1,2,-3,2,-3]))
sphinx_plot(K.plot())
::
sage: K.alexander_polynomial()
-2*t^-2 + 8*t^-1 - 11 + 8*t - 2*t^2
sage: K.jones_polynomial()
t^7 - 3*t^6 + 4*t^5 - 5*t^4 + 6*t^3 - 5*t^2 + 4*t + 1/t - 2
sage: K.determinant()
31
sage: K.signature()
-2
REFERENCES:
- :wikipedia:`Knot_(mathematics)`
.. TODO::
- Make a class Knots for the monoid of all knots and have this be an
element in that monoid.
"""
def __init__(self, data, check=True):
"""
Initialize ``self``.
TESTS::
sage: B = BraidGroup(8)
sage: K = Knot(B([-1, -1, -1, 2, 1, -2, 3, -2, 3]))
sage: TestSuite(K).run()
sage: K = Knot(B([1, -2, 1, -2]))
sage: TestSuite(K).run()
sage: K = Knot([[1, 1, 2, 2]])
sage: TestSuite(K).run()
The following is not a knot: it has two components. ::
sage: Knot([[[1, 2], [-2, -1]], [1, -1]])
Traceback (most recent call last):
...
ValueError: the input has more than 1 connected component
sage: Knot([[[1, 2], [-2, -1]], [1, -1]], check=False)
Knot represented by 2 crossings
"""
Link.__init__(self, data)
if check:
if self.number_of_components() != 1:
raise ValueError("the input has more than 1 connected component")
def __repr__(self):
"""
Return a string representation.
EXAMPLES::
sage: B = BraidGroup(8)
sage: K = Knot(B([1, 2, 1, 2]))
sage: K
Knot represented by 4 crossings
sage: K = Knot([[1, 7, 2, 6], [7, 3, 8, 2], [3, 11, 4, 10], [11, 5, 12, 4], [14, 5, 1, 6], [13, 9, 14, 8], [12, 9, 13, 10]])
sage: K
Knot represented by 7 crossings
"""
pd_len = len(self.pd_code())
return 'Knot represented by {} crossings'.format(pd_len)
def dt_code(self):
"""
Return the DT code of ``self``.
ALGORITHM:
The DT code is generated by the following way:
Start moving along the knot, as we encounter the crossings we
start numbering them, so every crossing has two numbers assigned to
it once we have traced the entire knot. Now we take the even number
associated with every crossing.
The following sign convention is to be followed:
Take the even number with a negative sign if it is an overcrossing
that we are encountering.
OUTPUT: DT code representation of the knot
EXAMPLES::
sage: K = Knot([[1,5,2,4],[5,3,6,2],[3,1,4,6]])
sage: K.dt_code()
[4, 6, 2]
sage: B = BraidGroup(4)
sage: K = Knot(B([1, 2, 1, 2]))
sage: K.dt_code()
[4, -6, 8, -2]
sage: K = Knot([[[1, -2, 3, -4, 5, -1, 2, -3, 4, -5]], [1, 1, 1, 1, 1]])
sage: K.dt_code()
[6, 8, 10, 2, 4]
"""
b = self.braid().Tietze()
N = len(b)
label = [0 for i in range(2 * N)]
string = 1
next_label = 1
type1 = 0
crossing = 0
while next_label <= 2 * N:
string_found = False
for i in range(crossing, N):
if abs(b[i]) == string or abs(b[i]) == string - 1:
string_found = True
crossing = i
break
if not string_found:
for i in range(0, crossing):
if abs(b[i]) == string or abs(b[i]) == string - 1:
string_found = True
crossing = i
break
assert label[2 * crossing + next_label % 2] != 1, "invalid knot"
label[2 * crossing + next_label % 2] = next_label
next_label = next_label + 1
if type1 == 0:
if b[crossing] < 0:
type1 = 1
else:
type1 = -1
else:
type1 = -1 * type1
if ((abs(b[crossing]) == string and b[crossing] * type1 > 0)
or (abs(b[crossing]) != string and b[crossing] * type1 < 0)):
if next_label % 2 == 1:
label[2 * crossing] = label[2 * crossing] * -1
if abs(b[crossing]) == string:
string = string + 1
else:
string = string - 1
crossing = crossing + 1
code = [0 for i in range(N)]
for i in range(N):
for j in range(N):
if label[2 * j + 1] == 2 * i + 1:
code[i] = label[2 * j]
break
return code
def arf_invariant(self):
"""
Return the Arf invariant.
EXAMPLES::
sage: B = BraidGroup(4)
sage: K = Knot(B([-1, 2, 1, 2]))
sage: K.arf_invariant()
0
sage: B = BraidGroup(8)
sage: K = Knot(B([-2, 3, 1, 2, 1, 4]))
sage: K.arf_invariant()
0
sage: K = Knot(B([1, 2, 1, 2]))
sage: K.arf_invariant()
1
"""
a = self.alexander_polynomial()
if Mod(a(-1), 8) == 1 or Mod(a(-1), 8) == 7:
return 0
return 1
def connected_sum(self, other):
r"""
Return the oriented connected sum of ``self`` and ``other``.
.. NOTE::
We give the knots an orientation based upon the braid
representation.
INPUT:
- ``other`` -- a knot
OUTPUT:
A knot equivalent to the connected sum of ``self`` and ``other``.
EXAMPLES::
sage: B = BraidGroup(2)
sage: trefoil = Knot(B([1,1,1]))
sage: K = trefoil.connected_sum(trefoil); K
Knot represented by 6 crossings
sage: K.braid()
s0^3*s1*s0^3*s1^-1
.. PLOT::
:width: 300 px
B = BraidGroup(2)
trefoil = Knot(B([1,1,1]))
K = trefoil.connected_sum(trefoil)
sphinx_plot(K.plot())
::
sage: rev_trefoil = Knot(B([-1,-1,-1]))
sage: K = trefoil.connected_sum(rev_trefoil); K
Knot represented by 6 crossings
sage: K.braid()
s0^3*s1*s0^-3*s1^-1
.. PLOT::
:width: 300 px
B = BraidGroup(2)
t = Knot(B([1,1,1]))
tr = Knot(B([-1,-1,-1]))
K = t.connected_sum(tr)
sphinx_plot(K.plot())
REFERENCES:
- :wikipedia:`Connected_sum`
"""
from copy import deepcopy
from sage.functions.generalized import sign
ogc1 = deepcopy(self.oriented_gauss_code())
ogc2 = deepcopy(other.oriented_gauss_code())
# how much we have to "displace" the numbering of the crossings of other
m1 = max([abs(i) for i in ogc1[0][0]])
m2 = min([abs(i) for i in ogc2[0][0]])
n = m1 - m2 + 1
# construct the oriented gauss code of the result
ogc2[0][0] = [a+int(sign(a))*n for a in ogc2[0][0]]
nogc = [[ogc1[0][0]+ogc2[0][0]],ogc1[1]+ogc2[1]]
return Knot(nogc)
| 30.063758 | 136 | 0.480076 |
f72a14f414a54cde57d44cf0105f7d6ea3a142f8 | 3,674 | py | Python | matplotlib2tikz/patch.py | jameshensman/matplotlib2tikz | 53cd52529c13b08221f962f1a338d33c055132ee | ["MIT"] | 1 | 2021-05-25T20:47:41.000Z | 2021-05-25T20:47:41.000Z | matplotlib2tikz/patch.py | jameshensman/matplotlib2tikz | 53cd52529c13b08221f962f1a338d33c055132ee | ["MIT"] | null | null | null | matplotlib2tikz/patch.py | jameshensman/matplotlib2tikz | 53cd52529c13b08221f962f1a338d33c055132ee | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
import matplotlib as mpl
from . import path as mypath
def draw_patch(data, obj):
"""Return the PGFPlots code for patches.
"""
# Gather the draw options.
data, draw_options = mypath.get_draw_options(
data, obj.get_edgecolor(), obj.get_facecolor()
)
if isinstance(obj, mpl.patches.Rectangle):
# rectangle specialization
return _draw_rectangle(data, obj, draw_options)
elif isinstance(obj, mpl.patches.Ellipse):
# ellipse specialization
return _draw_ellipse(data, obj, draw_options)
# regular patch
return mypath.draw_path(data, obj.get_path(), draw_options=draw_options)
def draw_patchcollection(data, obj):
"""Returns PGFPlots code for a number of patch objects.
"""
content = []
# Gather the draw options.
try:
edge_color = obj.get_edgecolor()[0]
except IndexError:
edge_color = None
try:
face_color = obj.get_facecolor()[0]
except IndexError:
face_color = None
data, draw_options = mypath.get_draw_options(data, edge_color, face_color)
for path in obj.get_paths():
data, cont = mypath.draw_path(data, path, draw_options=draw_options)
content.append(cont)
return data, content
def _draw_rectangle(data, obj, draw_options):
"""Return the PGFPlots code for rectangles.
"""
# Objects with labels are plot objects (from bar charts, etc).
# Even those without labels explicitly set have a label of
# "_nolegend_". Everything else should be skipped because
    # they likely correspond to axis/legend objects which are
# handled by PGFPlots
label = obj.get_label()
if label == "":
return data, []
# get real label, bar charts by default only give rectangles
# labels of "_nolegend_"
# See
# <http://stackoverflow.com/questions/35881290/how-to-get-the-label-on-bar-plot-stacked-bar-plot-in-matplotlib>
handles, labels = obj.axes.get_legend_handles_labels()
labelsFound = [
label for h, label in zip(handles, labels) if obj in h.get_children()
]
if len(labelsFound) == 1:
label = labelsFound[0]
legend = ""
if label != "_nolegend_" and label not in data["rectangle_legends"]:
data["rectangle_legends"].add(label)
legend = ("\\addlegendimage{ybar,ybar legend,%s};\n") % (",".join(draw_options))
left_lower_x = obj.get_x()
left_lower_y = obj.get_y()
cont = (
"%s\\draw[%s] (axis cs:%.15g,%.15g) " "rectangle (axis cs:%.15g,%.15g);\n"
) % (
legend,
",".join(draw_options),
left_lower_x,
left_lower_y,
left_lower_x + obj.get_width(),
left_lower_y + obj.get_height(),
)
return data, cont
def _draw_ellipse(data, obj, draw_options):
"""Return the PGFPlots code for ellipses.
"""
if isinstance(obj, mpl.patches.Circle):
# circle specialization
return _draw_circle(data, obj, draw_options)
x, y = obj.center
cont = (
"\\draw[%s, rotate around={%.15g:(%.15g,%.15g)}] (axis cs:%.15g,%.15g) ellipse (%.15g and %.15g);\n"
% (
",".join(draw_options),
obj.angle,
x,
y,
x,
y,
0.5 * obj.width,
0.5 * obj.height,
)
)
return data, cont
def _draw_circle(data, obj, draw_options):
"""Return the PGFPlots code for circles.
"""
x, y = obj.center
cont = "\\draw[%s] (axis cs:%.15g,%.15g) circle (%.15g);\n" % (
",".join(draw_options),
x,
y,
obj.get_radius(),
)
return data, cont
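# Illustrative output (hypothetical draw options and coordinates): a circle
# patch of radius 1 centred at (1, 2) comes out of _draw_circle roughly as
#
#   \draw[red] (axis cs:1,2) circle (1);
#
# matching the format string above.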
| 28.929134 | 115 | 0.60724 |
f72a1694183b61ee3827e0b4a3909cca8c657eca | 32,592 | py | Python | lib/googlecloudsdk/core/console/console_io.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | ["Apache-2.0"] | null | null | null | lib/googlecloudsdk/core/console/console_io.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | ["Apache-2.0"] | null | null | null | lib/googlecloudsdk/core/console/console_io.py | ianel20/google-cloud-sdk | 36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3 | ["Apache-2.0"] | 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General console printing utilities used by the Cloud SDK."""
import logging
import os
import re
import sys
import textwrap
import threading
import time
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_pager
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.py27 import py27_subprocess as subprocess
FLOAT_COMPARE_EPSILON = 1e-6
class Error(exceptions.Error):
"""Base exception for the module."""
pass
class UnattendedPromptError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(UnattendedPromptError, self).__init__(
'This prompt could not be answered because you are not in an '
'interactive session. You can re-run the command with the --quiet '
'flag to accept default answers for all prompts.')
class OperationCancelledError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(OperationCancelledError, self).__init__('Operation cancelled.')
class TablePrinter(object):
"""Provides the ability to print a list of items as a formatted table.
Using this class helps you adhere to the gcloud style guide.
The table will auto size the columns to fit the maximum item length for that
column. You can also choose how to justify each column and to add extra
padding to each column.
"""
JUSTIFY_LEFT = '<'
JUSTIFY_RIGHT = '>'
JUSTIFY_CENTER = '^'
def __init__(self, headers, title=None,
justification=None, column_padding=None):
"""Creates a new TablePrinter.
Args:
headers: A tuple of strings that represent the column headers titles.
This can be a tuple of empty strings or None's if you do not want
headers displayed. The number of empty elements in the tuple must match
the number of columns you want to display.
title: str, An optional title for the table.
justification: A tuple of JUSTIFY_LEFT, JUSTIFY_RIGHT, JUSTIFY_CENTER that
describes the justification for each column. This must have the same
number of items as the headers tuple.
column_padding: A tuple of ints that describes the extra padding that
should be added to each column. This must have the same
number of items as the headers tuple.
Raises:
ValueError: If the justification or column_padding tuples are not of the
correct type or length.
"""
self.__headers = [h if h else '' for h in headers]
self.__title = title
self.__num_columns = len(self.__headers)
self.__header_widths = [len(str(x)) for x in self.__headers]
self.__column_padding = column_padding
if self.__column_padding is None:
self.__column_padding = tuple([0] * self.__num_columns)
    if (not isinstance(self.__column_padding, tuple) or
len(self.__column_padding) != self.__num_columns):
raise ValueError('Column padding tuple does not have {0} columns'
.format(self.__num_columns))
self.__justification = justification
if self.__justification is None:
self.__justification = tuple([TablePrinter.JUSTIFY_LEFT] *
self.__num_columns)
if (not isinstance(self.__justification, tuple) or
len(self.__justification) != self.__num_columns):
raise ValueError('Justification tuple does not have {0} columns'
.format(self.__num_columns))
for value in self.__justification:
if not (value is TablePrinter.JUSTIFY_LEFT or
value is TablePrinter.JUSTIFY_RIGHT or
value is TablePrinter.JUSTIFY_CENTER):
raise ValueError('Justification values must be one of JUSTIFY_LEFT, '
'JUSTIFY_RIGHT, or JUSTIFY_CENTER')
def SetTitle(self, title):
"""Sets the title of the table.
Args:
title: str, The new title.
"""
self.__title = title
def Log(self, rows, logger=None, level=logging.INFO):
"""Logs the given rows to the given logger.
Args:
rows: list of tuples, The rows to log the formatted table for.
logger: logging.Logger, The logger to do the logging. If None, the root
logger will be used.
level: logging level, An optional override for the logging level, INFO by
default.
"""
if not logger:
logger = log.getLogger()
lines = self.GetLines(rows)
for line in lines:
logger.log(level, line)
def Print(self, rows, output_stream=None, indent=0):
"""Prints the given rows to stdout.
Args:
rows: list of tuples, The rows to print the formatted table for.
      output_stream: file-like object, The stream to write the rows to. Defaults
to log.out if not given.
indent: int, The number of spaces to indent all lines of the table.
"""
if not output_stream:
output_stream = log.out
lines = self.GetLines(rows, indent=indent)
for line in lines:
output_stream.write(line + '\n')
def GetLines(self, rows, indent=0):
"""Gets a list of strings of formatted lines for the given rows.
Args:
rows: list of tuples, The rows to get the formatted table for.
indent: int, The number of spaces to indent all lines of the table.
Returns:
list of str, The lines of the formatted table that can be printed.
Raises:
ValueError: If any row does not have the correct number of columns.
"""
column_widths = list(self.__header_widths)
for row in rows:
if len(row) != self.__num_columns:
raise ValueError('Row [{row}] does not have {rows} columns'
.format(row=row, rows=self.__num_columns))
# Find the max width of each column
for i in range(self.__num_columns):
column_widths[i] = max(column_widths[i], len(str(row[i])))
# Add padding
column_widths = [column_widths[i] + self.__column_padding[i]
for i in range(self.__num_columns)]
total_width = (len(column_widths) - 1) * 3
for width in column_widths:
total_width += width
edge_line = ('--' +
'---'.join(['-' * width for width in column_widths]) +
'--')
title_divider_line = ('|-' +
'---'.join(['-' * width for width in column_widths]) +
'-|')
divider_line = ('|-' +
'-+-'.join(['-' * width for width in column_widths]) +
'-|')
lines = [edge_line]
if self.__title:
title_line = '| {{title:{justify}{width}s}} |'.format(
justify=TablePrinter.JUSTIFY_CENTER, width=total_width).format(
title=self.__title)
lines.append(title_line)
lines.append(title_divider_line)
# Generate format strings with the correct width for each column
column_formats = []
for i in range(self.__num_columns):
column_formats.append('{{i{i}:{justify}{width}s}}'.format(
i=i, justify=self.__justification[i], width=column_widths[i]))
pattern = '| ' + ' | '.join(column_formats) + ' |'
def _ParameterizedArrayDict(array):
return dict(('i{i}'.format(i=i), array[i]) for i in range(len(array)))
if [h for h in self.__headers if h]:
# Only print headers if there is at least one non-empty header
lines.append(pattern.format(**_ParameterizedArrayDict(self.__headers)))
lines.append(divider_line)
lines.extend([pattern.format(**_ParameterizedArrayDict(row))
for row in rows])
lines.append(edge_line)
if indent:
return [(' ' * indent) + l for l in lines]
return lines
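# Illustrative usage (editor's sketch, not part of the original module; the
# column titles and row values below are hypothetical):
def _ExampleTablePrinterUsage():
  """Renders a small two-column table with a title to log.out."""
  printer = TablePrinter(('NAME', 'STATUS'), title='Instances',
                         justification=(TablePrinter.JUSTIFY_LEFT,
                                        TablePrinter.JUSTIFY_RIGHT),
                         column_padding=(1, 1))
  printer.Print([('instance-1', 'RUNNING'), ('instance-2', 'TERMINATED')])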
class ListPrinter(object):
"""Provides the ability to print a list of items as a formatted list.
Using this class helps you adhere to the gcloud style guide.
"""
def __init__(self, title):
"""Create a titled list printer that can print rows to stdout.
Args:
title: A string for the title of the list.
"""
self.__title = title
def Print(self, rows, output_stream=None):
"""Print this list with the provided rows to stdout.
Args:
rows: A list of objects representing the rows of this list. Before being
printed, they will be converted to strings.
      output_stream: file-like object, The stream to write the rows to. Defaults
to log.out if not given.
"""
if not output_stream:
output_stream = log.out
output_stream.write(self.__title + '\n')
for row in rows:
output_stream.write(' - ' + str(row) + '\n')
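# Illustrative usage (editor's sketch; the title and rows are hypothetical):
def _ExampleListPrinterUsage():
  """Prints a short titled bullet list to log.out."""
  ListPrinter('Enabled services:').Print(['compute', 'storage'])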
TEXTWRAP = textwrap.TextWrapper(replace_whitespace=False,
drop_whitespace=False,
break_on_hyphens=False)
def _DoWrap(message):
"""Text wrap the given message and correctly handle newlines in the middle.
Args:
message: str, The message to wrap. It may have newlines in the middle of
it.
Returns:
str, The wrapped message.
"""
return '\n'.join([TEXTWRAP.fill(line) for line in message.splitlines()])
def _RawInput(prompt=None):
"""A simple redirect to the built-in raw_input function.
If the prompt is given, it is correctly line wrapped.
Args:
prompt: str, An optional prompt.
Returns:
The input from stdin.
"""
if prompt:
sys.stderr.write(_DoWrap(prompt))
try:
return raw_input()
except EOFError:
return None
def IsInteractive(output=False, error=False, heuristic=False):
"""Determines if the current terminal session is interactive.
sys.stdin must be a terminal input stream.
Args:
output: If True then sys.stdout must also be a terminal output stream.
error: If True then sys.stderr must also be a terminal output stream.
heuristic: If True then we also do some additional heuristics to check if
we are in an interactive context. Checking home path for example.
Returns:
True if the current terminal session is interactive.
"""
if not sys.stdin.isatty():
return False
if output and not sys.stdout.isatty():
return False
if error and not sys.stderr.isatty():
return False
if heuristic:
# Check the home path. Most startup scripts for example are executed by
# users that don't have a home path set. Home is OS dependent though, so
# check everything.
# *NIX OS usually sets the HOME env variable. It is usually '/home/user',
# but can also be '/root'. If it's just '/' we are most likely in an init
# script.
# Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
# probably being run from a task scheduler context. HOMEPATH can be '\'
# when a user has a network mapped home directory.
# Cygwin has it all! Both Windows and Linux. Checking both is perfect.
home = os.getenv('HOME')
homepath = os.getenv('HOMEPATH')
if not homepath and (not home or home == '/'):
return False
return True
def CanPrompt():
"""Returns true if we can prompt the user for information.
This combines all checks (IsInteractive(), disable_prompts is False) to
verify that we can prompt the user for information.
Returns:
bool, True if we can prompt the user for information.
"""
return (IsInteractive(error=True) and
not properties.VALUES.core.disable_prompts.GetBool())
def PromptContinue(message=None, prompt_string=None, default=True,
throw_if_unattended=False, cancel_on_no=False):
"""Prompts the user a yes or no question and asks if they want to continue.
Args:
message: str, The prompt to print before the question.
prompt_string: str, An alternate yes/no prompt to display. If None, it
defaults to 'Do you want to continue'.
default: bool, What the default answer should be. True for yes, False for
no.
throw_if_unattended: bool, If True, this will throw if there was nothing
to consume on stdin and stdin is not a tty.
cancel_on_no: bool, If True and the user answers no, throw an exception to
cancel the entire operation. Useful if you know you don't want to
continue doing anything and don't want to have to raise your own
exception.
Raises:
UnattendedPromptError: If there is no input to consume and this is not
running in an interactive terminal.
OperationCancelledError: If the user answers no and cancel_on_no is True.
Returns:
bool, False if the user said no, True if the user said anything else or if
prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
if not default and cancel_on_no:
raise OperationCancelledError()
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n\n')
if not prompt_string:
prompt_string = 'Do you want to continue'
if default:
prompt_string += ' (Y/n)? '
else:
prompt_string += ' (y/N)? '
sys.stderr.write(_DoWrap(prompt_string))
def GetAnswer():
while True:
answer = _RawInput()
# pylint:disable=g-explicit-bool-comparison, We explicitly want to
# distinguish between empty string and None.
if answer == '':
# User just hit enter, return default.
sys.stderr.write('\n')
return default
elif answer is None:
# This means we hit EOF, no input or user closed the stream.
if throw_if_unattended and not IsInteractive():
sys.stderr.write('\n')
raise UnattendedPromptError()
else:
sys.stderr.write('\n')
return default
elif answer.lower() in ['y', 'yes']:
sys.stderr.write('\n')
return True
elif answer.lower() in ['n', 'no']:
sys.stderr.write('\n')
return False
else:
sys.stderr.write("Please enter 'y' or 'n': ")
answer = GetAnswer()
if not answer and cancel_on_no:
raise OperationCancelledError()
return answer
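# Illustrative usage (editor's sketch; the message text is hypothetical).
# With cancel_on_no=True the caller needs no explicit 'if' around the call:
def _ExamplePromptContinueUsage():
  """Asks for confirmation, raising OperationCancelledError on 'n'."""
  PromptContinue(message='This will delete 3 instances.',
                 prompt_string='Proceed with deletion',
                 default=False, cancel_on_no=True)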
def PromptResponse(message):
"""Prompts the user for a string.
Args:
message: str, The prompt to print before the question.
Returns:
str, The string entered by the user, or None if prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return None
response = _RawInput(message)
return response
def PromptWithDefault(message, default=None):
"""Prompts the user for a string, allowing a default.
Unlike PromptResponse, this also appends a ': ' to the prompt. If 'default'
  is specified, the default is also written into the prompt (e.g.
if message is "message" and default is "default", the prompt would be
"message (default): ").
The default is returned if the user simply presses enter (no input) or an
EOF is received.
Args:
message: str, The prompt to print before the question.
default: str, The default value (if any).
Returns:
str, The string entered by the user, or the default if no value was
entered or prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return default
if default:
message += ' ({default}): '.format(default=default)
else:
message += ': '
response = _RawInput(message)
if not response:
response = default
return response
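# Illustrative usage (editor's sketch; the prompt text and default value are
# hypothetical). Plain enter, EOF, or disabled prompts all yield the default:
def _ExamplePromptWithDefaultUsage():
  """Returns the zone entered by the user, or 'us-central1-a'."""
  return PromptWithDefault('Zone', default='us-central1-a')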
def PromptChoice(options, default=None, message=None, prompt_string=None):
"""Prompt the user to select a choice from a list of items.
Args:
options: [object], A list of objects to print as choices. Their str()
method will be used to display them.
default: int, The default index to return if prompting is disabled or if
they do not enter a choice.
message: str, An optional message to print before the choices are displayed.
prompt_string: str, A string to print when prompting the user to enter a
choice. If not given, a default prompt is used.
Raises:
ValueError: If no options are given or if the default is not in the range of
available options.
Returns:
The index of the item in the list that was chosen, or the default if prompts
are disabled.
"""
if not options:
raise ValueError('You must provide at least one option.')
maximum = len(options)
if default is not None and not 0 <= default < maximum:
raise ValueError(
'Default option [{default}] is not a valid index for the options list '
'[{maximum} options given]'.format(default=default, maximum=maximum))
if properties.VALUES.core.disable_prompts.GetBool():
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n')
for i, option in enumerate(options):
sys.stderr.write(' [{index}] {option}\n'.format(
index=i + 1, option=str(option)))
if not prompt_string:
prompt_string = 'Please enter your numeric choice'
if default is None:
suffix_string = ': '
else:
suffix_string = ' ({default}): '.format(default=default + 1)
sys.stderr.write(_DoWrap(prompt_string + suffix_string))
while True:
answer = _RawInput()
    if answer is None or (answer == '' and default is not None):
# Return default if we failed to read from stdin
# Return default if the user hit enter and there is a valid default
# Prompt again otherwise
sys.stderr.write('\n')
return default
try:
num_choice = int(answer)
if num_choice < 1 or num_choice > maximum:
raise ValueError('Choice must be between 1 and {maximum}'.format(
maximum=maximum))
sys.stderr.write('\n')
return num_choice - 1
except ValueError:
sys.stderr.write('Please enter a value between 1 and {maximum}: '
.format(maximum=maximum))
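# Illustrative usage (editor's sketch; the option labels are hypothetical).
# The return value is a zero-based index into the options list:
def _ExamplePromptChoiceUsage():
  """Lets the user pick a machine type, defaulting to the first option."""
  options = ['n1-standard-1', 'n1-standard-2', 'n1-standard-4']
  index = PromptChoice(options, default=0, message='Choose a machine type:')
  return options[index]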
def LazyFormat(s, **kwargs):
"""Converts {key} => value for key, value in kwargs.iteritems().
  After the {key} conversions it converts {{<identifier>}} => {<identifier>}.
Args:
s: str, The string to format.
**kwargs: {str:str}, A dict of strings for named parameters.
Returns:
str, The lazily-formatted string.
"""
for key, value in kwargs.iteritems():
fmt = '{' + key + '}'
start = 0
while True:
start = s.find(fmt, start)
if start == -1:
break
if (start and s[start - 1] == '{' and
len(fmt) < len(s[start:]) and s[start + len(fmt)] == '}'):
# {{key}} => {key}
s = s[0:start - 1] + fmt + s[start + len(fmt) + 1:]
start += len(fmt)
else:
# {key} => value
s = s[0:start] + value + s[start + len(fmt):]
start += len(value)
# {{unknown}} => {unknown}
return re.sub(r'{({\w+})}', r'\1', s)
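# Illustrative example (editor's sketch) of the two substitution passes:
# named keys are expanded first, then doubled braces are unescaped.
def _ExampleLazyFormatUsage():
  """Returns 'run gcloud with {flags}' for these hypothetical inputs."""
  return LazyFormat('run {command} with {{flags}}', command='gcloud')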
def PrintExtendedList(items, col_fetchers):
"""Print a properly formated extended list for some set of resources.
If items is a generator, this function may elect to only request those rows
that it is ready to display.
Args:
items: [resource] or a generator producing resources, The objects
representing cloud resources.
col_fetchers: [(string, func(resource))], A list of tuples, one for each
column, in the order that they should appear. The string is the title
of that column which will be printed in a header. The func is a function
that will fetch a row-value for that column, given the resource
corresponding to the row.
"""
total_items = 0
rows = [[title for (title, unused_func) in col_fetchers]]
for item in items:
total_items += 1
row = []
for (unused_title, func) in col_fetchers:
value = func(item)
if value is None:
row.append('-')
else:
row.append(value)
rows.append(row)
attr = console_attr.GetConsoleAttr()
max_col_widths = [0] * len(col_fetchers)
for row in rows:
for col in range(len(row)):
max_col_widths[col] = max(max_col_widths[col],
                                attr.DisplayWidth(unicode(row[col])) + 2)
for row in rows:
for col in range(len(row)):
width = max_col_widths[col]
item = unicode(row[col])
item_width = attr.DisplayWidth(item)
if item_width < width and col != len(row) - 1:
item += u' ' * (width - item_width)
log.out.write(item)
log.out.write('\n')
if not total_items:
log.status.write('Listed 0 items.\n')
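# Illustrative usage (editor's sketch; the resource dicts and columns are
# hypothetical). Each fetcher maps one resource to one cell; None cells are
# rendered as '-':
def _ExamplePrintExtendedListUsage():
  """Prints NAME and ZONE columns for two fake resources."""
  items = [{'name': 'vm-1', 'zone': 'us-central1-a'},
           {'name': 'vm-2', 'zone': None}]
  PrintExtendedList(items, [('NAME', lambda r: r['name']),
                            ('ZONE', lambda r: r['zone'])])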
class ProgressTracker(object):
"""A context manager for telling the user about long-running progress."""
SPIN_MARKS = [
'|',
'/',
'-',
'\\',
]
def __init__(self, message, autotick=True, detail_message_callback=None,
tick_delay=1):
self._message = message
self._prefix = message + '...'
self._ticks = 0
self._autotick = autotick
self._done = False
self._lock = threading.Lock()
self._detail_message_callback = detail_message_callback
self._last_message_size = 0
self._tick_delay = tick_delay
self._is_tty = IsInteractive(output=True, error=True)
def _GetPrefix(self):
if self._detail_message_callback:
detail_message = self._detail_message_callback()
if detail_message:
return self._prefix + ' ' + detail_message + '...'
return self._prefix
def __enter__(self):
log.file_only_logger.info(self._GetPrefix())
self._Print()
if self._autotick:
def Ticker():
while True:
time.sleep(self._tick_delay)
if self.Tick():
return
threading.Thread(target=Ticker).start()
return self
def Tick(self):
"""Give a visual indication to the user that some progress has been made.
Output is sent to sys.stderr. Nothing is shown if output is not a TTY.
Returns:
Whether progress has completed.
"""
if self._is_tty:
with self._lock:
if not self._done:
self._ticks += 1
self._Print(ProgressTracker.SPIN_MARKS[
self._ticks % len(ProgressTracker.SPIN_MARKS)])
return self._done
def _Print(self, message=''):
"""Reprints the prefix followed by an optional message."""
display_message = self._GetPrefix()
if message:
display_message += message
    # Clear any previous output first; otherwise the trailing part of a
    # longer previous line would remain visible.
if self._last_message_size > 0:
sys.stderr.write('\r' + self._last_message_size * ' ')
self._last_message_size = len(display_message)
sys.stderr.write('\r' + display_message)
sys.stderr.flush()
def __exit__(self, ex_type, unused_value, unused_traceback):
with self._lock:
self._done = True
# If an exception was raised during progress tracking, exit silently here
# and let the appropriate exception handler tell the user what happened.
if ex_type:
# This is to prevent the tick character from appearing before 'failed.'
# (ex. 'message...failed' instead of 'message.../failed.')
self._Print('failed.\n')
return False
self._Print('done.\n')
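# Illustrative usage (editor's sketch; the message and the sleep stand in for
# a real long-running call). The spinner ticks automatically until exit:
def _ExampleProgressTrackerUsage():
  """Shows 'Creating instance...' with an auto-ticking spinner."""
  with ProgressTracker('Creating instance'):
    time.sleep(2)  # Stand-in for the actual long-running operation.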
class DelayedProgressTracker(ProgressTracker):
"""A progress tracker that only appears during a long running operation.
  Waits for the given timeout, then displays a progress tracker.
"""
class TrackerState(object):
"""Enum representing the current state of the progress tracker."""
class _TrackerStateTuple(object):
def __init__(self, name):
self.name = name
WAITING = _TrackerStateTuple('Waiting')
STARTED = _TrackerStateTuple('Started')
FINISHED = _TrackerStateTuple('Finished')
def __init__(self, message, timeout, autotick=True,
detail_message_callback=None):
super(DelayedProgressTracker, self).__init__(
message, autotick=autotick,
detail_message_callback=detail_message_callback)
self._timeout = timeout
self._state = self.TrackerState.WAITING
self._state_lock = threading.Lock()
def _SleepWhileNotFinished(self, timeout, increment=0.1):
"""Sleep for the given time unless the tracker enters the FINISHED state.
Args:
timeout: number, the total time for which to sleep
increment: number, the increment at which to check whether the tracker is
FINISHED
Returns:
bool, True unless the tracker reached the FINISHED state before the total
sleep time elapsed
"""
elapsed_time = 0
while (elapsed_time + FLOAT_COMPARE_EPSILON) <= timeout:
time.sleep(increment)
elapsed_time += increment
if self._state is self.TrackerState.FINISHED:
return False
return True
def __enter__(self):
def StartTracker():
if not self._SleepWhileNotFinished(self._timeout):
# If we aborted sleep early, return. We exited the progress tracker
# before the delay finished.
return
with self._state_lock:
if self._state is not self.TrackerState.FINISHED:
self._state = self.TrackerState.STARTED
super(DelayedProgressTracker, self).__enter__()
threading.Thread(target=StartTracker).start()
return self
def __exit__(self, exc_type, exc_value, traceback):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
super(DelayedProgressTracker, self).__exit__(exc_type, exc_value,
traceback)
self._state = self.TrackerState.FINISHED
def Tick(self):
with self._state_lock:
if self._state is self.TrackerState.STARTED:
return super(DelayedProgressTracker, self).Tick()
return self._state is self.TrackerState.FINISHED
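# Illustrative usage (editor's sketch; the timings are hypothetical). The
# spinner appears only if the operation outlives the timeout:
def _ExampleDelayedProgressTrackerUsage():
  """Displays a spinner only when the work takes longer than 1 second."""
  with DelayedProgressTracker('Importing data', timeout=1):
    time.sleep(0.5)  # Finishes before the timeout, so nothing is shown.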
class ProgressBar(object):
"""A simple progress bar for tracking completion of an action.
This progress bar works without having to use any control characters. It
prints the action that is being done, and then fills a progress bar below it.
  Do not print anything else to the output stream while the bar is active,
  since extra output will break the bar across multiple lines.
Progress bars can be stacked into a group. first=True marks the first bar in
the group and last=True marks the last bar in the group. The default assumes
a singleton bar with first=True and last=True.
This class can also be used in a context manager.
"""
@staticmethod
def _DefaultCallback(progress_factor):
pass
DEFAULT_CALLBACK = _DefaultCallback
@staticmethod
def SplitProgressBar(original_callback, weights):
"""Splits a progress bar into logical sections.
Wraps the original callback so that each of the subsections can use the full
range of 0 to 1 to indicate its progress. The overall progress bar will
display total progress based on the weights of the tasks.
Args:
original_callback: f(float), The original callback for the progress bar.
weights: [float], The weights of the tasks to create. These can be any
numbers you want and the split will be based on their proportions to
each other.
Returns:
(f(float), ), A tuple of callback functions, in order, for the subtasks.
"""
if (original_callback is None or
original_callback == ProgressBar.DEFAULT_CALLBACK):
return tuple([ProgressBar.DEFAULT_CALLBACK for _ in range(len(weights))])
def MakeCallback(already_done, weight):
def Callback(done_fraction):
original_callback(already_done + (done_fraction * weight))
return Callback
total = float(sum(weights))
callbacks = []
already_done = 0
for weight in weights:
normalized_weight = weight / total
callbacks.append(MakeCallback(already_done, normalized_weight))
already_done += normalized_weight
return tuple(callbacks)
def __init__(self, label, stream=log.status, total_ticks=60, first=True,
last=True):
"""Creates a progress bar for the given action.
Args:
label: str, The action that is being performed.
      stream: The output stream to write to, log.status (stderr) by default.
total_ticks: int, The number of ticks wide to make the progress bar.
first: bool, True if this is the first bar in a stacked group.
last: bool, True if this is the last bar in a stacked group.
"""
self._stream = stream
self._ticks_written = 0
self._total_ticks = total_ticks
self._first = first
self._last = last
attr = console_attr.ConsoleAttr()
self._box = attr.GetBoxLineCharacters()
self._redraw = (self._box.d_dr != self._box.d_vr or
self._box.d_dl != self._box.d_vl)
max_label_width = self._total_ticks - 4
if len(label) > max_label_width:
label = label[:max_label_width - 3] + '...'
elif len(label) < max_label_width:
diff = max_label_width - len(label)
label += ' ' * diff
left = self._box.d_vr + self._box.d_h
right = self._box.d_h + self._box.d_vl
self._label = u'{left} {label} {right}'.format(left=left, label=label,
right=right)
def Start(self):
"""Starts the progress bar by writing the top rule and label."""
if self._first or self._redraw:
left = self._box.d_dr if self._first else self._box.d_vr
right = self._box.d_dl if self._first else self._box.d_vl
rule = u'{left}{middle}{right}\n'.format(
left=left, middle=self._box.d_h * self._total_ticks, right=right)
self._stream.write(rule)
self._stream.write(self._label + '\n')
self._stream.write(self._box.d_ur)
self._ticks_written = 0
def SetProgress(self, progress_factor):
"""Sets the current progress of the task.
This method has no effect if the progress bar has already progressed past
the progress you call it with (since the progress bar cannot back up).
Args:
progress_factor: float, The current progress as a float between 0 and 1.
"""
expected_ticks = int(self._total_ticks * progress_factor)
new_ticks = expected_ticks - self._ticks_written
# Don't allow us to go over 100%.
new_ticks = min(new_ticks, self._total_ticks - self._ticks_written)
if new_ticks > 0:
self._stream.write(self._box.d_h * new_ticks)
self._ticks_written += new_ticks
if expected_ticks == self._total_ticks:
end = '\n' if self._last or not self._redraw else '\r'
self._stream.write(self._box.d_ul + end)
self._stream.flush()
def Finish(self):
"""Mark the progress as done."""
self.SetProgress(1)
def __enter__(self):
self.Start()
return self
def __exit__(self, *args):
self.Finish()
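# Illustrative usage (editor's sketch; the label and weights are
# hypothetical). SplitProgressBar lets each subtask report 0..1 progress
# while the bar advances in proportion to the weights:
def _ExampleProgressBarUsage():
  """Fills a bar across a download phase (75%) and an extract phase (25%)."""
  with ProgressBar('Fetching component') as pb:
    download_cb, extract_cb = ProgressBar.SplitProgressBar(
        pb.SetProgress, [3, 1])
    download_cb(1.0)  # Download done: the bar stands at 75%.
    extract_cb(0.5)   # Extract halfway: the bar stands at 87.5%.
  # Leaving the block calls Finish(), which completes the bar.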
def More(contents, out=None, prompt=None, check_pager=True):
"""Run a user specified pager or fall back to the internal pager.
Args:
contents: The entire contents of the text lines to page.
    out: The output stream; defaults to log.out if None.
prompt: The page break prompt.
check_pager: Checks the PAGER env var and uses it if True.
"""
if not IsInteractive(output=True):
if not out:
out = log.out
out.write(contents)
return
if not out:
# Rendered help to the log file.
log.file_only_logger.info(contents)
# Paging shenanigans to stdout.
out = sys.stdout
if check_pager:
pager = os.environ.get('PAGER', None)
if pager == '-':
# Use the fallback Pager.
pager = None
elif not pager:
# Search for a pager that handles ANSI escapes.
for command in ('less', 'pager'):
if files.FindExecutableOnPath(command):
pager = command
break
if pager:
less = os.environ.get('LESS', None)
if less is None:
os.environ['LESS'] = '-R'
p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)
encoding = console_attr.GetConsoleAttr().GetEncoding()
p.communicate(input=contents.encode(encoding))
p.wait()
if less is None:
os.environ.pop('LESS')
return
# Fall back to the internal pager.
console_pager.Pager(contents, out, prompt).Run()
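# Illustrative usage (editor's sketch; the content string is hypothetical).
# Non-interactive sessions get the text verbatim; interactive ones get
# $PAGER, a discovered pager, or the built-in fallback:
def _ExampleMoreUsage():
  """Pages 200 generated lines for the user."""
  More('\n'.join('line {0}'.format(i) for i in range(200)))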
| 34.020877 | 80 | 0.665777 |