code stringlengths 10–805k | def_use_chains sequencelengths 0–667 |
---|---|
# encoding: utf-8
import datetime
import re
import requests
from ckan.common import config
from ckan.common import asbool
from six import text_type, string_types
from ckan.common import _, json
import ckan.lib.maintain as maintain
log = __import__('logging').getLogger(__name__)
class License(object):
"""Domain object for a license."""
def __init__(self, data):
# convert old keys if necessary
if 'is_okd_compliant' in data:
data['od_conformance'] = 'approved' \
if asbool(data['is_okd_compliant']) else ''
del data['is_okd_compliant']
if 'is_osi_compliant' in data:
data['osd_conformance'] = 'approved' \
if asbool(data['is_osi_compliant']) else ''
del data['is_osi_compliant']
self._data = data
for (key, value) in self._data.items():
if key == 'date_created':
# Parse ISO formatted datetime.
                value = datetime.datetime(*map(int, re.split(r'[^\d]', value)))
self._data[key] = value
            elif isinstance(value, bytes):
                # Convert bytes to unicode (keeps Pylons and SQLAlchemy happy).
                value = value.decode('utf8')
self._data[key] = value
def __getattr__(self, name):
if name == 'is_okd_compliant':
log.warn('license.is_okd_compliant is deprecated - use '
'od_conformance instead.')
return self._data['od_conformance'] == 'approved'
if name == 'is_osi_compliant':
log.warn('license.is_osi_compliant is deprecated - use '
'osd_conformance instead.')
return self._data['osd_conformance'] == 'approved'
return self._data[name]
@maintain.deprecated("License.__getitem__() is deprecated and will be "
"removed in a future version of CKAN. Instead, "
"please use attribute access.")
def __getitem__(self, key):
'''NB This method is deprecated and will be removed in a future version
of CKAN. Instead, please use attribute access.
'''
return self.__getattr__(key)
def isopen(self):
if not hasattr(self, '_isopen'):
self._isopen = self.od_conformance == 'approved' or \
self.osd_conformance == 'approved'
return self._isopen
@maintain.deprecated("License.as_dict() is deprecated and will be "
"removed in a future version of CKAN. Instead, "
"please use attribute access.")
def as_dict(self):
'''NB This method is deprecated and will be removed in a future version
of CKAN. Instead, please use attribute access.
'''
data = self._data.copy()
if 'date_created' in data:
value = data['date_created']
value = value.isoformat()
data['date_created'] = value
# deprecated keys
if 'od_conformance' in data:
data['is_okd_compliant'] = data['od_conformance'] == 'approved'
if 'osd_conformance' in data:
data['is_osi_compliant'] = data['osd_conformance'] == 'approved'
return data
class LicenseRegister(object):
"""Dictionary-like interface to a group of licenses."""
def __init__(self):
group_url = config.get('licenses_group_url', None)
if group_url:
self.load_licenses(group_url)
else:
default_license_list = [
LicenseNotSpecified(),
LicenseOpenDataCommonsPDDL(),
LicenseOpenDataCommonsOpenDatabase(),
LicenseOpenDataAttribution(),
LicenseCreativeCommonsZero(),
LicenseCreativeCommonsAttribution(),
LicenseCreativeCommonsAttributionShareAlike(),
LicenseGNUFreeDocument(),
LicenseOtherOpen(),
LicenseOtherPublicDomain(),
LicenseOtherAttribution(),
LicenseOpenGovernment(),
LicenseCreativeCommonsNonCommercial(),
LicenseOtherNonCommercial(),
LicenseOtherClosed(),
]
self._create_license_list(default_license_list)
def load_licenses(self, license_url):
try:
if license_url.startswith('file://'):
with open(license_url.replace('file://', ''), 'r') as f:
license_data = json.load(f)
else:
response = requests.get(license_url)
license_data = response.json()
except requests.RequestException as e:
msg = "Couldn't get the licenses file {}: {}".format(license_url, e)
raise Exception(msg)
except ValueError as e:
msg = "Couldn't parse the licenses file {}: {}".format(license_url, e)
raise Exception(msg)
for license in license_data:
if isinstance(license, string_types):
license = license_data[license]
if license.get('title'):
license['title'] = _(license['title'])
self._create_license_list(license_data, license_url)
def _create_license_list(self, license_data, license_url=''):
if isinstance(license_data, dict):
self.licenses = [License(entity) for entity in license_data.values()]
elif isinstance(license_data, list):
self.licenses = [License(entity) for entity in license_data]
else:
msg = "Licenses at %s must be dictionary or list" % license_url
raise ValueError(msg)
def __getitem__(self, key, default=Exception):
for license in self.licenses:
if key == license.id:
return license
if default != Exception:
return default
else:
raise KeyError("License not found: %s" % key)
def get(self, key, default=None):
return self.__getitem__(key, default=default)
def keys(self):
return [license.id for license in self.licenses]
def values(self):
return self.licenses
def items(self):
return [(license.id, license) for license in self.licenses]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.licenses)
class DefaultLicense(dict):
''' The license was a dict but this did not allow translation of the
title. This is a slightly changed dict that allows us to have the title
as a property and so translated. '''
domain_content = False
domain_data = False
domain_software = False
family = ''
is_generic = False
od_conformance = 'not reviewed'
osd_conformance = 'not reviewed'
maintainer = ''
status = 'active'
url = ''
title = ''
id = ''
keys = ['domain_content',
'id',
'domain_data',
'domain_software',
'family',
'is_generic',
'od_conformance',
'osd_conformance',
'maintainer',
'status',
'url',
'title']
def __getitem__(self, key):
''' behave like a dict but get from attributes '''
if key in self.keys:
value = getattr(self, key)
if isinstance(value, str):
return text_type(value)
else:
return value
else:
raise KeyError()
def copy(self):
''' create a dict of the license used by the licenses api '''
out = {}
for key in self.keys:
out[key] = text_type(getattr(self, key))
return out
class LicenseNotSpecified(DefaultLicense):
id = "notspecified"
is_generic = True
@property
def title(self):
return _("License not specified")
class LicenseOpenDataCommonsPDDL(DefaultLicense):
domain_data = True
id = "odc-pddl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-pddl"
@property
def title(self):
return _("Open Data Commons Public Domain Dedication and License (PDDL)")
class LicenseOpenDataCommonsOpenDatabase(DefaultLicense):
domain_data = True
id = "odc-odbl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-odbl"
@property
def title(self):
return _("Open Data Commons Open Database License (ODbL)")
class LicenseOpenDataAttribution(DefaultLicense):
domain_data = True
id = "odc-by"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/odc-by"
@property
def title(self):
return _("Open Data Commons Attribution License")
class LicenseCreativeCommonsZero(DefaultLicense):
domain_content = True
domain_data = True
id = "cc-zero"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-zero"
@property
def title(self):
return _("Creative Commons CCZero")
class LicenseCreativeCommonsAttribution(DefaultLicense):
id = "cc-by"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-by"
@property
def title(self):
return _("Creative Commons Attribution")
class LicenseCreativeCommonsAttributionShareAlike(DefaultLicense):
domain_content = True
id = "cc-by-sa"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/cc-by-sa"
@property
def title(self):
return _("Creative Commons Attribution Share-Alike")
class LicenseGNUFreeDocument(DefaultLicense):
domain_content = True
id = "gfdl"
od_conformance = 'approved'
url = "http://www.opendefinition.org/licenses/gfdl"
@property
def title(self):
return _("GNU Free Documentation License")
class LicenseOtherOpen(DefaultLicense):
domain_content = True
id = "other-open"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Open)")
class LicenseOtherPublicDomain(DefaultLicense):
domain_content = True
id = "other-pd"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Public Domain)")
class LicenseOtherAttribution(DefaultLicense):
domain_content = True
id = "other-at"
is_generic = True
od_conformance = 'approved'
@property
def title(self):
return _("Other (Attribution)")
class LicenseOpenGovernment(DefaultLicense):
domain_content = True
id = "uk-ogl"
od_conformance = 'approved'
# CS: bad_spelling ignore
url = "http://reference.data.gov.uk/id/open-government-licence"
@property
def title(self):
# CS: bad_spelling ignore
return _("UK Open Government Licence (OGL)")
class LicenseCreativeCommonsNonCommercial(DefaultLicense):
id = "cc-nc"
url = "http://creativecommons.org/licenses/by-nc/2.0/"
@property
def title(self):
return _("Creative Commons Non-Commercial (Any)")
class LicenseOtherNonCommercial(DefaultLicense):
id = "other-nc"
is_generic = True
@property
def title(self):
return _("Other (Non-Commercial)")
class LicenseOtherClosed(DefaultLicense):
id = "other-closed"
is_generic = True
@property
def title(self):
return _("Other (Not Open)")
| [[[26, 34], [985, 993]], [[42, 44], [1013, 1015]], [[53, 61], [4592, 4600], [4680, 4688]], …] |
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# Define the category map
category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos',
'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics',
'sci.med': 'Medicine'}
# Get the training dataset
training_data = fetch_20newsgroups(subset='train',
categories=category_map.keys(), shuffle=True, random_state=5)
# Build a count vectorizer and extract term counts
count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)
# Create the tf-idf transformer
tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)
# Define test data
input_data = [
'You need to be careful with cars when you are driving on slippery roads',
'A lot of devices can be operated wirelessly',
'Players need to be careful when they are close to goal posts',
'Political debates help us understand the perspectives of both sides'
]
# Train a Multinomial Naive Bayes classifier
classifier = MultinomialNB().fit(train_tfidf, training_data.target)
# Transform input data using count vectorizer
input_tc = count_vectorizer.transform(input_data)
# Transform vectorized data using tfidf transformer
input_tfidf = tfidf.transform(input_tc)
# Predict the output categories
predictions = classifier.predict(input_tfidf)
# Print the outputs
for sent, category in zip(input_data, predictions):
print('\nInput:', sent, '\nPredicted category:', \
category_map[training_data.target_names[category]])
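# Editor's sketch: the script above chains CountVectorizer, TfidfTransformer and
# MultinomialNB by hand; the same flow can be expressed as a single scikit-learn
# Pipeline, so new sentences need only one predict() call on the fitted object.
from sklearn.pipeline import make_pipeline

text_clf = make_pipeline(CountVectorizer(), TfidfTransformer(), MultinomialNB())
text_clf.fit(training_data.data, training_data.target)
for sent, category in zip(input_data, text_clf.predict(input_data)):
    print(sent, '=>', category_map[training_data.target_names[category]])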
| [[[29, 47], [463, 481]], [[80, 93], [1251, 1264]], [[138, 154], [818, 834]], …] |
from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError
from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database
class Login(Resource):
@swag_from('../../docs/api/login_post.yml')
def post(self):
json = request.json
if json.get('email', False) and json.get('senha', False):
senha = Sha256(json['senha']).hash
usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
if usuario:
usuario = usuario[0]
logger.debug(f"{json['email']} - CONECTADO")
try:
token = Token.gerar(usuario['senha'], usuario['_id'])
return Resposta.token_validado(token)
except ExpiredSignatureError:
return Resposta.nao_aceito('Token expirado')
except Exception as e:
return Resposta.error(str(e))
else:
logger.debug(f"{json['email']} - ERRO DE ACESSO")
return Resposta.nao_aceito('Usuário ou senha inválido!')
else:
return Resposta.error('JSON Inválido!')
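# Editor's wiring sketch; the app object and the '/login' route below are
# assumptions, not taken from the project above. It shows how a flask_restful
# Resource such as Login is typically registered on an application.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(Login, '/login')  # POST /login with JSON {"email": ..., "senha": ...}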
| [[[19, 25], [786, 792], [1229, 1235]], [[44, 51], [483, 490]], [[73, 82], [405, 414]], …] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .mixed_precision import *
from . import mixed_precision
__all__ = mixed_precision.__all__
| [[[634, 649]], [[680, 681]], [[696, 711], [723, 738]], [[713, 720]]] |
import argparse
from rasa.cli.arguments.default_arguments import (
add_nlu_data_param,
add_out_param,
add_data_param,
add_domain_param,
)
def set_convert_arguments(parser: argparse.ArgumentParser):
add_data_param(parser, required=True, default=None, data_type="Rasa NLU ")
add_out_param(
parser,
required=True,
default=None,
help_text="File where to save training data in Rasa format.",
)
parser.add_argument("-l", "--language", default="en", help="Language of data.")
parser.add_argument(
"-f",
"--format",
required=True,
choices=["json", "md"],
help="Output format the training data should be converted into.",
)
def set_split_arguments(parser: argparse.ArgumentParser):
add_nlu_data_param(parser, help_text="File or folder containing your NLU data.")
parser.add_argument(
"--training-fraction",
type=float,
default=0.8,
help="Percentage of the data which should be in the training data.",
)
add_out_param(
parser,
default="train_test_split",
help_text="Directory where the split files should be stored.",
)
def set_validator_arguments(parser: argparse.ArgumentParser):
add_domain_param(parser)
add_data_param(parser)
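if __name__ == "__main__":
    # Editor's sketch: attach the split arguments to a stand-alone parser. Inside
    # Rasa these helpers are normally called on sub-parsers created by the CLI
    # entry point; the description string here is only illustrative.
    parser = argparse.ArgumentParser(description="rasa data split nlu (sketch)")
    set_split_arguments(parser)
    parser.print_help()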
| [[[7, 15], [191, 199], [767, 775], [1246, 1254]], [[72, 90], [797, 815]], [[96, 109], [301, 314], [1064, 1077]], …] |
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class Simulation:
def __init__(self, init_investment, stock_returns, strategy, predicted_movements=None):
self.init_investment = init_investment
self.predicted_movements = predicted_movements
self.stock_returns = stock_returns
self.strategy = strategy
self.action_history = []
self.account_history = [init_investment]
self.__actual_investment = 0
self.step = 0
self.return_on_investment = 0
self.profit_on_investment = 0
def start(self):
for self.step in range(len(self.stock_returns)):
if self.predicted_movements is not None:
action = self.strategy.decide(self.predicted_movements[self.step])
else:
action = self.strategy.decide(self.step)
self.__make_transaction(action)
def __make_transaction(self, action):
self.action_history.append(action)
if action == 'buy':
self.__buy()
elif action == 'hold':
self.__hold()
elif action == 'sell':
self.__sell()
elif action == 'wait':
self.__wait()
else:
sys.exit('Action not implemented, exiting program!')
def get_investment_performance(self):
self.return_on_investment = (self.account_history[-1] - self.init_investment) / self.init_investment
self.profit_on_investment = self.account_history[-1] - self.init_investment
return {'return': self.return_on_investment,
'profit': self.profit_on_investment}
def plot_trading_history(self, stock_prices, date):
        date = date.iloc[-(len(stock_prices) - 1):]
stock_prices = np.insert(stock_prices, 0, stock_prices[0])
fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(40, 20))
ax1.plot(stock_prices, color='black', label='Cena zamknięcia akcji')
actions = pd.DataFrame(self.action_history)
buy_idx = actions[actions[0] == 'buy'].index.to_list()
sell_idx = actions[actions[0] == 'sell'].index.to_list()
stock_prices = np.array(stock_prices)
ax1.scatter(buy_idx, stock_prices[buy_idx], color='green', s=40, label='Kupno')
ax1.scatter(sell_idx, stock_prices[sell_idx], color='red', s=40, label='Sprzedaż')
ax1.legend()
ax2.plot(self.account_history[:-1], label='Kapitał')
plt.xlabel('Krok czasowy')
ax1.set_ylabel('Cena akcji')
ax2.set_ylabel('Kapitał')
ax2.legend()
plt.show()
def __calculate_daily_profit(self):
self.__actual_investment += self.__actual_investment * self.stock_returns[self.step]
def __buy(self):
self.__actual_investment = self.account_history[self.step]
self.__calculate_daily_profit()
self.account_history.append(self.__actual_investment)
def __hold(self):
self.__calculate_daily_profit()
self.account_history.append(self.__actual_investment)
def __sell(self):
self.account_history.append(self.__actual_investment)
self.__actual_investment = 0
def __wait(self):
self.account_history.append(self.account_history[self.step-1])
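class AlwaysHoldStrategy:
    """Editor's toy strategy for the sketch below: buy once, then hold."""
    def __init__(self):
        self._bought = False

    def decide(self, step):
        if not self._bought:
            self._bought = True
            return 'buy'
        return 'hold'


if __name__ == '__main__':
    # Editor's sketch: run the simulator on synthetic daily returns. The strategy
    # interface (a decide() method returning 'buy'/'hold'/'sell'/'wait') is
    # inferred from Simulation.start() above.
    returns = np.random.normal(0, 0.01, size=250)
    sim = Simulation(init_investment=10000, stock_returns=returns,
                     strategy=AlwaysHoldStrategy())
    sim.start()
    print(sim.get_investment_performance())  # {'return': ..., 'profit': ...}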
| [[[7, 10], [1259, 1262]], [[18, 42], [1852, 1855], [2471, 2474], [2598, 2601]], [[50, 61], [1782, 1784], [2179, 2181]], [[69, 81], [1994, 1996]], [[90, 100]]] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import os
from six.moves import configparser
from prompt_toolkit import prompt # pylint: disable=import-error
from azure.cli.core._help import PRIVACY_STATEMENT
SELECT_SYMBOL = {
'outside': '#',
'query': '??',
'example': '::',
'exit_code': '$',
'scope': '%%',
'unscope': '..'
}
GESTURE_INFO = {
SELECT_SYMBOL['outside'] + "[cmd]": "use commands outside the application",
# pylint: disable=line-too-long
"[cmd] + [param] +" + "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Inject jmespath query from previous command",
"\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Jmespath query of the previous command",
"[cmd] " + SELECT_SYMBOL['example'] + " [num]": "do a step by step tutorial of example",
SELECT_SYMBOL['exit_code']: "get the exit code of the previous command",
SELECT_SYMBOL['scope'] + '[cmd]': "set a scope, and scopes can be chained with spaces",
SELECT_SYMBOL['scope'] + ' ' + SELECT_SYMBOL['unscope']: "go back a scope",
}
CONFIG_FILE_NAME = 'shell-config'
GESTURE_LENGTH = max(len(key) for key in GESTURE_INFO) + 1
def help_text(values):
""" reformats the help text """
result = ""
for key in values:
result += key + ' '.join('' for x in range(GESTURE_LENGTH - len(key))) +\
': ' + values[key] + '\n'
return result
SHELL_HELP = help_text(GESTURE_INFO)
class Configuration(object):
""" configuration for program """
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False,
'y': True, 'Y': True, 'n': False, 'N': False}
""" Configuration information """
def __init__(self, cli_config, style=None):
self.config = configparser.ConfigParser({
'firsttime': 'yes',
'style': style if style else 'default'
})
self.cli_config = cli_config
self.config.add_section('Help Files')
self.config.add_section('Layout')
self.config.set('Help Files', 'command', 'help_dump.json')
self.config.set('Help Files', 'history', 'history.txt')
self.config.set('Help Files', 'frequency', 'frequency.json')
self.config.set('Layout', 'command_description', 'yes')
self.config.set('Layout', 'param_description', 'yes')
self.config.set('Layout', 'examples', 'yes')
self.config_dir = os.getenv('AZURE_CONFIG_DIR') or os.path.expanduser(os.path.join('~', '.azure-shell'))
if not os.path.exists(self.config_dir):
os.makedirs(self.config_dir)
if not os.path.exists(os.path.join(self.config_dir, CONFIG_FILE_NAME)):
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
self.config.write(config_file)
else:
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'r') as config_file:
self.config.readfp(config_file) # pylint: disable=deprecated-method
self.update()
def get_config_dir(self):
return self.config_dir
def get_history(self):
""" returns the history """
return self.config.get('Help Files', 'history')
def get_help_files(self):
""" returns where the command table is cached """
return self.config.get('Help Files', 'command')
def get_frequency(self):
""" returns the name of the frequency file """
return self.config.get('Help Files', 'frequency')
def load(self, path):
""" loads the configuration settings """
self.config.read(path)
def firsttime(self):
""" sets it as already done"""
self.config.set('DEFAULT', 'firsttime', 'no')
if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
print(PRIVACY_STATEMENT)
else:
self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
self.update()
def get_style(self):
""" gets the last style they used """
return self.config.get('DEFAULT', 'style')
def has_feedback(self):
""" returns whether user has given feedback """
return self.cli_config.getboolean('core', 'given feedback', fallback='false')
def set_feedback(self, value):
""" sets the feedback in the config """
self.cli_config.set_value('core', 'given feedback', value)
def set_style(self, val):
""" sets the style they used """
self.set_val('DEFAULT', 'style', val)
def set_val(self, direct, section, val):
""" set the config values """
if val is not None:
self.config.set(direct, section, val)
self.update()
def update(self):
""" updates the configuration settings """
with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
self.config.write(config_file)
def ask_user_for_telemetry():
""" asks the user for if we can collect telemetry """
answer = " "
while answer.lower() != 'yes' and answer.lower() != 'no':
answer = prompt(u'\nDo you agree to sending telemetry (yes/no)? Default answer is yes: ')
if answer == '':
answer = 'yes'
return answer
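if __name__ == '__main__':
    # Editor's sketch: help_text() pads every gesture key to GESTURE_LENGTH so the
    # descriptions line up; SHELL_HELP above is built the same way from GESTURE_INFO.
    demo = {'%%[cmd]': 'set a scope', '$': 'exit code of the previous command'}
    print(help_text(demo))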
| [[[368, 382]], [[391, 393], [2817, 2819], …], [[417, 429], [2165, 2177]], …] |
from distutils.core import setup
setup(
name='yanccm',
packages=[
'controller',
'sot',
'ncservice',
'ncservice.configDb',
'ncservice.ncDeviceOps',
'ncservice.ncDeviceOps.threaded',
'view'],
version='0.0.2',
license='MIT',
    description='''YANCCM (pronounced yank'em) - Yet Another Network Configuration and Change Management tool, is a
    multi-threaded configuration manager for network devices that leverages the NETCONF protocol''',
author='Richard Cunningham',
author_email='cunningr@gmail.com',
url='https://github.com/cunningr/yanccm',
download_url='https://github.com/cunningr/yanccm',
keywords=['Netconf', 'Cisco', 'configuration management'],
install_requires=[
'ncclient',
'lxml',
'pyyaml',
'pymongo',
'tabulate',
'requests',
'jinja2'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
entry_points={
'console_scripts': [
'yanccm = controller.cli:main'
]
}
)
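# Editor's sketch, not part of this setup.py: the 'console_scripts' entry above
# makes setuptools generate a 'yanccm' launcher roughly equivalent to this
# wrapper, which imports controller.cli and exits with the return value of main().
def _yanccm_launcher_sketch():
    from controller.cli import main
    return main()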
| [[[27, 32], [34, 39]]] |
from terregex.mlr import Node, NodeList, Literal, NotLiteral, \
In, Negate, Range, Category, MinRepeat, MaxRepeat, \
SubPattern, Branch, Any, parse
from terregex.transform import Transformer
| [[[25, 29]], [[31, 39]], [[41, 48]], [[50, 60]], [[66, 68]], [[70, 76]], [[78, 83]], [[85, 93]], [[95, 104]], [[106, 115]], [[121, 131]], [[133, 139]], [[141, 144]], [[146, 151]], [[183, 194]]] |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PrinseqLite(Package):
"""PRINSEQ will help you to preprocess your genomic or metagenomic
sequence data in FASTA or FASTQ format."""
homepage = "http://prinseq.sourceforge.net"
url = "https://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz"
version('0.20.4', sha256='9b5e0dce3b7f02f09e1cc7e8a2dd77c0b133e5e35529d570ee901f53ebfeb56f')
variant('nopca', default=True, description="Graphs version without PCA")
depends_on('perl', type='run')
depends_on('perl-cairo', type='run')
depends_on('perl-digest-md5', type='run')
depends_on('perl-json', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-graphs-noPCA.pl')
filter_file(r'#!/usr/bin/perl',
'#!/usr/bin/env perl',
'prinseq-lite.pl')
install('prinseq-graphs-noPCA.pl', prefix.bin)
install('prinseq-lite.pl', prefix.bin)
chmod = which('chmod')
chmod('+x', join_path(self.prefix.bin, 'prinseq-graphs-noPCA.pl'))
chmod('+x', join_path(self.prefix.bin, 'prinseq-lite.pl'))
| [[[216, 217], [238, 245], [522, 529], …], [[226, 237]]] |
"""
Unit test for Linear Programming
"""
import sys
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_equal,
assert_array_less, assert_warns, suppress_warnings)
from pytest import raises as assert_raises
from scipy.optimize import linprog, OptimizeWarning
from scipy.sparse.linalg import MatrixRankWarning
from scipy.linalg import LinAlgWarning
import scipy.sparse
import pytest
has_umfpack = True
try:
from scikits.umfpack import UmfpackWarning
except ImportError:
has_umfpack = False
has_cholmod = True
try:
import sksparse
from sksparse.cholmod import cholesky as cholmod
except ImportError:
has_cholmod = False
def _assert_iteration_limit_reached(res, maxiter):
assert_(not res.success, "Incorrectly reported success")
    assert_(res.nit <= maxiter, "Incorrectly reported number of iterations")
assert_equal(res.status, 1, "Failed to report iteration limit reached")
def _assert_infeasible(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 2, "failed to report infeasible status")
def _assert_unbounded(res):
# res: linprog result object
assert_(not res.success, "incorrectly reported success")
assert_equal(res.status, 3, "failed to report unbounded status")
def _assert_unable_to_find_basic_feasible_sol(res):
# res: linprog result object
# The status may be either 2 or 4 depending on why the feasible solution
    # could not be found. If the underlying problem is expected to not have a
# feasible solution, _assert_infeasible should be used.
assert_(not res.success, "incorrectly reported success")
assert_(res.status in (2, 4), "failed to report optimization failure")
def _assert_success(res, desired_fun=None, desired_x=None,
rtol=1e-8, atol=1e-8):
# res: linprog result object
# desired_fun: desired objective function value or None
# desired_x: desired solution or None
if not res.success:
msg = "linprog status {0}, message: {1}".format(res.status,
res.message)
raise AssertionError(msg)
assert_equal(res.status, 0)
if desired_fun is not None:
assert_allclose(res.fun, desired_fun,
err_msg="converged to an unexpected objective value",
rtol=rtol, atol=atol)
if desired_x is not None:
assert_allclose(res.x, desired_x,
err_msg="converged to an unexpected solution",
rtol=rtol, atol=atol)
def magic_square(n):
"""
Generates a linear program for which integer solutions represent an
n x n magic square; binary decision variables represent the presence
(or absence) of an integer 1 to n^2 in each position of the square.
"""
np.random.seed(0)
M = n * (n**2 + 1) / 2
numbers = np.arange(n**4) // n**2 + 1
numbers = numbers.reshape(n**2, n, n)
zeros = np.zeros((n**2, n, n))
A_list = []
b_list = []
# Rule 1: use every number exactly once
for i in range(n**2):
A_row = zeros.copy()
A_row[i, :, :] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 2: Only one number per square
for i in range(n):
for j in range(n):
A_row = zeros.copy()
A_row[:, i, j] = 1
A_list.append(A_row.flatten())
b_list.append(1)
# Rule 3: sum of rows is M
for i in range(n):
A_row = zeros.copy()
A_row[:, i, :] = numbers[:, i, :]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 4: sum of columns is M
for i in range(n):
A_row = zeros.copy()
A_row[:, :, i] = numbers[:, :, i]
A_list.append(A_row.flatten())
b_list.append(M)
# Rule 5: sum of diagonals is M
A_row = zeros.copy()
A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)]
A_list.append(A_row.flatten())
b_list.append(M)
A_row = zeros.copy()
A_row[:, range(n), range(-1, -n - 1, -1)] = \
numbers[:, range(n), range(-1, -n - 1, -1)]
A_list.append(A_row.flatten())
b_list.append(M)
A = np.array(np.vstack(A_list), dtype=float)
b = np.array(b_list, dtype=float)
c = np.random.rand(A.shape[1])
return A, b, c, numbers
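def _magic_square_demo():
    # Editor's sketch (not one of the original tests): solve the LP relaxation of
    # the 3x3 magic-square problem built above with 0-1 bounds on every variable.
    A, b, c, _numbers = magic_square(3)
    res = linprog(c, A_eq=A, b_eq=b, bounds=(0, 1), method='highs')
    assert res.success
    return res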
def lpgen_2d(m, n):
""" -> A b c LP test: m*n vars, m+n constraints
row sums == n/m, col sums == 1
https://gist.github.com/denis-bz/8647461
"""
np.random.seed(0)
c = - np.random.exponential(size=(m, n))
Arow = np.zeros((m, m * n))
brow = np.zeros(m)
for j in range(m):
j1 = j + 1
Arow[j, j * n:j1 * n] = 1
brow[j] = n / m
Acol = np.zeros((n, m * n))
bcol = np.zeros(n)
for j in range(n):
j1 = j + 1
Acol[j, j::n] = 1
bcol[j] = 1
A = np.vstack((Arow, Acol))
b = np.hstack((brow, bcol))
return A, b, c.ravel()
def very_random_gen(seed=0):
np.random.seed(seed)
m_eq, m_ub, n = 10, 20, 50
c = np.random.rand(n)-0.5
A_ub = np.random.rand(m_ub, n)-0.5
b_ub = np.random.rand(m_ub)-0.5
A_eq = np.random.rand(m_eq, n)-0.5
b_eq = np.random.rand(m_eq)-0.5
lb = -np.random.rand(n)
ub = np.random.rand(n)
lb[lb < -np.random.rand()] = -np.inf
ub[ub > np.random.rand()] = np.inf
bounds = np.vstack((lb, ub)).T
return c, A_ub, b_ub, A_eq, b_eq, bounds
def nontrivial_problem():
c = [-1, 8, 4, -6]
A_ub = [[-7, -7, 6, 9],
[1, -1, -3, 0],
[10, -10, -7, 7],
[6, -1, 3, 4]]
b_ub = [-3, 6, -6, 6]
A_eq = [[-10, 1, 1, -8]]
b_eq = [-4]
x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391]
f_star = 7083 / 1391
return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star
def l1_regression_prob(seed=0, m=8, d=9, n=100):
'''
    Training data is {(x0, y0), (x1, y1), ..., (xn-1, yn-1)}
x in R^d
y in R
n: number of training samples
d: dimension of x, i.e. x in R^d
phi: feature map R^d -> R^m
m: dimension of feature space
'''
np.random.seed(seed)
phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping
w_true = np.random.randn(m)
x = np.random.normal(0, 1, size=(d, n)) # features
y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements
# construct the problem
c = np.ones(m+n)
c[:m] = 0
A_ub = scipy.sparse.lil_matrix((2*n, n+m))
idx = 0
for ii in range(n):
A_ub[idx, :m] = phi @ x[:, ii]
A_ub[idx, m+ii] = -1
A_ub[idx+1, :m] = -1*phi @ x[:, ii]
A_ub[idx+1, m+ii] = -1
idx += 2
A_ub = A_ub.tocsc()
b_ub = np.zeros(2*n)
b_ub[0::2] = y
b_ub[1::2] = -y
bnds = [(None, None)]*m + [(0, None)]*n
return c, A_ub, b_ub, bnds
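def _l1_regression_demo(m=8):
    # Editor's sketch (not one of the original tests): solve the least-absolute-
    # deviation LP built above; the fitted weights occupy the first m variables,
    # the remaining n variables are the per-sample slack terms.
    c, A_ub, b_ub, bnds = l1_regression_prob(m=m)
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method='highs')
    return res.x[:m]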
def generic_callback_test(self):
# Check that callback is as advertised
last_cb = {}
def cb(res):
message = res.pop('message')
complete = res.pop('complete')
assert_(res.pop('phase') in (1, 2))
assert_(res.pop('status') in range(4))
assert_(isinstance(res.pop('nit'), int))
assert_(isinstance(complete, bool))
assert_(isinstance(message, str))
last_cb['x'] = res['x']
last_cb['fun'] = res['fun']
last_cb['slack'] = res['slack']
last_cb['con'] = res['con']
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
assert_allclose(last_cb['fun'], res['fun'])
assert_allclose(last_cb['x'], res['x'])
assert_allclose(last_cb['con'], res['con'])
assert_allclose(last_cb['slack'], res['slack'])
def test_unknown_solvers_and_options():
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki')
assert_raises(ValueError, linprog,
c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki')
assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub,
options={"rr_method": 'ekki-ekki-ekki'})
def test_choose_solver():
# 'highs' chooses 'dual'
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub, b_ub, method='highs')
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
A_ub = None
b_ub = None
A_eq = None
b_eq = None
bounds = None
################
# Common Tests #
################
class LinprogCommonTests:
"""
Base class for `linprog` tests. Generally, each test will be performed
once for every derived class of LinprogCommonTests, each of which will
typically change self.options and/or self.method. Effectively, these tests
    are run for many combinations of method (simplex, revised simplex, and
interior point) and options (such as pivoting rule or sparse treatment).
"""
##################
# Targeted Tests #
##################
def test_callback(self):
generic_callback_test(self)
def test_disp(self):
# test that display option does not break anything.
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"disp": True})
_assert_success(res, desired_fun=-64.049494229)
def test_docstring_example(self):
# Example from linprog docstring.
c = [-1, 4]
A = [[-3, 1], [1, 2]]
b = [6, 4]
x0_bounds = (None, None)
x1_bounds = (-3, None)
res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
options=self.options, method=self.method)
_assert_success(res, desired_fun=-22)
def test_type_error(self):
# (presumably) checks that linprog recognizes type errors
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1]
A_eq = [[1]]
b_eq = "hello"
assert_raises(TypeError, linprog,
c, A_eq=A_eq, b_eq=b_eq,
method=self.method, options=self.options)
def test_aliasing_b_ub(self):
# (presumably) checks that linprog does not modify b_ub
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_ub = np.array([[1.0]])
b_ub_orig = np.array([3.0])
b_ub = b_ub_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-4, desired_x=[-4])
assert_allclose(b_ub_orig, b_ub)
def test_aliasing_b_eq(self):
# (presumably) checks that linprog does not modify b_eq
# This is tested more carefully in test__linprog_clean_inputs.py
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq_orig = np.array([3.0])
b_eq = b_eq_orig.copy()
bounds = (-4.0, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
assert_allclose(b_eq_orig, b_eq)
def test_non_ndarray_args(self):
# (presumably) checks that linprog accepts list in place of arrays
# This is tested more carefully in test__linprog_clean_inputs.py
c = [1.0]
A_ub = [[1.0]]
b_ub = [3.0]
A_eq = [[1.0]]
b_eq = [2.0]
bounds = (-1.0, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=2, desired_x=[2])
def test_unknown_options(self):
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
def f(c, A_ub=None, b_ub=None, A_eq=None,
b_eq=None, bounds=None, options={}):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=options)
o = {key: self.options[key] for key in self.options}
o['spam'] = 42
assert_warns(OptimizeWarning, f,
c, A_ub=A_ub, b_ub=b_ub, options=o)
def test_invalid_inputs(self):
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Test ill-formatted bounds
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)])
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)])
assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)])
# Test other invalid inputs
assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2])
assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2])
assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1])
assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1)
# this last check doesn't make sense for sparse presolve
if ("_sparse_presolve" in self.options and
self.options["_sparse_presolve"]):
return
# there aren't 3-D sparse matrices
assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1)
def test_sparse_constraints(self):
# gh-13559: improve error message for sparse inputs when unsupported
def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None):
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
np.random.seed(0)
m = 100
n = 150
A_eq = scipy.sparse.rand(m, n, 0.5)
x_valid = np.random.randn((n))
c = np.random.randn((n))
ub = x_valid + np.random.rand((n))
lb = x_valid - np.random.rand((n))
bounds = np.column_stack((lb, ub))
b_eq = A_eq * x_valid
if self.method in {'simplex', 'revised simplex'}:
# simplex and revised simplex should raise error
with assert_raises(ValueError, match=f"Method '{self.method}' "
"does not support sparse constraint matrices."):
linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=self.options)
else:
# other methods should succeed
options = {**self.options}
if self.method in {'interior-point'}:
options['sparse'] = True
res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds,
method=self.method, options=options)
assert res.success
def test_maxiter(self):
# test iteration limit w/ Enzo example
c = [4, 8, 3, 0, 0, 0]
A = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b = [185, 155, 600]
np.random.seed(0)
maxiter = 3
res = linprog(c, A_eq=A, b_eq=b, method=self.method,
options={"maxiter": maxiter})
_assert_iteration_limit_reached(res, maxiter)
assert_equal(res.nit, maxiter)
def test_bounds_fixed(self):
# Test fixed bounds (upper equal to lower)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
do_presolve = self.options.get('presolve', True)
res = linprog([1], bounds=(1, 1),
method=self.method, options=self.options)
_assert_success(res, 1, 1)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)],
method=self.method, options=self.options)
_assert_success(res, 12, [5, -1, 3])
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 1], bounds=[(1, 1), (1, 3)],
method=self.method, options=self.options)
_assert_success(res, 2, [1, 1])
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7],
bounds=[(-5, 5), (0, 10), (3.5, 3.5)],
method=self.method, options=self.options)
_assert_success(res, 15, [1, 7, 3.5])
if do_presolve:
assert_equal(res.nit, 0)
def test_bounds_infeasible(self):
# Test ill-valued bounds (upper less than lower)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
do_presolve = self.options.get('presolve', True)
res = linprog([1], bounds=(1, -2), method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
def test_bounds_infeasible_2(self):
# Test ill-valued bounds (lower inf, upper -inf)
# If presolve option True, test if solution found in presolve (i.e.
# number of iterations is 0).
# For the simplex method, the cases do not result in an
# infeasible status, but in a RuntimeWarning. This is a
# consequence of having _presolve() take care of feasibility
# checks. See issue gh-11618.
do_presolve = self.options.get('presolve', True)
simplex_without_presolve = not do_presolve and self.method == 'simplex'
c = [1, 2, 3]
bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)]
bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)]
if simplex_without_presolve:
def g(c, bounds):
res = linprog(c, bounds=bounds, method=self.method, options=self.options)
return res
with pytest.warns(RuntimeWarning):
with pytest.raises(IndexError):
g(c, bounds=bounds_1)
with pytest.warns(RuntimeWarning):
with pytest.raises(IndexError):
g(c, bounds=bounds_2)
else:
res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options)
_assert_infeasible(res)
if do_presolve:
assert_equal(res.nit, 0)
def test_empty_constraint_1(self):
c = [-1, -2]
res = linprog(c, method=self.method, options=self.options)
_assert_unbounded(res)
def test_empty_constraint_2(self):
c = [-1, 1, -1, 1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve requires no iterations
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_empty_constraint_3(self):
c = [1, -1, 1, -1]
bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)]
res = linprog(c, bounds=bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2)
def test_inequality_constraints(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf
c = np.array([3, 2]) * -1 # maximize
A_ub = [[2, 1],
[1, 1],
[1, 0]]
b_ub = [10, 8, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-18, desired_x=[2, 6])
def test_inequality_constraints2(self):
# Minimize linear function subject to linear inequality constraints.
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf
# (dead link)
c = [6, 3]
A_ub = [[0, 3],
[-1, -1],
[-2, 1]]
b_ub = [2, -1, -1]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3])
def test_bounds_simple(self):
c = [1, 2]
bounds = (1, 2)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
bounds = [(1, 2), (1, 2)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[1, 1])
def test_bounded_below_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (1.0, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_below_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (0.5, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounded_above_only_1(self):
c = np.array([1.0])
A_eq = np.array([[1.0]])
b_eq = np.array([3.0])
bounds = (None, 10.0)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3, desired_x=[3])
def test_bounded_above_only_2(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, 4)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_infinity(self):
c = np.ones(3)
A_eq = np.eye(3)
b_eq = np.array([1, 2, 3])
bounds = (-np.inf, np.inf)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq))
def test_bounds_mixed(self):
# Problem has one unbounded variable and
# another with a negative lower bound.
c = np.array([-1, 4]) * -1 # maximize
A_ub = np.array([[-3, 1],
[1, 2]], dtype=np.float64)
b_ub = [6, 4]
x0_bounds = (-np.inf, np.inf)
x1_bounds = (-3, np.inf)
bounds = (x0_bounds, x1_bounds)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7])
def test_bounds_equal_but_infeasible(self):
c = [-4, 1]
A_ub = [[7, -2], [0, 1], [2, -2]]
b_ub = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_but_infeasible2(self):
c = [-4, 1]
A_eq = [[7, -2], [0, 1], [2, -2]]
b_eq = [14, 0, 3]
bounds = [(2, 2), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bounds_equal_no_presolve(self):
# There was a bug when a lower and upper bound were equal but
# presolve was not on to eliminate the variable. The bound
# was being converted to an equality constraint, but the bound
# was not eliminated, leading to issues in postprocessing.
c = [1, 2]
A_ub = [[1, 2], [1.1, 2.2]]
b_ub = [4, 8]
bounds = [(1, 2), (2, 2)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_zero_column_1(self):
m, n = 3, 4
np.random.seed(0)
c = np.random.rand(n)
c[1] = 1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = [[1, 0, 1, 1]]
b_ub = 3
bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-9.7087836730413404)
def test_zero_column_2(self):
np.random.seed(0)
m, n = 2, 4
c = np.random.rand(n)
c[1] = -1
A_eq = np.random.rand(m, n)
A_eq[:, 1] = 0
b_eq = np.random.rand(m)
A_ub = np.random.rand(m, n)
A_ub[:, 1] = 0
b_ub = np.random.rand(m)
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
# Unboundedness detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_1(self):
c = [1, 2, 3]
A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_eq = [0, 3, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=3)
def test_zero_row_2(self):
A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
b_ub = [0, 3, 0]
c = [1, 2, 3]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0)
def test_zero_row_3(self):
m, n = 2, 4
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
A_eq[0, :] = 0
b_eq = np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_zero_row_4(self):
m, n = 2, 4
c = np.random.rand(n)
A_ub = np.random.rand(m, n)
A_ub[0, :] = 0
b_ub = -np.random.rand(m)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_1(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 2, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_eq_2(self):
c = [1, 1, 1, 2]
A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]]
b_eq = [1, 2, 1, 4]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=4)
def test_singleton_row_ub_1(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -2, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_singleton_row_ub_2(self):
c = [1, 1, 1, 2]
A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]]
b_ub = [1, 2, -0.5, 4]
bounds = [(None, None), (0, None), (0, None), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0.5)
def test_infeasible(self):
# Test linprog response to an infeasible problem
c = [-1, -1]
A_ub = [[1, 0],
[0, 1],
[-1, -1]]
b_ub = [2, 2, -5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_infeasible_inequality_bounds(self):
c = [1]
A_ub = [[2]]
b_ub = 4
bounds = (5, 6)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
# Infeasibility detected in presolve
if self.options.get('presolve', True):
assert_equal(res.nit, 0)
def test_unbounded(self):
# Test linprog response to an unbounded problem
c = np.array([1, 1]) * -1 # maximize
A_ub = [[-1, 1],
[-1, -1]]
b_ub = [-1, -2]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_unbounded_below_no_presolve_corrected(self):
c = [1]
bounds = [(None, 1)]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c=c, bounds=bounds,
method=self.method,
options=o)
if self.method == "revised simplex":
# Revised simplex has a special pathway for no constraints.
assert_equal(res.status, 5)
else:
_assert_unbounded(res)
def test_unbounded_no_nontrivial_constraints_1(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, -1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (0, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
if not self.method.lower().startswith("highs"):
assert_equal(res.x[-1], np.inf)
assert_equal(res.message[:36],
"The problem is (trivially) unbounded")
def test_unbounded_no_nontrivial_constraints_2(self):
"""
Test whether presolve pathway for detecting unboundedness after
constraint elimination is working.
"""
c = np.array([0, 0, 0, 1, -1, 1])
A_ub = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1]])
b_ub = np.array([2, -2, 0])
bounds = [(None, None), (None, None), (None, None),
(-1, 1), (-1, 1), (None, 0)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
if not self.method.lower().startswith("highs"):
assert_equal(res.x[-1], -np.inf)
assert_equal(res.message[:36],
"The problem is (trivially) unbounded")
def test_cyclic_recovery(self):
        # Test linprog's recovery from cycling using the Klee-Minty problem
# Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf
c = np.array([100, 10, 1]) * -1 # maximize
A_ub = [[1, 0, 0],
[20, 1, 0],
[200, 20, 1]]
b_ub = [1, 100, 10000]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7)
def test_cyclic_bland(self):
# Test the effect of Bland's rule on a cycling problem
c = np.array([-10, 57, 9, 24.])
A_ub = np.array([[0.5, -5.5, -2.5, 9],
[0.5, -1.5, -0.5, 1],
[1, 0, 0, 0]])
b_ub = [0, 0, 1]
# copy the existing options dictionary but change maxiter
maxiter = 100
o = {key: val for key, val in self.options.items()}
o['maxiter'] = maxiter
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
if self.method == 'simplex' and not self.options.get('bland'):
# simplex cycles without Bland's rule
_assert_iteration_limit_reached(res, o['maxiter'])
else:
# other methods, including simplex with Bland's rule, succeed
_assert_success(res, desired_x=[1, 0, 1, 0])
# note that revised simplex skips this test because it may or may not
# cycle depending on the initial basis
def test_remove_redundancy_infeasibility(self):
# mostly a test of redundancy removal, which is carefully tested in
# test__remove_redundancy.py
m, n = 10, 10
c = np.random.rand(n)
A_eq = np.random.rand(m, n)
b_eq = np.random.rand(m)
A_eq[-1, :] = 2 * A_eq[-2, :]
b_eq[-1] *= -1
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
#################
# General Tests #
#################
def test_nontrivial_problem(self):
# Problem involves all constraint types,
# negative resource limits, and rounding issues.
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
def test_lpgen_problem(self):
# Test linprog with a rather large problem (400 variables,
# 40 constraints) generated by https://gist.github.com/denis-bz/8647461
A_ub, b_ub, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-64.049494229)
def test_network_flow(self):
# A network flow problem with supply and demand at nodes
# and with costs along directed edges.
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf
c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18]
n, p = -1, 1
A_eq = [
[n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0],
[p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0],
[0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0],
[0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p],
[0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]]
b_eq = [0, 19, -16, 33, 0, 0, -36]
with suppress_warnings() as sup:
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7)
def test_network_flow_limited_capacity(self):
# A network flow problem with supply and demand at nodes
# and with costs and capacities along directed edges.
# http://blog.sommer-forst.de/2013/04/10/
c = [2, 2, 1, 3, 1]
bounds = [
[0, 4],
[0, 2],
[0, 2],
[0, 3],
[0, 5]]
n, p = -1, 1
A_eq = [
[n, n, 0, 0, 0],
[p, 0, n, n, 0],
[0, p, p, 0, n],
[0, 0, 0, p, p]]
b_eq = [-4, 0, 0, 4]
with suppress_warnings() as sup:
# this is an UmfpackWarning but I had trouble importing it
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14)
def test_simplex_algorithm_wikipedia_example(self):
# https://en.wikipedia.org/wiki/Simplex_algorithm#Example
c = [-2, -3, -4]
A_ub = [
[3, 2, 1],
[2, 5, 3]]
b_ub = [10, 15]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-20)
def test_enzo_example(self):
# https://github.com/scipy/scipy/issues/1779 lp2.py
#
# Translated from Octave code at:
# http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm
# and placed under MIT licence by Enzo Michelangeli
# with permission explicitly granted by the original author,
# Prof. Kazunobu Yoshida
c = [4, 8, 3, 0, 0, 0]
A_eq = [
[2, 5, 3, -1, 0, 0],
[3, 2.5, 8, 0, -1, 0],
[8, 10, 4, 0, 0, -1]]
b_eq = [185, 155, 600]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=317.5,
desired_x=[66.25, 0, 17.5, 0, 183.75, 0],
atol=6e-6, rtol=1e-7)
def test_enzo_example_b(self):
# rescued from https://github.com/scipy/scipy/pull/218
c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8]
A_eq = [[-1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1]]
b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3]
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-1.77,
desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3])
def test_enzo_example_c_with_degeneracy(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 20
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros(m))
def test_enzo_example_c_with_unboundedness(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [0, 0]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_unbounded(res)
def test_enzo_example_c_with_infeasibility(self):
# rescued from https://github.com/scipy/scipy/pull/218
m = 50
c = -np.ones(m)
tmp = 2 * np.pi * np.arange(m) / (m + 1)
A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp)))
b_eq = [1, 1]
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_infeasible(res)
def test_basic_artificial_vars(self):
# Problem is chosen to test two phase simplex methods when at the end
# of phase 1 some artificial variables remain in the basis.
# Also, for `method='simplex'`, the row in the tableau corresponding
        # to the artificial variables is not all zero.
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004])
A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0],
[0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0],
[1.0, 1.0, 0, 0, 0, 0]])
b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0])
A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]])
b_eq = np.array([0, 0])
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=0, desired_x=np.zeros_like(c),
atol=2e-6)
def test_optimize_result(self):
# check all fields in OptimizeResult
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
assert_(res.success)
assert_(res.nit)
assert_(not res.status)
assert_(res.message == "Optimization terminated successfully.")
assert_allclose(c @ res.x, res.fun)
assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11)
assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11)
#################
# Bug Fix Tests #
#################
def test_bug_5400(self):
# https://github.com/scipy/scipy/issues/5400
bounds = [
(0, None),
(0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100),
(0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900),
(0, None), (0, None), (0, None), (0, None), (0, None), (0, None)]
f = 1 / 9
g = -1e4
h = -3.1
A_ub = np.array([
[1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0],
[1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0],
[1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1],
[0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0],
[0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0],
[0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0],
[0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0],
[0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]])
b_ub = np.array([
0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900,
900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
with suppress_warnings() as sup:
sup.filter(OptimizeWarning,
"Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=-106.63507541835018)
def test_bug_6139(self):
# linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Note: This is not strictly a bug as the default tolerance determines
# if a result is "close enough" to zero and should not be expected
# to work for all cases.
c = np.array([1, 1, 1])
A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]])
b_eq = np.array([5.00000000e+00, -1.00000000e+04])
A_ub = -np.array([[0., 1000000., 1010000.]])
b_ub = -np.array([10000000.])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=14.95,
desired_x=np.array([5, 4.95, 5]))
def test_bug_6690(self):
# linprog simplex used to violate bound constraint despite reporting
# success.
# https://github.com/scipy/scipy/issues/6690
A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]])
b_eq = np.array([0.9626])
A_ub = np.array([
[0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0],
[0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37],
[0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0]
])
b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022])
bounds = np.array([
[-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73],
[0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15]
]).T
c = np.array([
-1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28
])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning,
"Solving system with option 'cholesky'")
sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = -1.19099999999
desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800,
0.5000, 0.4700, 0.0900, 0.3200, -0.7300])
_assert_success(res, desired_fun=desired_fun, desired_x=desired_x)
# Add small tol value to ensure arrays are less than or equal.
atol = 1e-6
assert_array_less(bounds[:, 0] - atol, res.x)
assert_array_less(res.x, bounds[:, 1] + atol)
def test_bug_7044(self):
# linprog simplex failed to "identify correct constraints" (?)
# leading to a non-optimal solution if A is rank-deficient.
# https://github.com/scipy/scipy/issues/7044
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
desired_fun = 1.730550597
_assert_success(res, desired_fun=desired_fun)
assert_allclose(A_eq.dot(res.x), b_eq)
assert_array_less(np.zeros(res.x.size) - 1e-5, res.x)
def test_bug_7237(self):
# https://github.com/scipy/scipy/issues/7237
# linprog simplex "explodes" when the pivot value is very
# close to zero.
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0])
A_ub = np.array([
[1., -724., 911., -551., -555., -896., 478., -80., -293.],
[1., 566., 42., 937., 233., 883., 392., -909., 57.],
[1., -208., -894., 539., 321., 532., -924., 942., 55.],
[1., 857., -859., 83., 462., -265., -971., 826., 482.],
[1., 314., -424., 245., -424., 194., -443., -104., -429.],
[1., 540., 679., 361., 149., -827., 876., 633., 302.],
[0., -1., -0., -0., -0., -0., -0., -0., -0.],
[0., -0., -1., -0., -0., -0., -0., -0., -0.],
[0., -0., -0., -1., -0., -0., -0., -0., -0.],
[0., -0., -0., -0., -1., -0., -0., -0., -0.],
[0., -0., -0., -0., -0., -1., -0., -0., -0.],
[0., -0., -0., -0., -0., -0., -1., -0., -0.],
[0., -0., -0., -0., -0., -0., -0., -1., -0.],
[0., -0., -0., -0., -0., -0., -0., -0., -1.],
[0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 1.]
])
b_ub = np.array([
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]])
b_eq = np.array([[1.]])
bounds = [(None, None)] * 9
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=108.568535, atol=1e-6)
def test_bug_8174(self):
# https://github.com/scipy/scipy/issues/8174
# The simplex method sometimes "explodes" if the pivot value is very
# close to zero.
A_ub = np.array([
[22714, 1008, 13380, -2713.5, -1116],
[-4986, -1092, -31220, 17386.5, 684],
[-4986, 0, 0, -2713.5, 0],
[22714, 0, 0, 17386.5, 0]])
b_ub = np.zeros(A_ub.shape[0])
c = -np.ones(A_ub.shape[1])
bounds = [(0, 1)] * A_ub.shape[1]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex':
_assert_unable_to_find_basic_feasible_sol(res)
else:
_assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6)
def test_bug_8174_2(self):
# Test supplementary example from issue 8174.
# https://github.com/scipy/scipy/issues/8174
# https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution
c = np.array([1, 0, 0, 0, 0, 0, 0])
A_ub = -np.identity(7)
b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]])
A_eq = np.array([
[1, 1, 1, 1, 1, 1, 0],
[0.3, 1.3, 0.9, 0, 0, 0, -1],
[0.3, 0, 0, 0, 0, 0, -2/3],
[0, 0.65, 0, 0, 0, 0, -1/15],
[0, 0, 0.3, 0, 0, 0, -1/15]
])
b_eq = np.array([[100], [0], [0], [0], [0]])
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_fun=43.3333333331385)
def test_bug_8561(self):
# Test that pivot row is chosen correctly when using Bland's rule
# This was originally written for the simplex method with
# Bland's rule only, but it doesn't hurt to test all methods/options
# https://github.com/scipy/scipy/issues/8561
c = np.array([7, 0, -4, 1.5, 1.5])
A_ub = np.array([
[4, 5.5, 1.5, 1.0, -3.5],
[1, -2.5, -2, 2.5, 0.5],
[3, -0.5, 4, -12.5, -7],
[-1, 4.5, 2, -3.5, -2],
[5.5, 2, -4.5, -1, 9.5]])
b_ub = np.array([0, 0, 0, 0, 1])
res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options,
method=self.method)
_assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3])
def test_bug_8662(self):
# linprog simplex used to report incorrect optimal results
# https://github.com/scipy/scipy/issues/8662
c = [-10, 10, 6, 3]
A_ub = [[8, -8, -4, 6],
[-8, 8, 4, -6],
[-4, 4, 8, -4],
[3, -3, -3, -10]]
b_ub = [9, -9, -9, -4]
bounds = [(0, None), (0, None), (0, None), (0, None)]
desired_fun = 36.0000000000
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# Set boundary condition as a constraint
A_ub.append([0, 0, -1, 0])
b_ub.append(0)
bounds[2] = (None, None)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
rtol = 1e-5
_assert_success(res1, desired_fun=desired_fun, rtol=rtol)
_assert_success(res2, desired_fun=desired_fun, rtol=rtol)
def test_bug_8663(self):
# exposed a bug in presolve
# https://github.com/scipy/scipy/issues/8663
c = [1, 5]
A_eq = [[0, -7]]
b_eq = [-6]
bounds = [(0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
# tested for interior-point with presolve off in TestLinprogIPSpecific
# https://github.com/scipy/scipy/issues/8664
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_infeasible(res)
def test_bug_8973(self):
"""
Test whether bug described at:
https://github.com/scipy/scipy/issues/8973
was fixed.
"""
c = np.array([0, 0, 0, 1, -1])
A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
b_ub = np.array([2, -2])
bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
# solution vector x is not unique
_assert_success(res, desired_fun=-2)
# HiGHS IPM had an issue where the following wasn't true!
assert_equal(c @ res.x, res.fun)
def test_bug_8973_2(self):
"""
Additional test for:
https://github.com/scipy/scipy/issues/8973
suggested in
https://github.com/scipy/scipy/pull/8985
review by @antonior92
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[-2], desired_fun=0)
def test_bug_10124(self):
"""
Test for linprog docstring problem
'disp'=True caused revised simplex failure
"""
c = np.zeros(1)
A_ub = np.array([[1]])
b_ub = np.array([-2])
bounds = (None, None)
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]
o = {"disp": True}
o.update(self.options)
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_x=[10, -3], desired_fun=-22)
def test_bug_10349(self):
"""
Test for redundancy removal tolerance issue
https://github.com/scipy/scipy/issues/10349
"""
A_eq = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
[1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1]])
b_eq = np.array([221, 210, 10, 141, 198, 102])
c = np.concatenate((0, 1, np.zeros(4)), axis=None)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options)
_assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92)
def test_bug_10466(self):
"""
Test that autoscale fixes poorly-scaled problem
"""
c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.]
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]]
b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08,
1.00663296e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09, 1.07374182e+09, 1.07374182e+09,
1.07374182e+09]
o = {}
# HiGHS methods don't use autoscale option
if not self.method.startswith("highs"):
o = {"autoscale": True}
o.update(self.options)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "Solving system with option...")
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(RuntimeWarning, "divide by zero encountered...")
sup.filter(RuntimeWarning, "overflow encountered...")
sup.filter(RuntimeWarning, "invalid value encountered...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
assert_allclose(res.fun, -8589934560)
#########################
# Method-specific Tests #
#########################
class LinprogSimplexTests(LinprogCommonTests):
method = "simplex"
class LinprogIPTests(LinprogCommonTests):
method = "interior-point"
class LinprogRSTests(LinprogCommonTests):
method = "revised simplex"
# Revised simplex does not reliably solve these problems.
# Failure is intermittent due to the random choice of elements to complete
    # the basis after phase 1 terminates. In any case, linprog exits
# gracefully, reporting numerical difficulties. I do not think this should
# prevent revised simplex from being merged, as it solves the problems
# most of the time and solves a broader range of problems than the existing
# simplex implementation.
# I believe that the root cause is the same for all three and that this
# same issue prevents revised simplex from solving many other problems
# reliably. Somehow the pivoting rule allows the algorithm to pivot into
# a singular basis. I haven't been able to find a reference that
# acknowledges this possibility, suggesting that there is a bug. On the
# other hand, the pivoting rule is quite simple, and I can't find a
# mistake, which suggests that this is a possibility with the pivoting
# rule. Hopefully, a better pivoting rule will fix the issue.
def test_bug_5400(self):
pytest.skip("Intermittent failure acceptable.")
def test_bug_8662(self):
pytest.skip("Intermittent failure acceptable.")
def test_network_flow(self):
pytest.skip("Intermittent failure acceptable.")
class LinprogHiGHSTests(LinprogCommonTests):
def test_callback(self):
# this is the problem from test_callback
cb = lambda res: None
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub,
callback=cb, method=self.method)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
@pytest.mark.parametrize("options",
[{"maxiter": -1},
{"disp": -1},
{"presolve": -1},
{"time_limit": -1},
{"dual_feasibility_tolerance": -1},
{"primal_feasibility_tolerance": -1},
{"ipm_optimality_tolerance": -1},
{"simplex_dual_edge_weight_strategy": "ekki"},
])
def test_invalid_option_values(self, options):
def f(options):
linprog(1, method=self.method, options=options)
options.update(self.options)
assert_warns(OptimizeWarning, f, options=options)
def test_crossover(self):
c = np.array([1, 1]) * -1 # maximize
A_ub = np.array([[1, 1]])
b_ub = [1]
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
# there should be nonzero crossover iterations for IPM (only)
assert_equal(res.crossover_nit == 0, self.method != "highs-ipm")
################################
# Simplex Option-Specific Tests#
################################
class TestLinprogSimplexDefault(LinprogSimplexTests):
def setup_method(self):
self.options = {}
def test_bug_5400(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_7237_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate error is raised.
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexDefault, self).test_bug_8174()
class TestLinprogSimplexBland(LinprogSimplexTests):
def setup_method(self):
self.options = {'bland': True}
def test_bug_5400(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate error is raised.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError):
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexBland, self).test_bug_8174()
class TestLinprogSimplexNoPresolve(LinprogSimplexTests):
def setup_method(self):
self.options = {'presolve': False}
is_32_bit = np.intp(0).itemsize < 8
is_linux = sys.platform.startswith('linux')
@pytest.mark.xfail(
condition=is_32_bit and is_linux,
reason='Fails with warning on 32-bit linux')
def test_bug_5400(self):
super(TestLinprogSimplexNoPresolve, self).test_bug_5400()
def test_bug_6139_low_tol(self):
# Linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Without ``presolve`` eliminating such rows the result is incorrect.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError, match='linprog status 4'):
return super(TestLinprogSimplexNoPresolve, self).test_bug_6139()
def test_bug_7237_low_tol(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super(TestLinprogSimplexNoPresolve, self).test_bug_8174()
def test_unbounded_no_nontrivial_constraints_1(self):
pytest.skip("Tests behavior specific to presolve")
def test_unbounded_no_nontrivial_constraints_2(self):
pytest.skip("Tests behavior specific to presolve")
#######################################
# Interior-Point Option-Specific Tests#
#######################################
class TestLinprogIPDense(LinprogIPTests):
options = {"sparse": False}
if has_cholmod:
class TestLinprogIPSparseCholmod(LinprogIPTests):
options = {"sparse": True, "cholesky": True}
if has_umfpack:
class TestLinprogIPSparseUmfpack(LinprogIPTests):
options = {"sparse": True, "cholesky": False}
def test_bug_10466(self):
pytest.skip("Autoscale doesn't fix everything, and that's OK.")
class TestLinprogIPSparse(LinprogIPTests):
options = {"sparse": True, "cholesky": False, "sym_pos": False}
@pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
"perturbations in linear system solution in "
"_linprog_ip._sym_solve.")
def test_bug_6139(self):
super(TestLinprogIPSparse, self).test_bug_6139()
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparse, self).test_bug_6690()
def test_magic_square_sparse_no_presolve(self):
# test linprog with a problem with a rank-deficient A_eq matrix
A_eq, b_eq, c, N = magic_square(3)
bounds = (0, 1)
with suppress_warnings() as sup:
if has_umfpack:
sup.filter(UmfpackWarning)
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
sup.filter(OptimizeWarning, "Solving system with option...")
o = {key: self.options[key] for key in self.options}
o["presolve"] = False
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
def test_sparse_solve_options(self):
# checking that problem is solved with all column permutation options
A_eq, b_eq, c, N = magic_square(3)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(OptimizeWarning, "Invalid permc_spec option")
o = {key: self.options[key] for key in self.options}
permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A',
'COLAMD', 'ekki-ekki-ekki')
# 'ekki-ekki-ekki' raises warning about invalid permc_spec option
# and uses default
for permc_spec in permc_specs:
o["permc_spec"] = permc_spec
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=o)
_assert_success(res, desired_fun=1.730550597)
class TestLinprogIPSparsePresolve(LinprogIPTests):
options = {"sparse": True, "_sparse_presolve": True}
@pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level "
"perturbations in linear system solution in "
"_linprog_ip._sym_solve.")
def test_bug_6139(self):
super(TestLinprogIPSparsePresolve, self).test_bug_6139()
def test_enzo_example_c_with_infeasibility(self):
pytest.skip('_sparse_presolve=True incompatible with presolve=False')
@pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877')
def test_bug_6690(self):
# Test defined in base class, but can't mark as xfail there
super(TestLinprogIPSparsePresolve, self).test_bug_6690()
class TestLinprogIPSpecific:
method = "interior-point"
# the following tests don't need to be performed separately for
# sparse presolve, sparse after presolve, and dense
def test_solver_select(self):
# check that default solver is selected as expected
if has_cholmod:
options = {'sparse': True, 'cholesky': True}
elif has_umfpack:
options = {'sparse': True, 'cholesky': False}
else:
options = {'sparse': True, 'cholesky': False, 'sym_pos': False}
A, b, c = lpgen_2d(20, 20)
res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options)
res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver
assert_allclose(res1.fun, res2.fun,
err_msg="linprog default solver unexpected result",
rtol=1e-15, atol=1e-15)
def test_unbounded_below_no_presolve_original(self):
# formerly caused segfault in TravisCI w/ "cholesky":True
c = [-1]
bounds = [(None, 1)]
res = linprog(c=c, bounds=bounds,
method=self.method,
options={"presolve": False, "cholesky": True})
_assert_success(res, desired_fun=-1)
def test_cholesky(self):
# use cholesky factorization and triangular solves
A, b, c = lpgen_2d(20, 20)
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"cholesky": True}) # only for dense
_assert_success(res, desired_fun=-64.049494229)
def test_alternate_initial_point(self):
# use "improved" initial point
A, b, c = lpgen_2d(20, 20)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...")
sup.filter(OptimizeWarning, "Solving system with option...")
sup.filter(LinAlgWarning, "Ill-conditioned matrix...")
res = linprog(c, A_ub=A, b_ub=b, method=self.method,
options={"ip": True, "disp": True})
# ip code is independent of sparse/dense
_assert_success(res, desired_fun=-64.049494229)
def test_bug_8664(self):
# interior-point has trouble with this when presolve is off
c = [4]
A_ub = [[2], [5]]
b_ub = [4, 4]
A_eq = [[0], [-8], [9]]
b_eq = [3, 2, 10]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
sup.filter(OptimizeWarning, "Solving system with option...")
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options={"presolve": False})
assert_(not res.success, "Incorrectly reported success")
########################################
# Revised Simplex Option-Specific Tests#
########################################
class TestLinprogRSCommon(LinprogRSTests):
options = {}
def test_cyclic_bland(self):
pytest.skip("Intermittent failure acceptable.")
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_unbounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, None), (None, None), (0, None), (None, None)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bounded_variables(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bounds = [(None, 1), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_negative_unbounded_variable(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
b_eq = [4]
x_star = np.array([-219/385, 582/385, 0, 4/10])
f_star = 3951/385
bounds = [(None, None), (1, None), (0, None), (.4, .6)]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
def test_redundant_constraints_with_guess(self):
A, b, c, N = magic_square(3)
p = np.random.rand(*c.shape)
with suppress_warnings() as sup:
sup.filter(OptimizeWarning, "A_eq does not appear...")
sup.filter(RuntimeWarning, "invalid value encountered")
sup.filter(LinAlgWarning)
res = linprog(c, A_eq=A, b_eq=b, method=self.method)
res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x)
res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x)
_assert_success(res2, desired_fun=1.730550597)
assert_equal(res2.nit, 0)
_assert_success(res3)
assert_(res3.nit < res.nit) # hot start reduces iterations
class TestLinprogRSBland(LinprogRSTests):
options = {"pivot": "bland"}
############################################
# HiGHS-Simplex-Dual Option-Specific Tests #
############################################
class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests):
method = "highs-ds"
options = {}
def test_lad_regression(self):
'''The scaled model should be optimal but unscaled model infeasible.'''
c, A_ub, b_ub, bnds = l1_regression_prob()
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,
method=self.method, options=self.options)
assert_equal(res.status, 4)
assert_('An optimal solution to the scaled '
'model was found but' in res.message)
assert_(res.x is not None)
assert_(np.all(res.slack > -1e-6))
assert_(np.all(res.x <= [np.inf if u is None else u for l, u in bnds]))
assert_(np.all(res.x >= [-np.inf if l is None else l for l, u in bnds]))
###################################
# HiGHS-IPM Option-Specific Tests #
###################################
class TestLinprogHiGHSIPM(LinprogHiGHSTests):
method = "highs-ipm"
options = {}
###########################
# Autoscale-Specific Tests#
###########################
class AutoscaleTests:
options = {"autoscale": True}
test_bug_6139 = LinprogCommonTests.test_bug_6139
test_bug_6690 = LinprogCommonTests.test_bug_6690
test_bug_7237 = LinprogCommonTests.test_bug_7237
class TestAutoscaleIP(AutoscaleTests):
method = "interior-point"
def test_bug_6139(self):
self.options['tol'] = 1e-10
return AutoscaleTests.test_bug_6139(self)
class TestAutoscaleSimplex(AutoscaleTests):
method = "simplex"
class TestAutoscaleRS(AutoscaleTests):
method = "revised simplex"
def test_nontrivial_problem_with_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=x_star)
_assert_success(res, desired_fun=f_star, desired_x=x_star)
assert_equal(res.nit, 0)
def test_nontrivial_problem_with_bad_guess(self):
c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem()
bad_guess = [1, 2, 3, .5]
res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method, options=self.options, x0=bad_guess)
assert_equal(res.status, 6)
###########################
# Redundancy Removal Tests#
###########################
class RRTests:
method = "interior-point"
LCT = LinprogCommonTests
# these are a few of the existing tests that have redundancy
test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility
test_bug_10349 = LCT.test_bug_10349
test_bug_7044 = LCT.test_bug_7044
test_NFLC = LCT.test_network_flow_limited_capacity
test_enzo_example_b = LCT.test_enzo_example_b
class TestRRSVD(RRTests):
options = {"rr_method": "SVD"}
class TestRRPivot(RRTests):
options = {"rr_method": "pivot"}
class TestRRID(RRTests):
options = {"rr_method": "ID"}
| [
[
[
48,
51
],
[
67310,
67313
]
],
[
[
60,
71
],
[
67271,
67273
],
[
2909,
2911
],
[
2969,
2971
],
[
3053,
3055
],
[
4287,
4289
],
[
4296,
4298
],
[
4336,
4338
],
[
4374,
4376
],
[
4604,
4606
],
[
4632,
4634
],
[
4678,
4680
],
[
4710,
4712
],
[
4834,
4836
],
[
4866,
4868
],
[
4975,
4977
],
[
5007,
5009
],
[
5094,
5096
],
[
5154,
5156
],
[
5187,
5189
],
[
5226,
5228
],
[
5262,
5264
],
[
5301,
5303
],
[
5336,
5338
],
[
5363,
5365
],
[
5415,
5417
],
[
5394,
5396
],
[
5454,
5456
],
[
5434,
5436
],
[
5474,
5476
],
[
6209,
6211
],
[
6240,
6242
],
[
6315,
6317
],
[
6342,
6344
],
[
6419,
6421
],
[
6506,
6508
],
[
6811,
6813
],
[
7509,
7511
],
[
7967,
7969
],
[
8443,
8445
],
[
10556,
10558
],
[
10587,
10589
],
[
10625,
10627
],
[
10697,
10699
],
[
11112,
11114
],
[
11143,
11145
],
[
11181,
11183
],
[
11253,
11255
],
[
12033,
12035
],
[
13678,
13680
],
[
14025,
14027
],
[
14137,
14139
],
[
14170,
14172
],
[
14214,
14216
],
[
14257,
14259
],
[
14294,
14296
],
[
15367,
15369
],
[
18306,
18308
],
[
18314,
18316
],
[
18361,
18363
],
[
18370,
18372
],
[
19500,
19502
],
[
19511,
19513
],
[
19917,
19919
],
[
19928,
19930
],
[
20356,
20358
],
[
21709,
21711
],
[
21740,
21742
],
[
21773,
21775
],
[
22052,
22054
],
[
22078,
22080
],
[
22103,
22105
],
[
22146,
22148
],
[
22332,
22334
],
[
22400,
22402
],
[
22431,
22433
],
[
22464,
22466
],
[
22744,
22746
],
[
22770,
22772
],
[
22795,
22797
],
[
22834,
22836
],
[
23023,
23025
],
[
23086,
23088
],
[
23112,
23114
],
[
23137,
23139
],
[
23176,
23178
],
[
23184,
23186
],
[
23370,
23372
],
[
23526,
23528
],
[
23576,
23578
],
[
23635,
23637
],
[
23691,
23693
],
[
23699,
23701
],
[
23732,
23734
],
[
25364,
25366
],
[
25394,
25396
],
[
25444,
25446
],
[
25503,
25505
],
[
25861,
25863
],
[
25911,
25913
],
[
25962,
25964
],
[
26021,
26023
],
[
26055,
26057
],
[
26114,
26116
],
[
27093,
27095
],
[
27126,
27128
],
[
27185,
27187
],
[
27550,
27552
],
[
27583,
27585
],
[
27643,
27645
],
[
30511,
30513
],
[
31502,
31504
],
[
31548,
31550
],
[
31685,
31687
],
[
32057,
32059
],
[
32383,
32385
],
[
32428,
32430
],
[
32564,
32566
],
[
32937,
32939
],
[
33249,
33251
],
[
33711,
33713
],
[
33754,
33756
],
[
34844,
34846
],
[
34877,
34879
],
[
34913,
34915
],
[
40606,
40608
],
[
40635,
40637
],
[
40643,
40645
],
[
40688,
40690
],
[
40699,
40701
],
[
40716,
40718
],
[
40927,
40929
],
[
41086,
41088
],
[
41115,
41117
],
[
41123,
41125
],
[
41161,
41163
],
[
41172,
41174
],
[
41189,
41191
],
[
41523,
41525
],
[
41552,
41554
],
[
41560,
41562
],
[
41598,
41600
],
[
41609,
41611
],
[
41626,
41628
],
[
42232,
42234
],
[
42299,
42301
],
[
42489,
42491
],
[
42541,
42543
],
[
42616,
42618
],
[
42808,
42810
],
[
43972,
43974
],
[
45971,
45973
],
[
46127,
46129
],
[
47097,
47099
],
[
47132,
47134
],
[
47195,
47197
],
[
47255,
47257
],
[
47308,
47310
],
[
47565,
47567
],
[
47784,
47786
],
[
47851,
47853
],
[
47885,
47887
],
[
48173,
48175
],
[
48234,
48236
],
[
48422,
48424
],
[
49110,
49112
],
[
50279,
50281
],
[
50502,
50504
],
[
50556,
50558
],
[
51870,
51872
],
[
52009,
52011
],
[
52073,
52075
],
[
52512,
52514
],
[
52717,
52719
],
[
52754,
52756
],
[
53575,
53577
],
[
53623,
53625
],
[
53653,
53655
],
[
53721,
53723
],
[
53957,
53959
],
[
54675,
54677
],
[
54721,
54723
],
[
54933,
54935
],
[
57821,
57823
],
[
57863,
57865
],
[
57923,
57925
],
[
58582,
58584
],
[
58609,
58611
],
[
58640,
58642
],
[
59027,
59029
],
[
59054,
59056
],
[
59085,
59087
],
[
59657,
59659
],
[
59928,
59930
],
[
59980,
59982
],
[
60002,
60004
],
[
64129,
64131
],
[
65290,
65292
],
[
65339,
65341
],
[
76791,
76793
],
[
77588,
77590
],
[
79031,
79033
],
[
79074,
79076
],
[
79091,
79093
],
[
79154,
79156
],
[
79172,
79174
]
],
[
[
99,
106
],
[
751,
758
],
[
812,
819
],
[
1032,
1039
],
[
1226,
1233
],
[
1658,
1665
],
[
1719,
1726
],
[
43162,
43169
],
[
43191,
43198
],
[
43216,
43223
],
[
43248,
43255
],
[
75049,
75056
],
[
78175,
78182
],
[
78881,
78888
],
[
78988,
78995
],
[
79023,
79030
],
[
79066,
79073
],
[
79146,
79153
],
[
7137,
7144
],
[
7181,
7188
],
[
7228,
7235
],
[
7277,
7284
],
[
7321,
7328
]
],
[
[
108,
123
],
[
2297,
2312
],
[
2497,
2512
],
[
7729,
7744
],
[
7777,
7792
],
[
7821,
7836
],
[
7869,
7884
],
[
10895,
10910
],
[
11449,
11464
],
[
43320,
43335
],
[
43364,
43379
],
[
43430,
43445
],
[
50214,
50229
],
[
62300,
62315
],
[
73090,
73105
]
],
[
[
125,
137
],
[
892,
904
],
[
1093,
1105
],
[
1287,
1299
],
[
2229,
2241
],
[
15580,
15592
],
[
16046,
16058
],
[
16285,
16297
],
[
16506,
16518
],
[
16814,
16826
],
[
17261,
17273
],
[
17442,
17454
],
[
17644,
17656
],
[
19032,
19044
],
[
19227,
19239
],
[
19803,
19815
],
[
26418,
26430
],
[
27461,
27473
],
[
27919,
27931
],
[
28367,
28379
],
[
29212,
29224
],
[
30387,
30399
],
[
31215,
31227
],
[
32033,
32045
],
[
32077,
32089
],
[
32912,
32924
],
[
32957,
32969
],
[
58301,
58313
],
[
65603,
65615
],
[
75714,
75726
],
[
76155,
76167
],
[
76584,
76596
],
[
77127,
77139
],
[
77457,
77469
],
[
78111,
78123
],
[
78845,
78857
],
[
80382,
80394
],
[
80712,
80724
]
],
[
[
166,
183
],
[
49408,
49425
],
[
49462,
49479
],
[
50261,
50278
]
],
[
[
185,
197
],
[
12426,
12438
],
[
65197,
65209
]
],
[
[
199,
216
],
[
35005,
35022
],
[
35973,
35990
],
[
37080,
37097
],
[
37915,
37932
],
[
40125,
40142
],
[
46233,
46250
],
[
48534,
48551
],
[
49787,
49804
],
[
52832,
52849
],
[
54009,
54026
],
[
55584,
55601
],
[
56075,
56092
],
[
57345,
57362
],
[
60040,
60057
],
[
61657,
61674
],
[
70141,
70158
],
[
70834,
70851
],
[
74059,
74076
],
[
74765,
74782
],
[
77626,
77643
]
],
[
[
237,
260
],
[
8049,
8062
],
[
8156,
8169
],
[
8259,
8272
],
[
10227,
10240
],
[
12790,
12803
],
[
12863,
12876
],
[
12947,
12960
],
[
13058,
13071
],
[
13131,
13144
],
[
13198,
13211
],
[
13271,
13284
],
[
13338,
13351
],
[
13636,
13649
],
[
14487,
14500
],
[
64222,
64235
]
],
[
[
288,
295
],
[
7596,
7603
],
[
8075,
8082
],
[
8182,
8189
],
[
8285,
8292
],
[
8531,
8538
],
[
9443,
9450
],
[
9820,
9827
],
[
10252,
10259
],
[
10719,
10726
],
[
11275,
11282
],
[
11818,
11825
],
[
14642,
14649
],
[
14964,
14971
],
[
15419,
15426
],
[
15883,
15890
],
[
16086,
16093
],
[
16325,
16332
],
[
16546,
16553
],
[
17122,
17129
],
[
17301,
17308
],
[
17482,
17489
],
[
18880,
18887
],
[
19075,
19082
],
[
19327,
19334
],
[
19555,
19562
],
[
19972,
19979
],
[
20502,
20509
],
[
21029,
21036
],
[
21298,
21305
],
[
21501,
21508
],
[
21832,
21839
],
[
22168,
22175
],
[
22524,
22531
],
[
22859,
22866
],
[
23206,
23213
],
[
23794,
23801
],
[
24167,
24174
],
[
24495,
24502
],
[
25173,
25180
],
[
25649,
25656
],
[
26176,
26183
],
[
26585,
26592
],
[
26878,
26885
],
[
27217,
27224
],
[
27675,
27682
],
[
28123,
28130
],
[
28571,
28578
],
[
28968,
28975
],
[
29485,
29492
],
[
29862,
29869
],
[
30143,
30150
],
[
30634,
30641
],
[
30983,
30990
],
[
31827,
31834
],
[
32706,
32713
],
[
33419,
33426
],
[
34093,
34100
],
[
35118,
35125
],
[
35561,
35568
],
[
36205,
36212
],
[
37164,
37171
],
[
38350,
38357
],
[
38755,
38762
],
[
39472,
39479
],
[
40238,
40245
],
[
40766,
40773
],
[
41239,
41246
],
[
41769,
41776
],
[
42647,
42654
],
[
43020,
43027
],
[
46488,
46495
],
[
47375,
47382
],
[
48941,
48948
],
[
50006,
50013
],
[
52141,
52148
],
[
52984,
52991
],
[
54193,
54200
],
[
54973,
54980
],
[
55808,
55815
],
[
56299,
56306
],
[
56803,
56810
],
[
57503,
57510
],
[
58033,
58040
],
[
58699,
58706
],
[
59321,
59328
],
[
60153,
60160
],
[
62192,
62199
],
[
64257,
64264
],
[
64360,
64367
],
[
65391,
65398
],
[
70504,
70511
],
[
71403,
71410
],
[
72938,
72945
],
[
73017,
73024
],
[
73434,
73441
],
[
73756,
73763
],
[
74314,
74321
],
[
74923,
74930
],
[
75521,
75528
],
[
75962,
75969
],
[
76391,
76398
],
[
76934,
76941
],
[
77328,
77335
],
[
77845,
77852
],
[
77911,
77918
],
[
77987,
77994
],
[
78727,
78734
],
[
80189,
80196
],
[
80583,
80590
],
[
12232,
12239
],
[
12640,
12647
],
[
13911,
13918
],
[
18477,
18484
],
[
65104,
65111
]
],
[
[
297,
312
],
[
12439,
12454
],
[
35056,
35071
],
[
36024,
36039
],
[
38177,
38192
],
[
38244,
38259
],
[
40176,
40191
],
[
46284,
46299
],
[
48656,
48671
],
[
48760,
48775
],
[
49838,
49853
],
[
54131,
54146
],
[
57435,
57450
],
[
60091,
60106
],
[
61708,
61723
],
[
65210,
65225
],
[
66448,
66463
],
[
67036,
67051
],
[
68413,
68428
],
[
70335,
70350
],
[
70885,
70900
],
[
70952,
70967
],
[
74179,
74194
],
[
74855,
74870
],
[
77677,
77692
]
],
[
[
345,
362
],
[
70263,
70280
]
],
[
[
388,
401
],
[
36172,
36185
],
[
37131,
37144
],
[
38317,
38330
],
[
46455,
46468
],
[
48908,
48921
],
[
49973,
49986
],
[
52951,
52964
],
[
55774,
55787
],
[
56265,
56278
],
[
62130,
62143
],
[
74252,
74265
],
[
77812,
77825
]
],
[
[
409,
421
],
[
6544,
6549
],
[
14090,
14095
]
],
[
[
429,
435
],
[
64485,
64491
],
[
67349,
67355
],
[
69416,
69422
],
[
69723,
69729
],
[
71685,
71691
],
[
72133,
72139
],
[
18590,
18596
],
[
18641,
18647
],
[
18728,
18734
],
[
18779,
18785
],
[
63738,
63744
],
[
63824,
63830
],
[
63914,
63920
],
[
65918,
65924
],
[
66151,
66157
],
[
66435,
66441
],
[
66693,
66699
],
[
66975,
66981
],
[
67023,
67029
],
[
67936,
67942
],
[
68116,
68122
],
[
68400,
68406
],
[
68568,
68574
],
[
68686,
68692
],
[
69233,
69239
],
[
72057,
72063
],
[
75335,
75341
]
],
[
[
437,
448
],
[
69065,
69076
],
[
38029,
38040
],
[
48577,
48588
],
[
54052,
54063
],
[
55627,
55638
],
[
56118,
56129
],
[
61773,
61784
],
[
70184,
70195
],
[
72727,
72738
]
],
[
[
493,
507
],
[
38069,
38083
],
[
48617,
48631
],
[
54092,
54106
],
[
55667,
55681
],
[
56158,
56172
],
[
61813,
61827
],
[
70224,
70238
]
],
[
[
532,
543
],
[
69065,
69076
],
[
38029,
38040
],
[
48577,
48588
],
[
54052,
54063
],
[
55627,
55638
],
[
56118,
56129
],
[
61773,
61784
],
[
70184,
70195
],
[
72727,
72738
]
],
[
[
553,
564
],
[
68940,
68951
],
[
72644,
72655
]
],
[
[
588,
596
]
],
[
[
630,
649
]
],
[
[
674,
685
],
[
68940,
68951
],
[
72644,
72655
]
],
[
[
700,
731
],
[
15526,
15557
],
[
34323,
34354
]
],
[
[
970,
988
],
[
17201,
17219
],
[
17382,
17400
],
[
17584,
17602
],
[
18964,
18982
],
[
19159,
19177
],
[
24282,
24300
],
[
24610,
24628
],
[
25277,
25295
],
[
27332,
27350
],
[
27790,
27808
],
[
28238,
28256
],
[
29083,
29101
],
[
29977,
29995
],
[
30258,
30276
],
[
35237,
35255
],
[
41873,
41891
],
[
57622,
57640
]
],
[
[
1165,
1182
],
[
19388,
19405
],
[
19653,
19670
],
[
26291,
26308
],
[
30749,
30766
],
[
31269,
31286
],
[
31942,
31959
],
[
32821,
32838
],
[
41354,
41371
]
],
[
[
1358,
1399
],
[
53187,
53228
]
],
[
[
1796,
1811
],
[
7667,
7682
],
[
8574,
8589
],
[
9544,
9559
],
[
9950,
9965
],
[
10834,
10849
],
[
11390,
11405
],
[
11933,
11948
],
[
15983,
15998
],
[
16212,
16227
],
[
16438,
16453
],
[
16740,
16755
],
[
20070,
20085
],
[
20617,
20632
],
[
21144,
21159
],
[
21413,
21428
],
[
21616,
21631
],
[
21947,
21962
],
[
22283,
22298
],
[
22639,
22654
],
[
22974,
22989
],
[
23321,
23336
],
[
23909,
23924
],
[
25764,
25779
],
[
26700,
26715
],
[
26993,
27008
],
[
28686,
28701
],
[
29600,
29615
],
[
33534,
33549
],
[
34474,
34489
],
[
35676,
35691
],
[
36324,
36339
],
[
37283,
37298
],
[
38469,
38484
],
[
38870,
38885
],
[
39587,
39602
],
[
40357,
40372
],
[
40881,
40896
],
[
42762,
42777
],
[
46607,
46622
],
[
47491,
47506
],
[
49241,
49256
],
[
50160,
50175
],
[
52256,
52271
],
[
53260,
53275
],
[
54312,
54327
],
[
55078,
55093
],
[
56439,
56454
],
[
56505,
56520
],
[
56918,
56933
],
[
58190,
58205
],
[
58814,
58829
],
[
59425,
59440
],
[
60272,
60287
],
[
64421,
64436
],
[
70612,
70627
],
[
71523,
71538
],
[
73581,
73596
],
[
73879,
73894
],
[
74484,
74499
],
[
75647,
75662
],
[
76088,
76103
],
[
76517,
76532
],
[
77060,
77075
],
[
78056,
78071
],
[
78145,
78160
],
[
80315,
80330
]
],
[
[
2654,
2666
],
[
49758,
49770
],
[
70087,
70099
],
[
70805,
70817
],
[
77560,
77572
]
],
[
[
4436,
4444
],
[
9412,
9420
],
[
35942,
35950
],
[
72906,
72914
],
[
73725,
73733
],
[
74029,
74037
]
],
[
[
5065,
5080
],
[
42987,
43002
]
],
[
[
5547,
5565
],
[
35526,
35544
],
[
75486,
75504
],
[
75856,
75874
],
[
76295,
76313
],
[
76734,
76752
],
[
77259,
77277
],
[
80154,
80172
],
[
80514,
80532
]
],
[
[
5914,
5932
],
[
78692,
78710
]
],
[
[
6945,
6966
],
[
9280,
9301
]
],
[
[
7923,
7955
]
],
[
[
8384,
8402
]
],
[
[
8634,
8638
],
[
11286,
11290
],
[
21309,
21313
],
[
21512,
21516
],
[
21843,
21847
],
[
22179,
22183
],
[
22535,
22539
],
[
22870,
22874
],
[
23217,
23221
],
[
24506,
24510
],
[
26596,
26600
],
[
27228,
27232
],
[
28134,
28138
],
[
28582,
28586
],
[
35129,
35133
],
[
37175,
37179
],
[
38361,
38365
],
[
39483,
39487
],
[
40249,
40253
],
[
40777,
40781
],
[
41250,
41254
],
[
41780,
41784
],
[
50017,
50021
],
[
56814,
56818
],
[
60164,
60168
],
[
62203,
62207
],
[
70515,
70519
],
[
71414,
71418
]
],
[
[
8646,
8650
],
[
11292,
11296
],
[
21315,
21319
],
[
21518,
21522
],
[
21849,
21853
],
[
22185,
22189
],
[
22541,
22545
],
[
22876,
22880
],
[
23223,
23227
],
[
24512,
24516
],
[
26602,
26606
],
[
27234,
27238
],
[
28140,
28144
],
[
28588,
28592
],
[
35135,
35139
],
[
37181,
37185
],
[
38367,
38371
],
[
39489,
39493
],
[
40255,
40259
],
[
40783,
40787
],
[
41256,
41260
],
[
41786,
41790
],
[
50023,
50027
],
[
56820,
56824
],
[
60170,
60174
],
[
62209,
62213
],
[
70521,
70525
],
[
71420,
71424
]
],
[
[
8658,
8662
],
[
10742,
10746
],
[
20525,
20529
],
[
21052,
21056
],
[
21321,
21325
],
[
21524,
21528
],
[
23817,
23821
],
[
24190,
24194
],
[
25196,
25200
],
[
26901,
26905
],
[
27698,
27702
],
[
28991,
28995
],
[
29508,
29512
],
[
29885,
29889
],
[
30166,
30170
],
[
30657,
30661
],
[
31850,
31854
],
[
32729,
32733
],
[
33442,
33446
],
[
34116,
34120
],
[
36228,
36232
],
[
38778,
38782
],
[
46511,
46515
],
[
53007,
53011
],
[
55831,
55835
],
[
56322,
56326
],
[
58056,
58060
],
[
58722,
58726
],
[
59344,
59348
],
[
65429,
65433
]
],
[
[
8670,
8674
],
[
10748,
10752
],
[
20531,
20535
],
[
21058,
21062
],
[
21327,
21331
],
[
21530,
21534
],
[
23823,
23827
],
[
24196,
24200
],
[
25202,
25206
],
[
26907,
26911
],
[
27704,
27708
],
[
28997,
29001
],
[
29514,
29518
],
[
29891,
29895
],
[
30172,
30176
],
[
30663,
30667
],
[
31856,
31860
],
[
32735,
32739
],
[
33448,
33452
],
[
34122,
34126
],
[
36234,
36238
],
[
38784,
38788
],
[
46517,
46521
],
[
53013,
53017
],
[
55837,
55841
],
[
56328,
56332
],
[
58062,
58066
],
[
58728,
58732
],
[
59350,
59354
],
[
65440,
65444
]
],
[
[
8682,
8688
],
[
20537,
20543
],
[
21064,
21070
],
[
26620,
26626
],
[
26913,
26919
],
[
27252,
27258
],
[
27710,
27716
],
[
28158,
28164
],
[
28606,
28612
],
[
29897,
29903
],
[
30669,
30675
],
[
33454,
33460
],
[
34128,
34134
],
[
35153,
35159
],
[
35596,
35602
],
[
36240,
36246
],
[
37199,
37205
],
[
38790,
38796
],
[
39507,
39513
],
[
40273,
40279
],
[
40801,
40807
],
[
41274,
41280
],
[
41804,
41810
],
[
42682,
42688
],
[
50041,
50047
],
[
54228,
54234
],
[
57538,
57544
],
[
60188,
60194
],
[
62227,
62233
],
[
65475,
65481
],
[
71438,
71444
],
[
74958,
74964
],
[
75556,
75562
],
[
77363,
77369
],
[
80224,
80230
],
[
80618,
80624
]
],
[
[
8756,
8774
],
[
62445,
62463
],
[
62512,
62530
],
[
62586,
62604
],
[
63988,
64006
],
[
79584,
79602
],
[
79637,
79655
],
[
79690,
79708
],
[
80883,
80901
]
],
[
[
62425,
62444
],
[
65803,
65822
],
[
66565,
66584
],
[
67160,
67179
]
],
[
[
62497,
62511
],
[
68886,
68900
],
[
68990,
69004
],
[
69115,
69129
],
[
69325,
69339
],
[
71605,
71619
]
],
[
[
62571,
62585
],
[
75259,
75273
],
[
78262,
78276
]
],
[
[
63970,
63987
],
[
78485,
78502
],
[
79357,
79374
]
],
[
[
65777,
65802
],
[
66484,
66509
]
],
[
[
66541,
66564
],
[
67076,
67099
]
],
[
[
67131,
67159
],
[
67506,
67534
],
[
68018,
68046
],
[
68449,
68477
]
],
[
[
68867,
68885
]
],
[
[
68963,
68989
]
],
[
[
69088,
69114
]
],
[
[
69305,
69324
],
[
69674,
69693
],
[
69892,
69911
]
],
[
[
71577,
71604
],
[
71943,
71970
],
[
72302,
72329
]
],
[
[
72361,
72382
]
],
[
[
75239,
75258
]
],
[
[
78243,
78261
]
],
[
[
78457,
78484
]
],
[
[
79337,
79356
]
],
[
[
79513,
79527
],
[
79747,
79761
],
[
79939,
79953
],
[
80003,
80017
],
[
79875,
79889
]
],
[
[
79731,
79746
]
],
[
[
79918,
79938
]
],
[
[
79987,
80002
]
],
[
[
80834,
80841
],
[
81237,
81244
],
[
81302,
81309
],
[
81366,
81373
]
],
[
[
81227,
81236
]
],
[
[
81290,
81301
]
],
[
[
81357,
81365
]
]
] |
import unittest
from pyowm.agroapi10.polygon import Polygon, GeoPoint, GeoPolygon
class TestPolygon(unittest.TestCase):
    geopoint = GeoPoint(34, -56.3)
geopolygon = GeoPolygon([
[[2.3, 57.32], [23.19, -20.2], [-120.4, 19.15], [2.3, 57.32]]
])
def test_polygon_fails_with_wrong_parameters(self):
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, 123.4, 'user')
self.assertRaises(AssertionError, Polygon, 'id', 'polygon', 'wrong', self.geopoint, 123.4, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, 'wrong', 123.4, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, None, 'user')
self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, -77, 'user')
def test_area_kilometers_property(self):
area_hs = 456.78
expected = area_hs * 0.01
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, area_hs, 'user')
self.assertEqual(expected, instance.area_km)
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, None, 'user')
self.assertIsNone(instance.area_km)
def test_from_dict(self):
_id = "5abb9fb82c8897000bde3e87"
name = "Polygon Sample"
coords = [121.1867, 37.6739]
geopolygon = GeoPolygon([[
[-121.1958, 37.6683],
[-121.1779, 37.6687],
[-121.1773, 37.6792],
[-121.1958, 37.6792],
[-121.1958, 37.6683]]])
center = GeoPoint(coords[0], coords[1])
area = 190.6343
user_id = "557066d0ff7a7e3897531d94"
the_dict = {
"id": _id,
"geo_json": {
"type": "Feature",
"properties": {
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[-121.1958, 37.6683],
[-121.1779, 37.6687],
[-121.1773, 37.6792],
[-121.1958, 37.6792],
[-121.1958, 37.6683]
]
]
}
},
"name": name,
"center": coords,
"area": area,
"user_id": user_id
}
expected = Polygon(_id, name, geopolygon, center, area, user_id)
result = Polygon.from_dict(the_dict)
self.assertEqual(expected.id, result.id)
self.assertEqual(expected.name, result.name)
self.assertEqual(expected.area, result.area)
self.assertEqual(expected.user_id, result.user_id)
self.assertEqual(expected.center.lat, result.center.lat)
self.assertEqual(expected.center.lon, result.center.lon)
self.assertEqual(expected.geopolygon.geojson(), result.geopolygon.geojson())
# now testing with dirty data
self.assertRaises(AssertionError, Polygon.from_dict, None)
the_dict['center'] = ['no_lon', 'no_lat']
self.assertRaises(ValueError, Polygon.from_dict, the_dict)
the_dict['center'] = coords
del the_dict['id']
self.assertRaises(AssertionError, Polygon.from_dict, the_dict)
def test_repr(self):
instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, 1.2, 'user')
repr(instance)
instance = Polygon('id')
repr(instance)
| [
[
[
7,
15
],
[
102,
110
]
],
[
[
52,
59
],
[
364,
371
],
[
479,
486
],
[
586,
593
],
[
695,
702
],
[
809,
816
],
[
1004,
1011
],
[
1150,
1157
],
[
2491,
2498
],
[
2562,
2569
],
[
3100,
3107
],
[
3214,
3221
],
[
3349,
3356
],
[
3423,
3430
],
[
3535,
3542
]
],
[
[
61,
69
],
[
137,
145
],
[
1630,
1638
]
],
[
[
71,
81
],
[
174,
184
],
[
1427,
1437
]
],
[
[
90,
101
]
]
] |
def convert_request_to_dictionary(request, fields):
    """Copy the whitelisted ``fields`` from the JSON request body into a dict."""
    emp = {}
    for field in fields:
        # Only copy fields that are actually present in the request payload.
        if field in request.json:
            emp[field] = request.json[field]
    # "identity" is assumed to always be among the copied fields; drop it so it
    # is not returned along with the other attributes.
    del emp["identity"]
return emp | [
[
[
4,
33
]
]
] |
import os
from pydub import playback
from playsound import playsound
from simpleaudio import play_buffer
import winsound
from manuscript.tools.counter import Counter
def play_sound(sound, block=True):
if sound is not None:
prefix = "tmp"
with Counter(prefix) as counter:
tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3")
sound.export(tmp_file)
playsound(tmp_file, block=block)
#os.remove(tmp_file)
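
# Usage sketch (not part of this module): ``sound`` is assumed to be a pydub
# AudioSegment, which is what the ``sound.export`` call above implies.
#
#     from pydub import AudioSegment
#     play_sound(AudioSegment.from_file("clip.mp3"), block=True)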
| [
[
[
7,
9
],
[
317,
319
]
],
[
[
29,
37
]
],
[
[
60,
69
],
[
415,
424
]
],
[
[
94,
105
]
],
[
[
113,
121
]
],
[
[
159,
166
],
[
266,
273
]
],
[
[
173,
183
]
]
] |
from telegram import ReplyKeyboardMarkup, KeyboardButton
def get_keyboard():
contact_button = KeyboardButton('Отправить контакты', request_contact=True)
location_button = KeyboardButton('Отправить локацию', request_location=True)
my_keyboard = ReplyKeyboardMarkup([['Анекдот', 'Начать'],
[contact_button, location_button]], resize_keyboard=True)
return my_keyboard
| [
[
[
21,
40
],
[
262,
281
]
],
[
[
42,
56
],
[
102,
116
],
[
184,
198
]
],
[
[
64,
76
]
]
] |
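A minimal sketch of wiring get_keyboard above into a bot, assuming the pre-v20 python-telegram-bot API (Updater/Dispatcher) that matches the ReplyKeyboardMarkup import style used in the snippet; the token and the handler are placeholders:

from telegram.ext import Updater, CommandHandler

def start(update, context):
    # Reply with the custom keyboard so the contact/location buttons appear
    update.message.reply_text("Choose an option:", reply_markup=get_keyboard())

updater = Updater("BOT_TOKEN", use_context=True)  # hypothetical token
updater.dispatcher.add_handler(CommandHandler("start", start))
updater.start_polling()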
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
"""Queries the pytorch op registry and generates ODS and CC sources for the ops.
"""
from typing import List, Optional, TextIO
import argparse
import logging
import os
import sys
from .utils import TextEmitter
from .registry import Registry, JitOperator
# Mapping from torch types to their corresponding ODS type predicates.
# Use `get_ods_type` instead of using this directly.
TORCH_TYPE_TO_ODS_TYPE = {
"Tensor": "AnyTorchTensorType",
"Tensor?": "AnyTorchOptionalTensorType",
"Tensor?[]": "AnyTorchListOfOptionalTensorType",
"Tensor[]": "AnyTorchListOfTensorType",
"Scalar": "AnyTorchScalarType",
"Scalar?": "AnyTorchOptionalScalarType",
"int": "Torch_IntType",
"int[]": "AnyTorchListOfTorchIntType",
"int?": "AnyTorchOptionalIntType",
"int[]?": "AnyTorchOptionalListOfTorchIntType",
"bool": "Torch_BoolType",
"bool[]": "AnyTorchListOfTorchBoolType",
"bool?": "AnyTorchOptionalBoolType",
"float": "Torch_FloatType",
"float?": "AnyTorchOptionalFloatType",
"t[]": "AnyTorchListType",
"t": "AnyTorchType",
"t1": "AnyTorchType",
"t2": "AnyTorchType",
"Any": "AnyTorchType",
"Device": "Torch_DeviceType",
"Device?": "AnyTorchOptionalDeviceType",
"Generator": "Torch_GeneratorType",
"Generator?": "AnyTorchOptionalGeneratorType",
"str": "Torch_StringType",
"str?": "AnyTorchOptionalStringType",
"str[]": "AnyTorchListOfTorchStringType",
"Dict": "Torch_DictType",
"__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType",
}
def get_ods_type(type: str):
# TODO: Increase precision on dict type modeling.
if type.startswith("Dict("):
type = "Dict"
ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type)
if ods_type is None:
raise Exception(
f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!")
return ods_type
def _get_main_module_name() -> str:
# pytype: disable=attribute-error
return sys.modules["__main__"].__loader__.name
# pytype: enable=attribute-error
ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
// Operation summaries and descriptions were systematically derived from public
// API docstrings and are licensed accordingly:
// https://github.com/pytorch/pytorch/blob/master/LICENSE
//===----------------------------------------------------------------------===//
//
// This file is automatically generated. Please do not edit.
// Generated via:
// ```
// python -m {_get_main_module_name()}
// ```
//
//===----------------------------------------------------------------------===//
"""
def raw_emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*, traits: List[str],
has_folder: bool, has_canonicalizer: bool):
"""Emit the ODS for a JitOperator to a textual file.
This is the lowest level of emission and is responsible for low-level
textual emission details. This function should not have any "smarts"
for deducing traits/etc.
You probably don't want to call this directly.
"""
p_td = lambda *args: emitter_td.print(*args)
op_name, cpp_class_name = operator.get_mlir_names()
# Generate unique result names for ops with nameless results
multiple_results = len(operator.returns) > 1
def generic_result_name(i):
return "result" + (str(i) if multiple_results else "")
p_td(
f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [")
with emitter_td.indent():
with emitter_td.indent():
p_td(",\n".join(traits))
p_td("]> {")
with emitter_td.indent():
summary = f"Generated op for `{operator.unique_key}`"
p_td(f"let summary = {emitter_td.quote(summary)};")
p_td(f"let arguments = (ins")
with emitter_td.indent():
if operator.is_vararg:
p_td("Variadic<AnyTorchType>:$operands")
else:
p_td(",\n".join([
f"""{get_ods_type(arg["type"])}:${arg["name"]}"""
for arg in operator.arguments
]))
p_td(");")
p_td(f"let results = (outs")
with emitter_td.indent():
if operator.is_varret:
p_td("Variadic<AnyTorchType>:$results")
else:
p_td(",\n".join([
f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}"""
for e, ret in enumerate(operator.returns)
]))
p_td(");")
if operator.is_vararg or operator.is_varret:
if operator.is_vararg:
assembly_operands = "`(` $operands `)`"
assembly_operand_types = "qualified(type($operands))"
else:
assembly_operands = " `,` ".join("$" + arg["name"]
for arg in operator.arguments)
assembly_operand_types = " `,` ".join(
f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments)
if operator.is_varret:
assembly_result_types = "qualified(type($results))"
else:
assembly_result_types = " `,` ".join(
f"""qualified(type(${ret["name"] or generic_result_name(e)}))"""
for e, ret in enumerate(operator.returns))
if assembly_operand_types and assembly_result_types:
maybe_arrow = " `->` "
else:
maybe_arrow = ""
assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}"
p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};")
else:
p_td(f"let hasCustomAssemblyFormat = 1;")
p_td(f"""let extraClassDefinition = [{{
ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{
return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)});
}}
void {cpp_class_name}::print(OpAsmPrinter &printer) {{
printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)});
}}
}}];
""")
if has_folder:
p_td("let hasFolder = 1;")
if has_canonicalizer:
p_td("let hasCanonicalizer = 1;")
p_td("}")
p_td("\n")
def emit_op(operator: JitOperator,
emitter_td: TextEmitter,
*,
traits: Optional[List[str]] = None,
has_folder: bool = False,
has_canonicalizer: bool = False):
"""Main entry point for op emission.
Besides emitting the op, it deduces / adds traits based on the operator
information.
"""
if traits is None:
traits = []
# All Torch operators allow type refinement.
traits += ["AllowsTypeRefinement"]
if operator.has_value_semantics():
traits += ["HasValueSemantics"]
if operator.is_readonly():
traits += ["ReadOnly"]
raw_emit_op(operator,
emitter_td,
traits=traits,
has_folder=has_folder,
has_canonicalizer=has_canonicalizer)
def emit_ops(emitter_td: TextEmitter, registry: Registry):
def emit(key, **kwargs):
emit_op(registry[key], emitter_td, **kwargs)
def emit_with_mutating_variants(key, **kwargs):
operator = registry[key]
emit_op(operator, emitter_td, **kwargs)
ns, unqual, overload = operator.triple
emit_op(registry.get_by_triple((ns, unqual + "_", overload)),
emitter_td,
traits=["IsTrailingUnderscoreInplaceVariant"])
# ==========================================================================
# `aten::` namespace.
# ==========================================================================
# Elementwise tensor compute ops
for key in [
"aten::tanh : (Tensor) -> (Tensor)",
"aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::relu : (Tensor) -> (Tensor)",
"aten::leaky_relu : (Tensor, Scalar) -> (Tensor)",
"aten::log : (Tensor) -> (Tensor)",
"aten::sigmoid : (Tensor) -> (Tensor)",
"aten::hardsigmoid : (Tensor) -> (Tensor)",
"aten::hardswish : (Tensor) -> (Tensor)",
"aten::erf : (Tensor) -> (Tensor)",
"aten::silu : (Tensor) -> (Tensor)",
"aten::sin : (Tensor) -> (Tensor)",
"aten::exp : (Tensor) -> (Tensor)",
"aten::cos : (Tensor) -> (Tensor)",
"aten::neg : (Tensor) -> (Tensor)",
"aten::floor : (Tensor) -> (Tensor)",
"aten::ceil : (Tensor) -> (Tensor)",
"aten::bitwise_not : (Tensor) -> (Tensor)",
"aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::div.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)",
"aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::div.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::le.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)",
"aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)",
"aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)",
"aten::log2 : (Tensor) -> (Tensor)",
"aten::rsqrt : (Tensor) -> (Tensor)",
"aten::abs : (Tensor) -> (Tensor)",
"aten::reciprocal : (Tensor) -> (Tensor)",
"aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)",
"aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)",
"aten::square : (Tensor) -> (Tensor)",
]:
emit_with_mutating_variants(key)
# Elementwise tensor compute ops that don't have the standard mutating
# variants.
emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::maximum : (Tensor, Tensor) -> (Tensor)")
emit("aten::minimum : (Tensor, Tensor) -> (Tensor)")
emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::gelu : (Tensor, str) -> (Tensor)")
emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)")
# Ops without value semantics but the corresponding without trailing
# underscore variant doesn't exist.
emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)")
emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)")
emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)")
emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) -> (Tensor)")
emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)")
emit_with_mutating_variants(
"aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)")
# Non-elementwise tensor compute ops
emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)")
emit("aten::mm : (Tensor, Tensor) -> (Tensor)")
emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::matmul : (Tensor, Tensor) -> (Tensor)")
emit(
"aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
)
emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)")
emit("aten::flip : (Tensor, int[]) -> (Tensor)")
emit(
"aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)"
)
emit(
"aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)"
)
emit(
"aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)"
)
emit(
"aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)"
)
emit(
"aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)"
)
emit(
"aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)"
)
emit(
"aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)"
)
emit(
"aten::softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)"
)
emit(
"aten::_log_softmax : (Tensor, int, bool) -> (Tensor)"
)
emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)")
emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)")
emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)")
emit("aten::permute : (Tensor, int[]) -> (Tensor)")
emit("aten::bmm : (Tensor, Tensor) -> (Tensor)")
emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)")
emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)")
emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)")
emit("aten::mean.dim : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)")
emit("aten::sqrt : (Tensor) -> (Tensor)")
emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)")
emit("aten::mean : (Tensor, int?) -> (Tensor)")
emit("aten::std : (Tensor, bool) -> (Tensor)")
emit("aten::var : (Tensor, bool) -> (Tensor)")
emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)")
emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)")
emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)")
# Misc tensor ops.
emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)")
emit("aten::pad : (Tensor, int[], str, float?) -> (Tensor)")
emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True)
emit("aten::unsqueeze : (Tensor, int) -> (Tensor)")
emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True)
emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)")
emit("aten::dim : (Tensor) -> (int)", has_folder=True)
emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True)
emit("aten::Bool.Tensor : (Tensor) -> (bool)")
emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zero_ : (Tensor) -> (Tensor)")
emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)")
emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)")
emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)")
emit("aten::all : (Tensor) -> (Tensor)")
emit("aten::any : (Tensor) -> (Tensor)")
emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)")
emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)")
emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)")
emit("aten::clone : (Tensor, int?) -> (Tensor)")
emit("aten::contiguous : (Tensor, int) -> (Tensor)")
emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)")
emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)")
emit("aten::detach : (Tensor) -> (Tensor)")
emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)")
emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) -> (Tensor)")
emit("aten::expand : (Tensor, int[], bool) -> (Tensor)")
emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)")
emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)")
emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)")
emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)")
emit("aten::item : (Tensor) -> (Scalar)")
emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)")
emit("aten::numel : (Tensor) -> (int)")
emit("aten::repeat : (Tensor, int[]) -> (Tensor)")
emit("aten::reshape : (Tensor, int[]) -> (Tensor)")
emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)")
emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)")
emit("aten::select.int : (Tensor, int, int) -> (Tensor)")
emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True)
emit("aten::stack : (Tensor[], int) -> (Tensor)")
emit("aten::sum : (Tensor, int?) -> (Tensor)")
emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)")
emit("aten::max : (Tensor) -> (Tensor)")
emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)")
emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True)
emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)")
emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)")
emit("aten::type_as : (Tensor, Tensor) -> (Tensor)")
emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True)
emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)")
emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)")
emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)")
emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)")
emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)")
emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)")
emit("aten::len.Tensor : (Tensor) -> (int)")
emit("aten::cpu : (Tensor) -> (Tensor)")
emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)")
emit("aten::IntImplicit : (Tensor) -> (int)")
emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)")
emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True)
emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True)
emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)")
emit("aten::t : (Tensor) -> (Tensor)")
emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)")
emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)")
# Dict ops.
emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True)
emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True)
emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()")
emit("aten::keys.str : (Dict(str, t)) -> (str[])")
emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)")
emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()")
# List ops.
emit("aten::cat : (Tensor[], int) -> (Tensor)")
emit("aten::append.t : (t[], t) -> (t[])")
emit("aten::add.t : (t[], t[]) -> (t[])")
emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True)
emit("aten::list.t : (t[]) -> (t[])")
emit("aten::slice.t : (t[], int?, int?, int) -> (t[])")
emit("aten::insert.t : (t[], int, t) -> ()")
emit("aten::ne.int_list : (int[], int[]) -> (bool)")
# Str ops.
emit("aten::add.str : (str, str) -> (str)")
emit("aten::eq.str : (str, str) -> (bool)", has_folder=True)
emit("aten::str : (t) -> (str)")
emit("aten::format : (...) -> (str)")
emit("aten::join : (str, str[]) -> (str)")
# Type conversion ops.
emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True)
emit("aten::Float.str : (str) -> (float)")
emit("aten::Int.float : (float) -> (int)")
# Primitive ops
emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True)
emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True)
emit("aten::gt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ge.int : (int, int) -> (bool)", has_folder=True)
emit("aten::lt.int : (int, int) -> (bool)", has_folder=True)
emit("aten::le.int : (int, int) -> (bool)", has_folder=True)
emit("aten::ne.int : (int, int) -> (bool)", has_folder=True)
emit("aten::eq.int : (int, int) -> (bool)", has_folder=True)
emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True)
emit("aten::remainder.int : (int, int) -> (int)", has_folder=True)
emit("aten::add.int : (int, int) -> (int)", has_folder=True)
emit("aten::sub.int : (int, int) -> (int)", has_folder=True)
emit("aten::mul.int : (int, int) -> (int)", has_folder=True)
emit("aten::neg.int : (int) -> (int)", has_folder=True)
emit("aten::log.int : (int) -> (float)")
emit("aten::add.float_int : (float, int) -> (float)")
emit("aten::sub.float : (float, float) -> (float)")
emit("aten::mul.float : (float, float) -> (float)")
emit("aten::div.float : (float, float) -> (float)", has_folder=True)
emit("aten::neg.float : (float) -> (float)")
emit("aten::eq.float : (float, float) -> (bool)", has_folder=True)
emit("aten::gt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::ge.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float : (float, float) -> (bool)", has_folder=True)
emit("aten::lt.float_int : (float, int) -> (bool)")
emit("aten::ge.float_int : (float, int) -> (bool)")
emit("aten::ne.float_int : (float, int) -> (bool)")
emit("aten::gt.float_int : (float, int) -> (bool)")
emit("aten::__and__.bool : (bool, bool) -> (bool)")
emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True)
emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True)
emit("aten::__not__ : (bool) -> (bool)", has_folder=True)
emit("aten::len.t : (t[]) -> (int)",
has_folder=True,
has_canonicalizer=True)
emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True)
emit("aten::_set_item.t : (t[], int, t) -> (t[])")
emit("aten::div : (Scalar, Scalar) -> (float)")
emit("aten::add : (Scalar, Scalar) -> (Scalar)")
emit("aten::eq.device : (Device, Device) -> (bool)")
emit("aten::ceil.float : (float) -> (int)", has_folder=True)
# backprop ops
emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)")
emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)")
emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)")
# ==========================================================================
# `prim::` namespace.
# ==========================================================================
emit("prim::layout : (Tensor) -> (int)")
emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True)
emit("prim::device : (Tensor) -> (Device)")
emit("prim::dtype : (Tensor) -> (int)", has_folder=True)
emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True)
emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)")
emit("prim::min.self_int : (int[]) -> (int)", has_folder=True)
emit("prim::min.int : (int, int) -> (int)")
emit("prim::max.self_int : (int[]) -> (int)")
emit("prim::max.int : (int, int) -> (int)", has_folder=True)
emit("prim::RaiseException : (str, str?) -> ()")
emit("prim::Uninitialized : () -> (Any)",
has_canonicalizer=True, traits=["NoSideEffect"])
emit("prim::unchecked_cast : (t) -> (t)", has_folder=True,
traits=["DeclareOpInterfaceMethods<CastOpInterface>"])
emit("prim::Print : (...) -> ()")
emit("prim::tolist : (...) -> (...)")
emit("prim::abs.Scalar : (Scalar) -> (Scalar)")
# ==========================================================================
# `quantized::` namespace.
# ==========================================================================
emit(
"quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)",
traits=["HasValueSemantics"])
def dump_registered_ops(outfile: TextIO, registry: Registry):
for _, v in sorted(registry.by_unique_key.items()):
outfile.write(repr(v))
def main(args: argparse.Namespace):
registry = Registry.load()
if args.debug_registry_dump:
with open(args.debug_registry_dump, "w") as debug_registry_dump:
dump_registered_ops(debug_registry_dump, registry)
td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td")
with open(td_path, "w") as f_td:
emitter_td = TextEmitter(f_td)
emitter_td.print(ODS_BANNER)
emit_ops(emitter_td, registry)
def _create_argparse() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(prog="generate_ods")
parser.add_argument(
"--torch_ir_include_dir",
required=True,
help="Directory in include/ containing the Torch dialect")
parser.add_argument(
"--debug_registry_dump",
help="File to dump the the PyTorch JIT operator registry into")
return parser
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
parser = _create_argparse()
args = parser.parse_args()
main(args)
| [
[
[
360,
364
],
[
3286,
3290
],
[
7090,
7094
]
],
[
[
366,
374
],
[
7081,
7089
]
],
[
[
376,
382
],
[
27102,
27108
]
],
[
[
391,
399
],
[
27235,
27243
],
[
27714,
27722
],
[
27752,
27760
]
],
[
[
407,
414
],
[
28127,
28134
],
[
28153,
28160
]
],
[
[
422,
424
],
[
27470,
27472
]
],
[
[
432,
435
],
[
2244,
2247
]
],
[
[
456,
467
],
[
3246,
3257
],
[
7033,
7044
],
[
7814,
7825
],
[
27592,
27603
]
],
[
[
490,
498
],
[
7837,
7845
],
[
27120,
27128
],
[
27271,
27279
]
],
[
[
500,
511
],
[
3205,
3216
],
[
6996,
7007
]
],
[
[
637,
659
],
[
1975,
1997
]
],
[
[
1828,
1840
],
[
4574,
4586
],
[
4947,
4959
]
],
[
[
2163,
2184
],
[
3055,
3076
]
],
[
[
2323,
2333
],
[
27635,
27645
]
],
[
[
3183,
3194
],
[
7614,
7625
]
],
[
[
6978,
6985
],
[
7885,
7892
],
[
8024,
8031
],
[
8119,
8126
]
],
[
[
7793,
7801
],
[
27655,
27663
]
],
[
[
27073,
27092
],
[
27405,
27424
]
],
[
[
27224,
27228
],
[
28235,
28239
]
],
[
[
27692,
27708
],
[
28181,
28197
]
],
[
[
28172,
28178
],
[
28211,
28217
]
],
[
[
28204,
28208
],
[
28240,
28244
]
]
] |
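To make the type-mapping step of the ODS generator above concrete, here are a few illustrative lookups through get_ods_type; the expected strings follow directly from the TORCH_TYPE_TO_ODS_TYPE table in that snippet:

assert get_ods_type("Tensor") == "AnyTorchTensorType"
assert get_ods_type("Tensor?") == "AnyTorchOptionalTensorType"   # optional variant
assert get_ods_type("int[]") == "AnyTorchListOfTorchIntType"     # list variant
assert get_ods_type("Dict(str, t)") == "Torch_DictType"          # Dict(...) collapses to "Dict" before lookup
# Unknown types raise an Exception asking for the mapping table to be extended:
# get_ods_type("complex")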
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'micropython-ulab'
copyright = '2019, Zoltán Vörös'
author = 'Zoltán Vörös'
# The full version, including alpha/beta/rc tags
release = '0.26'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
author = u'Zoltán Vörös'
copyright = author
language = 'en'
latex_documents = [
(master_doc, 'ulab-manual.tex', 'Micropython ulab documentation',
'Zoltán Vörös', 'manual'),
]
| [
[
[
684,
691
]
],
[
[
713,
722
]
],
[
[
746,
752
]
],
[
[
820,
827
]
],
[
[
1067,
1077
]
],
[
[
1158,
1172
]
],
[
[
1383,
1399
]
],
[
[
1593,
1603
]
],
[
[
1856,
1872
]
],
[
[
1888,
1898
],
[
1986,
1996
]
],
[
[
1910,
1916
],
[
1943,
1949
]
],
[
[
1933,
1942
]
],
[
[
1950,
1958
]
],
[
[
1965,
1980
]
]
] |
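As a small illustration of the extension comment in the conf.py above, this is how a stock extension would be enabled; sphinx.ext.mathjax is only an example and is not part of the original configuration:

extensions = [
    'sphinx.ext.mathjax',  # example only: enables MathJax-rendered math in HTML builds
]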
import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.util import decaying
class ExperimentParam:
def __init__(self, TRAIN_DATA: list, max_batch_sizes: dict, model_type='ner',
dropout_start: float = 0.6, dropout_end: float = 0.2, interval: float = 1e-4):
self.TRAIN_DATA = TRAIN_DATA
self.max_batch_sizes = max_batch_sizes
self.model_type = model_type
self.dropout_start = dropout_start
self.dropout_end = dropout_end
self.interval = interval
def get_batches(self):
"""
        Initialize with batch size 1 and compound to a maximum determined by your
        data size and problem type, e.g.
        max_batch_sizes = {"tagger": 32, "parser": 16, "ner": 16, "textcat": 64}
"""
max_batch_size = self.max_batch_sizes[self.model_type]
if len(self.TRAIN_DATA) < 1000:
max_batch_size /= 2
if len(self.TRAIN_DATA) < 500:
max_batch_size /= 2
batch_size = compounding(1, max_batch_size, 1.001)
batches = minibatch(self.TRAIN_DATA, size=batch_size)
return batches
@property
def determine_dropout(self):
"""
For small datasets, it’s useful to set a high dropout rate at first, and decay it down towards a more reasonable value. This helps avoid the network immediately overfitting, while still encouraging it to learn some of the more interesting things in your data.
"""
dropout = decaying(self.dropout_start, self.dropout_end, self.interval)
return dropout
| [
[
[
7,
12
]
],
[
[
39,
46
]
],
[
[
70,
79
],
[
1101,
1110
]
],
[
[
81,
92
],
[
1045,
1056
]
],
[
[
116,
124
],
[
1528,
1536
]
],
[
[
133,
148
]
]
] |
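A minimal sketch of driving ExperimentParam above inside a spaCy v2-style training loop; TRAIN_DATA is a toy placeholder and the nlp.update call is left commented because it requires a loaded pipeline, so only the batching and dropout plumbing from the snippet is exercised:

TRAIN_DATA = [("Uber is a company", {"entities": [(0, 4, "ORG")]})]  # toy example
params = ExperimentParam(
    TRAIN_DATA,
    max_batch_sizes={"tagger": 32, "parser": 16, "ner": 16, "textcat": 64},
    model_type="ner",
)
dropout = params.determine_dropout          # generator from spacy.util.decaying
for batch in params.get_batches():          # minibatches sized by compounding()
    drop = next(dropout)                    # decaying dropout value per batch
    # nlp.update(*zip(*batch), drop=drop)   # hypothetical update call on a loaded pipeline
    print(len(batch), round(drop, 3))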
from example_system import serializer
from example_system.bike import Bike
from example_system.human import Human
def run_example() -> None:
krzysztof = Human(name="Krzysztof", age=37)
giant_bike = Bike(brand="Giant", model="Contend AR")
krzysztof_json = serializer.serialize(krzysztof)
print(krzysztof_json)
bike_json = serializer.serialize(giant_bike)
print(bike_json)
krzysztof_deserialized = serializer.deserialize(krzysztof_json)
print(krzysztof)
print(krzysztof_deserialized)
bike_deserialized = serializer.deserialize(bike_json)
print(giant_bike)
print(bike_deserialized)
if __name__ == "__main__":
run_example()
| [
[
[
27,
37
],
[
270,
280
],
[
344,
354
],
[
428,
438
],
[
546,
556
]
],
[
[
70,
74
],
[
208,
212
]
],
[
[
108,
113
],
[
159,
164
]
],
[
[
120,
131
],
[
664,
675
]
]
] |
'''
Created by auto_sdk on 2020.11.25
'''
from dingtalk.api.base import RestApi
class OapiSmartdeviceBatcheventPostRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.device_event_vos = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.smartdevice.batchevent.post'
| [
[
[
72,
79
],
[
123,
130
],
[
165,
172
]
],
[
[
86,
122
]
]
] |
import pandas as pd
import io
from joblib import load
import logging
logging.getLogger().setLevel(logging.INFO)
def generate_data():
new_data = pd.DataFrame({
'Pclass':[3,2,1],
'Sex': ['male', 'female', 'male'],
'Age':[4, 22, 28]
})
return new_data
def load_model():
try:
return load('../output/titanic_model_rf.pkl')
    except Exception:
        try:
            return load('../../output/titanic_model_rf.pkl')
        except Exception:
            logging.error('Model not loaded')
def predict_new(X, probs=True):
model = load_model()
p = model.get_preprocessing()
X = p.clean_data(X)
X = p.categ_encoding(X)
columns = model.get_columns()
for col in columns:
if col not in X.columns:
X[col] = 0
if probs:
return model.predict_proba(X)[:,1]
else:
return model.predict(X)
if __name__ == "__main__":
df = generate_data()
preds = predict_new(df, probs=True)
logging.info("Predictions:")
print(preds)
| [
[
[
7,
19
],
[
150,
152
]
],
[
[
27,
29
]
],
[
[
49,
53
],
[
331,
335
],
[
415,
419
]
],
[
[
61,
68
],
[
70,
77
],
[
99,
106
],
[
986,
993
],
[
485,
492
]
],
[
[
118,
131
],
[
926,
939
]
],
[
[
293,
303
],
[
565,
575
]
],
[
[
525,
536
],
[
954,
965
]
],
[
[
921,
923
],
[
966,
968
]
],
[
[
946,
951
],
[
1025,
1030
]
]
] |
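The column-alignment loop in predict_new above guards against categorical levels that are absent from new data after one-hot encoding; a small sketch of the same idea in isolation, with illustrative column names:

import pandas as pd

trained_columns = ["Pclass", "Age", "Sex_male", "Sex_female"]     # columns seen at fit time
X = pd.DataFrame({"Pclass": [1], "Age": [28], "Sex_male": [1]})   # encoded new data
for col in trained_columns:
    if col not in X.columns:
        X[col] = 0  # missing dummy columns are filled with zeros so shapes match the model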
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Type, TypeVar, Union
from .devices import I2CDevice
from .parsers import RegisterParser
from .typing import RegisterState
BlockType = TypeVar("BlockType")
class RegisterBlock(Generic[BlockType], ABC):
"""
Abstract base class for collections of registers that represent distinct
features of an I2C device. A RegisterBlock translates between high-level
data structures and the low-level representation of that data as expressed
by RegisterParsers. For example, for the DS series RTCs, there are sub-
classes of RegisterBlock for the clock, the alarms, and their configuration
states. The Clock subclass encapsulates RegisterParsers for the BCD-ish
encoding of the Hour, Minute, Second, etc. stored in the device registers.
RegisterBlock is a Generic type. When subclassing, add the appropriate type
for the value represented by the subclass to its signature:
class TimekeepingRegisterBlock(RegisterBlock[datetime]): ...
A RegisterBlock subclass should define one or more attributes that are
RegisterParsers. Subclasses must also define two methods:
1) `_value` to read the data from its attributes and produce a value of
the designated type
2) `_prepare_update` to set its attributes to a given value
For example, suppose some device stored a positive decimal number like
12.34 with the integer part in register 0x00 and the fractional part in
register 0x01, each represented as 2 digit standard BCD. You want to read
or write this value as a 2-tuple of ints. A RegisterBlock for accessing
this number could be:
class DecimalRegisterBlock(RegisterBlock[Tuple[int, int]]):
integer_part = BCDRegisterParser(0x00)
fractional_part = BCDRegisterParser(0x01)
def _value(self) -> Tuple[int, int]:
return self.integer_part, self.fractional_part
def _prepare_update(self, value: Tuple[int, int]) -> None:
self.integer_part, self.fractional_part = value
"""
@property
def register_state(self) -> "RegisterState":
"""
Accesses register state from the most recent read of the parent device.
"""
return self._register_state
@register_state.setter
def register_state(self, state: "RegisterState") -> None:
"""
Setting register_state also keeps a copy to use as pending_state.
"""
self._register_state = state
self.pending_state = self._register_state.copy()
def __init__(self) -> None:
"""
Initialize a new RegisterBlock. RegisterBlock is a data descriptor, so
it must be used as an attribute on a subclass of I2CDevice in order to
have access to the device register state.
"""
# The very first access to the descriptor will populate actual state.
self.register_state: RegisterState = []
def __get__(
self, instance: "I2CDevice", owner: Type["I2CDevice"]
) -> BlockType:
"""
RegisterBlock is a data descriptor with access to the state of the
I2CDevice instance that it belongs to, so we can use that register
state for all parsers associated with this RegisterBlock (see
RegisterParser.__get__).
It is important for all RegisterParser instances to have a shared
register state (i.e. the state stored in this class) in order to avoid
mistakes if the state changes during a read. For example, if an RTC's
Second register is read at 0 minutes 59 seconds, and then the clock
ticks before we read the Minute register, the time would come out as
        1 minute 59 seconds. Maxim DS RTCs (and probably others) use 2 sets
of registers to prevent this issue from affecting I2C block reads, so
we just need to make sure we only make one call to `read_registers()`
for all the RegisterParsers within a RegisterBlock.
"""
if not instance:
raise AttributeError(
"RegisterBlock must be accessed from an I2CDevice instance."
)
self.register_state = instance.read_registers()
return self._value()
def __set__(self, instance: "I2CDevice", value: BlockType) -> None:
"""
Setting the value of the RegisterBlock updates its state via the
RegisterParser descriptors that belong to the block.
"""
# Make sure we have the latest state loaded before modifying it
self.register_state = instance.read_registers()
self._prepare_update(value)
# A minor optimization to only write a contiguous block from the first
# changed register to the last changed register, leaving the rest
# unmodified. This helps improve the speed of small updates.
addresses_changed = [
i
for i, b in enumerate(self.pending_state)
if b != self._register_state[i]
]
first_changed = min(addresses_changed)
last_changed = max(addresses_changed)
to_write = self.pending_state[first_changed : last_changed + 1]
instance.write_registers(to_write, first_changed)
@abstractmethod
def _prepare_update(self, value: BlockType) -> None:
"""
Subclasses should define behavior for setting the values of their
RegisterParser attributes to reflect the requested `value` for the
RegisterBlock. Parsers' `__set__` methods call `update_register_state`
on this instance so they can all keep their pending state in sync.
"""
@abstractmethod
def _value(self) -> BlockType:
"""
Value should return an appropriate object to represent the state of
this register block e.g. a datetime for the clock/alarms or a float for
the temperature
"""
def update_register_state(
self, address: Union[int, slice], value: "RegisterState"
) -> None:
"""
RegisterParsers should call this method to stage their changes to the
register state. This allows parsers to be aware of each other's pending
changes so e.g. two distinct parsers can flip two different bits in the
same register. Once all parsers have staged their changes (implement
via _prepare_update), the __set__ method will write all the changes to
the parent I2CDevice instance.
Parameters
----------
address : Union[int, slice]
The register address(es) to set
value : RegisterState
The bytes to insert at address
"""
if isinstance(address, int):
address = slice(address, address + 1)
if len(value) != len(self.pending_state[address]):
raise ValueError("Value must have as many bytes as slice")
self.pending_state[address] = value
class DatetimeRegisterBlock(RegisterBlock[datetime]):
"""
Base class whose subclasses keep track of the register addresses where
various components of the date/time/alarms are stored for RTC ICs such
as the Maxim DS series.
"""
hour: RegisterParser[int]
minute: RegisterParser[int]
day_of_month: RegisterParser[int]
# Define defaults for attributes that may be left unset, e.g. the DS3231
# and DS1337 have no seconds for Alarm 2, and no year or month for either
# Alarm.
@property
def second(self) -> Union[RegisterParser[int], int]:
return 0
@second.setter
def second(self, value: int) -> None:
pass
@property
def month(self) -> Union[RegisterParser[int], int]:
return datetime.now().month
@month.setter
def month(self, value: int) -> None:
pass
@property
def year(self) -> Union[RegisterParser[int], int]:
return datetime.now().year
@year.setter
def year(self, value: int) -> None:
pass
def _prepare_update(self, value: datetime) -> None:
# FIXME pycharm doesn't understand you can assign an int to the
# parser descriptors, but mypy does
self.second = value.second
self.minute = value.minute
self.hour = value.hour
self.day_of_month = value.day
self.month = value.month
self.year = value.year
def _value(self) -> datetime:
try:
value = datetime(
self.year,
self.month,
self.day_of_month,
self.hour,
self.minute,
self.second,
)
except ValueError as err:
raise ValueError(
"Could not parse datetime. Perhaps the register state is"
"invalid? Try setting to a known valid state first."
) from err
return value
| [
[
[
16,
19
],
[
293,
296
]
],
[
[
21,
35
],
[
5303,
5317
],
[
5708,
5722
]
],
[
[
57,
65
],
[
7029,
7037
],
[
7754,
7762
],
[
7933,
7941
],
[
8062,
8070
],
[
8426,
8434
],
[
8469,
8477
]
],
[
[
85,
92
],
[
273,
280
]
],
[
[
94,
98
],
[
3075,
3079
]
],
[
[
100,
107
],
[
230,
237
]
],
[
[
109,
114
],
[
6017,
6022
],
[
7543,
7548
],
[
7706,
7711
],
[
7885,
7890
]
],
[
[
137,
146
]
],
[
[
168,
182
],
[
7246,
7260
],
[
7278,
7292
],
[
7316,
7330
],
[
7549,
7563
],
[
7712,
7726
],
[
7891,
7905
]
],
[
[
203,
216
],
[
2994,
3007
]
],
[
[
218,
227
],
[
281,
290
],
[
3102,
3111
],
[
4358,
4367
],
[
5355,
5364
],
[
5747,
5756
]
],
[
[
259,
272
],
[
7015,
7028
]
],
[
[
6993,
7014
]
]
] |
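Extending the DecimalRegisterBlock example from the RegisterBlock docstring above, here is a hedged sketch of how a DatetimeRegisterBlock subclass might be wired onto an I2CDevice; the register addresses, the RegisterParser(address) constructor, and the RtcDevice class are assumptions for illustration, not taken from the library:

from datetime import datetime

class ClockBlock(DatetimeRegisterBlock):
    # Hypothetical register map; assumes RegisterParser takes a register address,
    # mirroring the BCDRegisterParser(0x00) pattern shown in the docstring above.
    second = RegisterParser(0x00)
    minute = RegisterParser(0x01)
    hour = RegisterParser(0x02)
    day_of_month = RegisterParser(0x04)
    month = RegisterParser(0x05)
    year = RegisterParser(0x06)

class RtcDevice(I2CDevice):
    # RegisterBlock is a data descriptor, so it is attached at class level.
    clock = ClockBlock()

rtc = RtcDevice()            # assumes a concrete I2CDevice wired to a real bus
rtc.clock = datetime.now()   # __set__ stages parser updates, then writes only changed registers
print(rtc.clock)             # __get__ re-reads the registers and rebuilds a datetime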
"""A flexible Python library for atomic structure generation."""
| [] |
import asyncio
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
from blspy import PrivateKey, G1Element
from seno.cmds.init_funcs import check_keys
from seno.consensus.block_rewards import calculate_base_farmer_reward
from seno.protocols.protocol_message_types import ProtocolMessageTypes
from seno.server.outbound_message import NodeType, make_msg
from seno.simulator.simulator_protocol import FarmNewBlockProtocol
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from seno.util.byte_types import hexstr_to_bytes
from seno.util.ints import uint32, uint64
from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic
from seno.util.path import path_from_root
from seno.util.ws_message import WsRpcMessage, create_payload_dict
from seno.wallet.cc_wallet.cc_wallet import CCWallet
from seno.wallet.rl_wallet.rl_wallet import RLWallet
from seno.wallet.did_wallet.did_wallet import DIDWallet
from seno.wallet.trade_record import TradeRecord
from seno.wallet.transaction_record import TransactionRecord
from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup
from seno.wallet.util.trade_utils import trade_record_to_dict
from seno.wallet.util.transaction_type import TransactionType
from seno.wallet.util.wallet_types import WalletType
from seno.wallet.wallet_info import WalletInfo
from seno.wallet.wallet_node import WalletNode
# Timeout for response from wallet/full node for sending a transaction
TIMEOUT = 30
log = logging.getLogger(__name__)
class WalletRpcApi:
def __init__(self, wallet_node: WalletNode):
assert wallet_node is not None
self.service = wallet_node
self.service_name = "seno_wallet"
def get_routes(self) -> Dict[str, Callable]:
return {
# Key management
"/log_in": self.log_in,
"/get_public_keys": self.get_public_keys,
"/get_private_key": self.get_private_key,
"/generate_mnemonic": self.generate_mnemonic,
"/add_key": self.add_key,
"/delete_key": self.delete_key,
"/delete_all_keys": self.delete_all_keys,
# Wallet node
"/get_sync_status": self.get_sync_status,
"/get_height_info": self.get_height_info,
"/farm_block": self.farm_block, # Only when node simulator is running
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
# Wallet management
"/get_wallets": self.get_wallets,
"/create_new_wallet": self.create_new_wallet,
# Wallet
"/get_wallet_balance": self.get_wallet_balance,
"/get_transaction": self.get_transaction,
"/get_transactions": self.get_transactions,
"/get_next_address": self.get_next_address,
"/send_transaction": self.send_transaction,
"/create_backup": self.create_backup,
"/get_transaction_count": self.get_transaction_count,
"/get_farmed_amount": self.get_farmed_amount,
"/create_signed_transaction": self.create_signed_transaction,
# Coloured coins and trading
"/cc_set_name": self.cc_set_name,
"/cc_get_name": self.cc_get_name,
"/cc_spend": self.cc_spend,
"/cc_get_colour": self.cc_get_colour,
"/create_offer_for_ids": self.create_offer_for_ids,
"/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
"/respond_to_offer": self.respond_to_offer,
"/get_trade": self.get_trade,
"/get_all_trades": self.get_all_trades,
"/cancel_trade": self.cancel_trade,
# DID Wallet
"/did_update_recovery_ids": self.did_update_recovery_ids,
"/did_spend": self.did_spend,
"/did_get_pubkey": self.did_get_pubkey,
"/did_get_did": self.did_get_did,
"/did_recovery_spend": self.did_recovery_spend,
"/did_get_recovery_list": self.did_get_recovery_list,
"/did_create_attest": self.did_create_attest,
"/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
"/did_create_backup_file": self.did_create_backup_file,
# RL wallet
"/rl_set_user_info": self.rl_set_user_info,
"/send_clawback_transaction:": self.send_clawback_transaction,
"/add_rate_limited_funds:": self.add_rate_limited_funds,
}
async def _state_changed(self, *args) -> List[WsRpcMessage]:
"""
Called by the WalletNode or WalletStateManager when something has changed in the wallet. This
gives us an opportunity to send notifications to all connected clients via WebSocket.
"""
if len(args) < 2:
return []
data = {
"state": args[0],
}
if args[1] is not None:
data["wallet_id"] = args[1]
        if len(args) > 2 and args[2] is not None:
data["additional_data"] = args[2]
return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")]
async def _stop_wallet(self):
"""
Stops a currently running wallet/key, which allows starting the wallet with a new key.
        Each key has its own wallet database.
"""
if self.service is not None:
self.service._close()
await self.service._await_closed()
##########################################################################################
# Key management
##########################################################################################
async def log_in(self, request):
"""
Logs in the wallet with a specific key.
"""
fingerprint = request["fingerprint"]
if self.service.logged_in_fingerprint == fingerprint:
return {"fingerprint": fingerprint}
await self._stop_wallet()
log_in_type = request["type"]
recovery_host = request["host"]
testing = False
if "testing" in self.service.config and self.service.config["testing"] is True:
testing = True
if log_in_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif log_in_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
else:
started = await self.service._start(fingerprint)
if started is True:
return {"fingerprint": fingerprint}
elif testing is True and self.service.backup_initialized is False:
response = {"success": False, "error": "not_initialized"}
return response
elif self.service.backup_initialized is False:
backup_info = None
backup_path = None
try:
private_key = self.service.get_key_for_fingerprint(fingerprint)
last_recovery = await download_backup(recovery_host, private_key)
backup_path = path_from_root(self.service.root_path, "last_recovery")
if backup_path.exists():
backup_path.unlink()
backup_path.write_text(last_recovery)
backup_info = get_backup_info(backup_path, private_key)
backup_info["backup_host"] = recovery_host
backup_info["downloaded"] = True
except Exception as e:
log.error(f"error {e}")
response = {"success": False, "error": "not_initialized"}
if backup_info is not None:
response["backup_info"] = backup_info
response["backup_path"] = f"{backup_path}"
return response
return {"success": False, "error": "Unknown Error"}
async def get_public_keys(self, request: Dict):
fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()]
return {"public_key_fingerprints": fingerprints}
async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
for sk, seed in self.service.keychain.get_all_private_keys():
if sk.get_g1().get_fingerprint() == fingerprint:
return sk, seed
return None, None
async def get_private_key(self, request):
fingerprint = request["fingerprint"]
sk, seed = await self._get_private_key(fingerprint)
if sk is not None:
s = bytes_to_mnemonic(seed) if seed is not None else None
return {
"private_key": {
"fingerprint": fingerprint,
"sk": bytes(sk).hex(),
"pk": bytes(sk.get_g1()).hex(),
"seed": s,
},
}
return {"success": False, "private_key": {"fingerprint": fingerprint}}
async def generate_mnemonic(self, request: Dict):
return {"mnemonic": generate_mnemonic().split(" ")}
async def add_key(self, request):
if "mnemonic" not in request:
raise ValueError("Mnemonic not in request")
# Adding a key from 24 word mnemonic
mnemonic = request["mnemonic"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
fingerprint = sk.get_g1().get_fingerprint()
await self._stop_wallet()
# Makes sure the new key is added to config properly
started = False
check_keys(self.service.root_path)
request_type = request["type"]
if request_type == "new_wallet":
started = await self.service._start(fingerprint=fingerprint, new_wallet=True)
elif request_type == "skip":
started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
elif request_type == "restore_backup":
file_path = Path(request["file_path"])
started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
if started is True:
return {"fingerprint": fingerprint}
raise ValueError("Failed to start")
async def delete_key(self, request):
await self._stop_wallet()
fingerprint = request["fingerprint"]
self.service.keychain.delete_key_by_fingerprint(fingerprint)
path = path_from_root(
self.service.root_path,
f"{self.service.config['database_path']}-{fingerprint}",
)
if path.exists():
path.unlink()
return {}
async def delete_all_keys(self, request: Dict):
await self._stop_wallet()
self.service.keychain.delete_all_keys()
path = path_from_root(self.service.root_path, self.service.config["database_path"])
if path.exists():
path.unlink()
return {}
##########################################################################################
# Wallet Node
##########################################################################################
async def get_sync_status(self, request: Dict):
assert self.service.wallet_state_manager is not None
syncing = self.service.wallet_state_manager.sync_mode
synced = await self.service.wallet_state_manager.synced()
return {"synced": synced, "syncing": syncing, "genesis_initialized": True}
async def get_height_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
peak = self.service.wallet_state_manager.peak
if peak is None:
return {"height": 0}
else:
return {"height": peak.height}
async def get_network_info(self, request: Dict):
assert self.service.wallet_state_manager is not None
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def farm_block(self, request):
raw_puzzle_hash = decode_puzzle_hash(request["address"])
request = FarmNewBlockProtocol(raw_puzzle_hash)
msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
return {}
##########################################################################################
# Wallet Management
##########################################################################################
async def get_wallets(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
return {"wallets": wallets}
async def _create_backup_and_upload(self, host) -> None:
assert self.service.wallet_state_manager is not None
try:
if "testing" in self.service.config and self.service.config["testing"] is True:
return None
now = time.time()
file_name = f"backup_{now}"
path = path_from_root(self.service.root_path, file_name)
await self.service.wallet_state_manager.create_wallet_backup(path)
backup_text = path.read_text()
response = await upload_backup(host, backup_text)
success = response["success"]
if success is False:
log.error("Failed to upload backup to wallet backup service")
elif success is True:
log.info("Finished upload of the backup file")
except Exception as e:
log.error(f"Exception in upload backup. Error: {e}")
async def create_new_wallet(self, request: Dict):
assert self.service.wallet_state_manager is not None
wallet_state_manager = self.service.wallet_state_manager
main_wallet = wallet_state_manager.main_wallet
host = request["host"]
if request["wallet_type"] == "cc_wallet":
if request["mode"] == "new":
async with self.service.wallet_state_manager.lock:
cc_wallet: CCWallet = await CCWallet.create_new_cc(
wallet_state_manager, main_wallet, request["amount"]
)
colour = cc_wallet.get_colour()
asyncio.create_task(self._create_backup_and_upload(host))
return {
"type": cc_wallet.type(),
"colour": colour,
"wallet_id": cc_wallet.id(),
}
elif request["mode"] == "existing":
async with self.service.wallet_state_manager.lock:
cc_wallet = await CCWallet.create_wallet_for_cc(
wallet_state_manager, main_wallet, request["colour"]
)
asyncio.create_task(self._create_backup_and_upload(host))
return {"type": cc_wallet.type()}
elif request["wallet_type"] == "rl_wallet":
if request["rl_type"] == "admin":
log.info("Create rl admin wallet")
async with self.service.wallet_state_manager.lock:
rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
success = await rl_admin.admin_create_coin(
uint64(int(request["interval"])),
uint64(int(request["limit"])),
request["pubkey"],
uint64(int(request["amount"])),
uint64(int(request["fee"])) if "fee" in request else uint64(0),
)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_admin.rl_info.admin_pubkey is not None
return {
"success": success,
"id": rl_admin.id(),
"type": rl_admin.type(),
"origin": rl_admin.rl_info.rl_origin,
"pubkey": rl_admin.rl_info.admin_pubkey.hex(),
}
elif request["rl_type"] == "user":
log.info("Create rl user wallet")
async with self.service.wallet_state_manager.lock:
rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
asyncio.create_task(self._create_backup_and_upload(host))
assert rl_user.rl_info.user_pubkey is not None
return {
"id": rl_user.id(),
"type": rl_user.type(),
"pubkey": rl_user.rl_info.user_pubkey.hex(),
}
elif request["wallet_type"] == "did_wallet":
if request["did_type"] == "new":
backup_dids = []
num_needed = 0
for d in request["backup_dids"]:
backup_dids.append(hexstr_to_bytes(d))
if len(backup_dids) > 0:
num_needed = uint64(request["num_of_backup_ids_needed"])
async with self.service.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_state_manager,
main_wallet,
int(request["amount"]),
backup_dids,
uint64(num_needed),
)
my_did = did_wallet.get_my_DID()
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
}
elif request["did_type"] == "recovery":
async with self.service.wallet_state_manager.lock:
did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_state_manager, main_wallet, request["filename"]
)
assert did_wallet.did_info.temp_coin is not None
assert did_wallet.did_info.temp_puzhash is not None
assert did_wallet.did_info.temp_pubkey is not None
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
coin_list = did_wallet.did_info.temp_coin.as_list()
newpuzhash = did_wallet.did_info.temp_puzhash
pubkey = did_wallet.did_info.temp_pubkey
return {
"success": True,
"type": did_wallet.type(),
"my_did": my_did,
"wallet_id": did_wallet.id(),
"coin_name": coin_name,
"coin_list": coin_list,
"newpuzhash": newpuzhash.hex(),
"pubkey": pubkey.hex(),
"backup_dids": did_wallet.did_info.backup_ids,
"num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
}
##########################################################################################
# Wallet
##########################################################################################
async def get_wallet_balance(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id)
balance = await wallet.get_confirmed_balance(unspent_records)
pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
spendable_balance = await wallet.get_spendable_balance(unspent_records)
pending_change = await wallet.get_pending_change_balance()
max_send_amount = await wallet.get_max_send_amount(unspent_records)
unconfirmed_removals: Dict[
bytes32, Coin
] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
wallet_balance = {
"wallet_id": wallet_id,
"confirmed_wallet_balance": balance,
"unconfirmed_wallet_balance": pending_balance,
"spendable_balance": spendable_balance,
"pending_change": pending_change,
"max_send_amount": max_send_amount,
"unspent_coin_count": len(unspent_records),
"pending_coin_removal_count": len(unconfirmed_removals),
}
return {"wallet_balance": wallet_balance}
async def get_transaction(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
if tr is None:
raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")
return {
"transaction": tr,
"transaction_id": tr.name,
}
async def get_transactions(self, request: Dict) -> Dict:
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
if "start" in request:
start = request["start"]
else:
start = 0
if "end" in request:
end = request["end"]
else:
end = 50
transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
formatted_transactions = []
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
for tx in transactions:
formatted = tx.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix)
formatted_transactions.append(formatted)
return {
"transactions": formatted_transactions,
"wallet_id": wallet_id,
}
async def get_initial_freeze_period(self, _: Dict):
freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP
return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period}
async def get_next_address(self, request: Dict) -> Dict:
"""
Returns a new address
"""
assert self.service.wallet_state_manager is not None
if request["new_address"] is True:
create_new = True
else:
create_new = False
wallet_id = uint32(int(request["wallet_id"]))
wallet = self.service.wallet_state_manager.wallets[wallet_id]
selected = self.service.config["selected_network"]
prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
if wallet.type() == WalletType.STANDARD_WALLET:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
elif wallet.type() == WalletType.COLOURED_COIN:
raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
address = encode_puzzle_hash(raw_puzzle_hash, prefix)
else:
raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")
return {
"wallet_id": wallet_id,
"address": address,
}
async def send_transaction(self, request):
assert self.service.wallet_state_manager is not None
if await self.service.wallet_state_manager.synced() is False:
raise ValueError("Wallet needs to be fully synced before sending transactions")
if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP:
end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP))
raise ValueError(f"No transactions before: {end_date}")
wallet_id = int(request["wallet_id"])
wallet = self.service.wallet_state_manager.wallets[wallet_id]
if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def get_transaction_count(self, request):
wallet_id = int(request["wallet_id"])
count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
return {"wallet_id": wallet_id, "count": count}
async def create_backup(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["file_path"])
await self.service.wallet_state_manager.create_wallet_backup(file_path)
return {}
##########################################################################################
# Coloured Coins and Trading
##########################################################################################
async def cc_set_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
await wallet.set_name(str(request["name"]))
return {"wallet_id": wallet_id}
async def cc_get_name(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
name: str = await wallet.get_name()
return {"wallet_id": wallet_id, "name": name}
async def cc_spend(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
if not isinstance(request["amount"], int) or not isinstance(request["amount"], int):
raise ValueError("An integer amount or fee is required (too many decimals)")
amount: uint64 = uint64(request["amount"])
if "fee" in request:
fee = uint64(request["fee"])
else:
fee = uint64(0)
async with self.service.wallet_state_manager.lock:
tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
await wallet.push_transaction(tx)
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def cc_get_colour(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
colour: str = wallet.get_colour()
return {"colour": colour, "wallet_id": wallet_id}
async def create_offer_for_ids(self, request):
assert self.service.wallet_state_manager is not None
offer = request["ids"]
file_name = request["filename"]
async with self.service.wallet_state_manager.lock:
(
success,
spend_bundle,
error,
) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
if success:
self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
return {}
raise ValueError(error)
async def get_discrepancies_for_offer(self, request):
assert self.service.wallet_state_manager is not None
file_name = request["filename"]
file_path = Path(file_name)
async with self.service.wallet_state_manager.lock:
(
success,
discrepancies,
error,
) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
if success:
return {"discrepancies": discrepancies}
raise ValueError(error)
async def respond_to_offer(self, request):
assert self.service.wallet_state_manager is not None
file_path = Path(request["filename"])
async with self.service.wallet_state_manager.lock:
(
success,
trade_record,
error,
) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
if not success:
raise ValueError(error)
return {}
async def get_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
trade_id = request["trade_id"]
trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
if trade is None:
raise ValueError(f"No trade with trade id: {trade_id}")
result = trade_record_to_dict(trade)
return {"trade": result}
async def get_all_trades(self, request: Dict):
assert self.service.wallet_state_manager is not None
trade_mgr = self.service.wallet_state_manager.trade_manager
all_trades = await trade_mgr.get_all_trades()
result = []
for trade in all_trades:
result.append(trade_record_to_dict(trade))
return {"trades": result}
async def cancel_trade(self, request: Dict):
assert self.service.wallet_state_manager is not None
wsm = self.service.wallet_state_manager
secure = request["secure"]
trade_id = hexstr_to_bytes(request["trade_id"])
async with self.service.wallet_state_manager.lock:
if secure:
await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
else:
await wsm.trade_manager.cancel_pending_offer(trade_id)
return {}
async def get_backup_info(self, request: Dict):
file_path = Path(request["file_path"])
sk = None
if "words" in request:
mnemonic = request["words"]
passphrase = ""
try:
sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
except KeyError as e:
return {
"success": False,
"error": f"The word '{e.args[0]}' is incorrect.'",
"word": e.args[0],
}
elif "fingerprint" in request:
sk, seed = await self._get_private_key(request["fingerprint"])
if sk is None:
raise ValueError("Unable to decrypt the backup file.")
backup_info = get_backup_info(file_path, sk)
return {"backup_info": backup_info}
##########################################################################################
# Distributed Identities
##########################################################################################
async def did_update_recovery_ids(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = []
for _ in request["new_list"]:
recovery_list.append(hexstr_to_bytes(_))
if "num_verifications_required" in request:
new_amount_verifications_required = uint64(request["num_verifications_required"])
else:
new_amount_verifications_required = len(recovery_list)
async with self.service.wallet_state_manager.lock:
success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
# Update coin with new ID info
updated_puz = await wallet.get_new_puzzle()
spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
if spend_bundle is not None and success:
return {"success": True}
return {"success": False}
async def did_spend(self, request):
wallet_id = int(request["wallet_id"])
async with self.service.wallet_state_manager.lock:
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
spend_bundle = await wallet.create_spend(request["puzzlehash"])
if spend_bundle is not None:
return {"success": True}
return {"success": False}
async def did_get_did(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did: str = wallet.get_my_DID()
async with self.service.wallet_state_manager.lock:
coins = await wallet.select_coins(1)
if coins is None or coins == set():
return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
else:
coin = coins.pop()
return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}
async def did_get_recovery_list(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
recovery_list = wallet.did_info.backup_ids
recover_hex_list = []
for _ in recovery_list:
recover_hex_list.append(_.hex())
return {
"success": True,
"wallet_id": wallet_id,
"recover_list": recover_hex_list,
"num_required": wallet.did_info.num_of_backup_ids_needed,
}
async def did_recovery_spend(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
return {"success": False, "reason": "insufficient messages"}
async with self.service.wallet_state_manager.lock:
(
info_list,
message_spend_bundle,
) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])
if "pubkey" in request:
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
else:
assert wallet.did_info.temp_pubkey is not None
pubkey = wallet.did_info.temp_pubkey
if "puzhash" in request:
puzhash = hexstr_to_bytes(request["puzhash"])
else:
assert wallet.did_info.temp_puzhash is not None
puzhash = wallet.did_info.temp_puzhash
success = await wallet.recovery_spend(
wallet.did_info.temp_coin,
puzhash,
info_list,
pubkey,
message_spend_bundle,
)
return {"success": success}
async def did_get_pubkey(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
return {"success": True, "pubkey": pubkey}
async def did_create_attest(self, request):
wallet_id = int(request["wallet_id"])
wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
async with self.service.wallet_state_manager.lock:
info = await wallet.get_info_for_recovery()
coin = hexstr_to_bytes(request["coin_name"])
pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
spend_bundle = await wallet.create_attestment(
coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
)
if spend_bundle is not None:
return {
"success": True,
"message_spend_bundle": bytes(spend_bundle).hex(),
"info": [info[0].hex(), info[1].hex(), info[2]],
}
else:
return {"success": False}
async def did_get_information_needed_for_recovery(self, request):
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
my_did = did_wallet.get_my_DID()
coin_name = did_wallet.did_info.temp_coin.name().hex()
return {
"success": True,
"wallet_id": wallet_id,
"my_did": my_did,
"coin_name": coin_name,
"newpuzhash": did_wallet.did_info.temp_puzhash,
"pubkey": did_wallet.did_info.temp_pubkey,
"backup_dids": did_wallet.did_info.backup_ids,
}
async def did_create_backup_file(self, request):
try:
wallet_id = int(request["wallet_id"])
did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
did_wallet.create_backup(request["filename"])
return {"wallet_id": wallet_id, "success": True}
except Exception:
return {"wallet_id": wallet_id, "success": False}
##########################################################################################
# Rate Limited Wallet
##########################################################################################
async def rl_set_user_info(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = uint32(int(request["wallet_id"]))
rl_user = self.service.wallet_state_manager.wallets[wallet_id]
origin = request["origin"]
async with self.service.wallet_state_manager.lock:
await rl_user.set_user_info(
uint64(request["interval"]),
uint64(request["limit"]),
origin["parent_coin_info"],
origin["puzzle_hash"],
origin["amount"],
request["admin_pubkey"],
)
return {}
async def send_clawback_transaction(self, request):
assert self.service.wallet_state_manager is not None
wallet_id = int(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
fee = int(request["fee"])
async with self.service.wallet_state_manager.lock:
tx = await wallet.clawback_rl_coin_transaction(fee)
await wallet.push_transaction(tx)
# Transaction may not have been included in the mempool yet. Use get_transaction to check.
return {
"transaction": tx,
"transaction_id": tx.name,
}
async def add_rate_limited_funds(self, request):
wallet_id = uint32(request["wallet_id"])
wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
request["wallet_id"] = 1
request["puzzle_hash"] = puzzle_hash
async with self.service.wallet_state_manager.lock:
await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
return {"status": "SUCCESS"}
async def get_farmed_amount(self, request):
tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
amount = 0
pool_reward_amount = 0
farmer_reward_amount = 0
fee_amount = 0
last_height_farmed = 0
for record in tx_records:
height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
if height > last_height_farmed:
last_height_farmed = height
if record.type == TransactionType.COINBASE_REWARD:
pool_reward_amount += record.amount
if record.type == TransactionType.FEE_REWARD:
fee_amount += record.amount - calculate_base_farmer_reward(height)
farmer_reward_amount += calculate_base_farmer_reward(height)
amount += record.amount
assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
return {
"farmed_amount": amount,
"pool_reward_amount": pool_reward_amount,
"farmer_reward_amount": farmer_reward_amount,
"fee_amount": fee_amount,
"last_height_farmed": last_height_farmed,
}
async def create_signed_transaction(self, request):
if "additions" not in request or len(request["additions"]) < 1:
raise ValueError("Specify additions list")
additions: List[Dict] = request["additions"]
amount_0: uint64 = uint64(additions[0]["amount"])
assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
if len(puzzle_hash_0) != 32:
raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")
additional_outputs = []
for addition in additions[1:]:
receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
if len(receiver_ph) != 32:
raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
amount = uint64(addition["amount"])
if amount > self.service.constants.MAX_COIN_AMOUNT:
raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})
fee = uint64(0)
if "fee" in request:
fee = uint64(request["fee"])
coins = None
if "coins" in request and len(request["coins"]) > 0:
coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])
async with self.service.wallet_state_manager.lock:
signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
)
return {"signed_tx": signed_tx}
| [
[
[
7,
14
],
[
15039,
15046
],
[
15576,
15583
],
[
16400,
16407
],
[
17093,
17100
]
],
[
[
22,
29
],
[
1682,
1689
]
],
[
[
37,
41
],
[
13719,
13723
],
[
24688,
24692
]
],
[
[
63,
71
],
[
24779,
24787
]
],
[
[
92,
96
],
[
6633,
6637
],
[
10522,
10526
],
[
26255,
26259
],
[
29090,
29094
],
[
29355,
29359
],
[
29866,
29870
],
[
31662,
31666
]
],
[
[
116,
124
],
[
1936,
1944
]
],
[
[
126,
130
],
[
1926,
1930
],
[
8212,
8216
],
[
9320,
9324
],
[
11218,
11222
],
[
11724,
11728
],
[
12049,
12053
],
[
12333,
12337
],
[
13233,
13237
],
[
14418,
14422
],
[
20079,
20083
],
[
20070,
20074
],
[
20878,
20882
],
[
21566,
21570
],
[
21557,
21561
],
[
22090,
22094
],
[
22081,
22085
],
[
23094,
23098
],
[
23296,
23300
],
[
23287,
23291
],
[
30259,
30263
],
[
30735,
30739
],
[
31113,
31117
],
[
31635,
31639
],
[
42269,
42273
]
],
[
[
132,
136
],
[
4797,
4801
],
[
13319,
13323
],
[
40902,
40906
],
[
42264,
42268
]
],
[
[
138,
146
],
[
8454,
8462
],
[
8476,
8484
],
[
21731,
21739
],
[
30451,
30459
]
],
[
[
148,
153
],
[
8448,
8453
]
],
[
[
173,
183
],
[
8463,
8473
]
],
[
[
185,
194
],
[
35840,
35849
],
[
37233,
37242
]
],
[
[
229,
239
],
[
10111,
10121
]
],
[
[
281,
309
],
[
41561,
41589
],
[
41638,
41666
]
],
[
[
360,
380
],
[
12838,
12858
]
],
[
[
422,
430
],
[
12938,
12946
]
],
[
[
432,
440
],
[
12829,
12837
]
],
[
[
487,
507
],
[
12777,
12797
]
],
[
[
554,
558
],
[
20909,
20913
],
[
43362,
43366
]
],
[
[
612,
619
],
[
20900,
20907
],
[
21667,
21674
],
[
21657,
21664
],
[
25299,
25306
],
[
27502,
27509
]
],
[
[
650,
668
],
[
12720,
12738
],
[
25309,
25327
],
[
27512,
27530
]
],
[
[
670,
688
],
[
22829,
22847
],
[
23964,
23982
],
[
24157,
24175
]
],
[
[
722,
737
],
[
17656,
17671
],
[
21675,
21690
],
[
31284,
31299
],
[
32941,
32956
],
[
35861,
35876
],
[
36095,
36110
],
[
37174,
37189
],
[
37254,
37269
],
[
37371,
37386
],
[
42446,
42461
],
[
42701,
42716
]
],
[
[
765,
771
],
[
20166,
20172
],
[
23556,
23562
],
[
39137,
39143
],
[
40374,
40380
]
],
[
[
773,
779
],
[
16082,
16088
],
[
16140,
16146
],
[
16238,
16244
],
[
16294,
16300
],
[
16347,
16353
],
[
17750,
17756
],
[
18136,
18142
],
[
25252,
25258
],
[
25243,
25249
],
[
25395,
25401
],
[
25450,
25456
],
[
27765,
27771
],
[
27756,
27762
],
[
27838,
27844
],
[
27893,
27899
],
[
33061,
33067
],
[
39393,
39399
],
[
39438,
39444
],
[
42325,
42331
],
[
42316,
42322
],
[
42879,
42885
],
[
43174,
43180
],
[
43231,
43237
]
],
[
[
811,
828
],
[
8878,
8895
]
],
[
[
830,
847
],
[
9355,
9372
]
],
[
[
875,
889
],
[
7407,
7421
],
[
10971,
10985
],
[
11322,
11336
],
[
13790,
13804
]
],
[
[
923,
935
],
[
4802,
4814
]
],
[
[
937,
956
],
[
5309,
5328
]
],
[
[
1001,
1009
],
[
14844,
14852
],
[
14827,
14835
],
[
15426,
15434
],
[
26770,
26778
],
[
27092,
27100
],
[
27417,
27425
],
[
28381,
28389
]
],
[
[
1054,
1062
],
[
15947,
15955
],
[
15930,
15938
],
[
17027,
17035
],
[
17010,
17018
],
[
39835,
39843
],
[
40419,
40427
]
],
[
[
1109,
1118
],
[
17911,
17920
],
[
17893,
17902
],
[
18600,
18609
],
[
32778,
32787
],
[
33820,
33829
],
[
34174,
34183
],
[
34775,
34784
],
[
35318,
35327
],
[
36635,
36644
],
[
36975,
36984
],
[
37876,
37885
],
[
38518,
38527
]
],
[
[
1156,
1167
],
[
30460,
30471
]
],
[
[
1211,
1228
],
[
21740,
21757
],
[
25535,
25552
],
[
27978,
27995
],
[
40907,
40924
]
],
[
[
1271,
1286
],
[
7333,
7348
]
],
[
[
1288,
1303
],
[
7629,
7644
],
[
32366,
32381
]
],
[
[
1305,
1318
],
[
13991,
14004
]
],
[
[
1360,
1380
],
[
30629,
30649
],
[
31006,
31026
]
],
[
[
1427,
1442
],
[
41372,
41387
],
[
41487,
41502
]
],
[
[
1485,
1495
],
[
23843,
23853
],
[
24038,
24048
]
],
[
[
1532,
1542
],
[
13324,
13334
]
],
[
[
1579,
1589
],
[
1768,
1778
]
],
[
[
1662,
1669
]
],
[
[
1676,
1679
],
[
7830,
7833
],
[
14115,
14118
],
[
14227,
14230
],
[
14317,
14320
],
[
15798,
15801
],
[
16880,
16883
]
],
[
[
1718,
1730
]
]
] |
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
# import sys
# sys.path.append("../simulated_fqi/")
from simulated_fqi import NFQNetwork, ContrastiveNFQNetwork
import matplotlib.pyplot as plt
import numpy as np
def train(x, y, groups, network, optimizer):
predicted_q_values = network(x, groups).squeeze()
loss = F.mse_loss(predicted_q_values, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
# def test_contrastive_network():
# # Setup agent
# network = ContrastiveNFQNetwork(state_dim=0, is_contrastive=True, nonlinearity=nn.Identity)
# optimizer = optim.Rprop(network.parameters())
# # Generate data
# n, m = 100, 100
# beta_shared = -1
# beta_fg = 2.1
# x_bg, x_fg = np.linspace(-3, 3, m), np.linspace(-3, 3, n)
# x = np.concatenate([x_bg, x_fg])
# groups = np.concatenate([np.zeros(m), np.ones(n)])
# y = beta_shared * x + beta_fg * groups * x# + np.random.normal(scale=0.5, size=m+n)
# x = torch.FloatTensor(x).unsqueeze(1)
# y = torch.FloatTensor(y)
# groups = torch.FloatTensor(groups).unsqueeze(1)
# for epoch in range(200):
# loss = train(x, y, groups, network, optimizer)
# # if epoch % 10 == 0:
# # print("Epoch: {:4d}, Loss: {:4f}".format(epoch, loss))
# network.eval()
# with torch.no_grad():
# preds = network(x, groups)
# assert np.allclose(preds.squeeze().numpy(), y.squeeze().numpy(), atol=1e-4)
# plt.scatter(x, preds, c=groups)
# plt.show()
# import ipdb; ipdb.set_trace()
if __name__ == "__main__":
test_contrastive_network()
| [
[
[
7,
12
]
],
[
[
20,
40
]
],
[
[
48,
72
],
[
372,
373
]
],
[
[
80,
94
]
],
[
[
174,
184
]
],
[
[
186,
207
]
],
[
[
215,
239
]
],
[
[
247,
258
]
],
[
[
265,
270
]
]
] |
"""
WSGI config for tw project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tw.settings")
application = get_wsgi_application()
| [
[
[
225,
227
],
[
280,
282
]
],
[
[
258,
278
],
[
358,
378
]
],
[
[
344,
355
]
]
] |
import torch
def combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot = False):
""" Combine mask for different objects.
Different methods are the following:
* `max_per_pixel`: Computes the final mask taking the pixel with the highest
probability for every object.
# Arguments
masks: Tensor with shape[B, nobj, H, W]. H, W on batches must be same
method: String. Method that specifies how the masks are fused.
# Returns
[B, 1, H, W]
"""
# masks : B, nobj, h, w
# output : h,w
marker = torch.argmax(masks, dim=1, keepdim=True) #
if not return_as_onehot:
out_mask = torch.unsqueeze(torch.zeros_like(masks)[:,0],1) #[B, 1, H, W]
for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
out_mask[tmp_mask] = obj_id + 1 # [B, 1, H, W]
if return_as_onehot:
out_mask = torch.zeros_like(masks) # [B, nobj, H, W]
for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
out_mask[:, obj_id] = tmp_mask[:,0].type(torch.cuda.FloatTensor)
return out_mask
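# Minimal usage sketch (synthetic per-object probability maps; not part of the original
# module). Note the return_as_onehot=True branch casts to torch.cuda.FloatTensor, so it
# expects CUDA; the default label-mask path below runs on CPU.
if __name__ == "__main__":
    batch, n_obj, h, w = 2, 3, 4, 4
    probs = torch.rand(batch, n_obj, h, w)            # fake per-object probabilities
    labels = combine_masks_with_batch(probs, n_obj=n_obj, th=0.5)
    print(labels.shape)                               # torch.Size([2, 1, 4, 4])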
| [
[
[
7,
12
],
[
584,
589
],
[
675,
680
],
[
691,
696
],
[
1008,
1013
],
[
1270,
1275
]
],
[
[
18,
42
]
]
] |
class Task(object):
    def __init__(self, name):
        self.name = name
def run(self):
pass
| [
[
[
6,
10
]
]
] |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geekshop.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
[
[
91,
93
],
[
123,
125
]
],
[
[
101,
104
],
[
578,
581
]
],
[
[
111,
115
],
[
621,
625
]
]
] |
"""
OpenVINO DL Workbench
Class for creation job for creating and exporting inference report
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import json
import os
from contextlib import closing
from sqlalchemy.orm import Session
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.interfaces.job_observers import ExportInferenceReportDBObserver
from wb.main.models import SingleInferenceInfoModel, DownloadableArtifactsModel, InferenceReportExportJobModel
class InferenceReportExportJob(IJob):
job_type = JobTypesEnum.export_inference_report
_job_model_class = InferenceReportExportJobModel
ext = '.csv'
def __init__(self, job_id: int, **unused_kwargs):
super().__init__(job_id=job_id)
export_project_report_db_observer = ExportInferenceReportDBObserver(job_id=self._job_id)
self._job_state_subject.attach(export_project_report_db_observer)
self._attach_default_db_and_socket_observers()
def run(self):
self._job_state_subject.update_state(log='Starting inference report creation job.',
status=StatusEnum.running,
progress=0)
with closing(get_db_session_for_celery()) as session:
session: Session
job_model: InferenceReportExportJobModel = self.get_job_model(session)
artifact: DownloadableArtifactsModel = job_model.shared_artifact
artifact_path = artifact.build_full_artifact_path(ext=self.ext)
inference_job: SingleInferenceInfoModel = job_model.inference
per_layer_data = json.loads(inference_job.runtime_representation)
# create report
with open(artifact_path, 'w', newline='') as csvfile:
report_writer = csv.writer(csvfile, delimiter=';')
report_writer.writerow(
['Execution Order', 'Layer Name', 'Layer Type', 'Execution Time', 'Runtime Precision'])
for layer in per_layer_data:
report_writer.writerow([
layer['details'][0]['executionParams']['execOrder'],
layer['layerName'],
layer['layerType'],
layer['execTime'][0] if layer['execTime'][0] != 'not_executed' else 0,
layer['runtimePrecision'],
])
artifact.update(artifact_path)
artifact.write_record(session)
self._job_state_subject.update_state(log='Finishing inference report job.', status=StatusEnum.ready,
progress=100)
self._job_state_subject.detach_all_observers()
def on_failure(self, exception: Exception):
with closing(get_db_session_for_celery()) as session:
job_model = self.get_job_model(session)
artifact = job_model.downloadable_artifact
artifact_path = artifact.build_full_artifact_path(ext=self.ext)
if os.path.isfile(artifact_path):
os.remove(artifact_path)
super().on_failure(exception)
| [
[
[
678,
681
],
[
2467,
2470
]
],
[
[
689,
693
],
[
2292,
2296
]
],
[
[
701,
703
],
[
3689,
3691
],
[
3732,
3734
]
],
[
[
727,
734
],
[
1875,
1882
],
[
3446,
3453
]
],
[
[
763,
770
],
[
1945,
1952
]
],
[
[
817,
842
],
[
1883,
1908
],
[
3454,
3479
]
],
[
[
874,
886
],
[
1193,
1205
]
],
[
[
888,
898
],
[
1785,
1795
],
[
3252,
3262
]
],
[
[
940,
944
],
[
1171,
1175
]
],
[
[
995,
1026
],
[
1439,
1470
]
],
[
[
1054,
1078
],
[
2216,
2240
]
],
[
[
1080,
1106
],
[
2058,
2084
]
],
[
[
1108,
1137
],
[
1253,
1282
],
[
1976,
2005
]
],
[
[
1146,
1170
]
]
] |
import sys
class KaffeError(Exception):
pass
def print_stderr(msg):
sys.stderr.write('%s\n' % msg)
| [
[
[
7,
10
],
[
78,
81
]
],
[
[
18,
28
]
],
[
[
55,
67
]
]
] |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest initialization for handlers. No actual tests"""
__author__ = 'lschumacher@google.com (Lee Schumacher)'
import main
import webob
import urllib
from google.appengine.ext import webapp
def initialize_handler(
handler_class, action, repo='haiti', environ=None, params=None):
"""Initialize handler_cless and return initialized handler.
"""
params_str = ('?' + urllib.urlencode(params)) if params else ''
request = webapp.Request(webob.Request.blank(
'/' + repo + '/' + action + params_str, environ=environ).environ)
response = webapp.Response()
return handler_class(request, response, main.setup_env(request))
| [
[
[
639,
649
]
],
[
[
702,
706
],
[
1216,
1220
]
],
[
[
714,
719
],
[
1044,
1049
]
],
[
[
727,
733
],
[
971,
977
]
],
[
[
768,
774
],
[
1029,
1035
],
[
1154,
1160
]
],
[
[
781,
799
]
]
] |
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class Copy(function_node.FunctionNode):
"""Copies the input variable onto the specified device."""
def __init__(self, out_device):
self.out_device = out_device
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1
)
def forward(self, inputs):
x, = inputs
self._in_device = cuda.get_device_from_array(x).id
if int(self.out_device) == -1:
return cuda.to_cpu(x),
else:
return cuda.to_gpu(x, self.out_device),
def backward(self, indexes, grad_outputs):
return Copy(self._in_device).apply(grad_outputs)
def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
This function supports copies from host to host, from host to device,
from device to device and from device to host.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable to be copied.
dst (int): Target device specifier.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> import chainer.backends.cuda as cuda
>>> x = np.random.uniform(-1, 1, (5, 10))
>>> cuda.get_device_from_array(x).id
-1
>>> y = F.copy(x, 0) # from host to device0
>>> cuda.get_device_from_array(y.data).id
0
>>> z = F.copy(y, -1) # from device0 to host
>>> cuda.get_device_from_array(z.data).id
-1
"""
y, = Copy(dst).apply((x,))
return y
| [
[
[
29,
33
],
[
478,
482
],
[
569,
573
],
[
618,
622
]
],
[
[
54,
67
],
[
118,
131
]
],
[
[
94,
104
],
[
338,
348
]
],
[
[
113,
117
],
[
714,
718
],
[
1797,
1801
]
],
[
[
762,
766
]
]
] |
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import Node
from utilities.vocab_building_dictionary import DELIM
class DSymtabMod(Node):
def __init__(self, val,
type_helper=None,
child=None, sibling=None):
super().__init__(val, child, sibling)
self.type_helper = type_helper if type_helper is not None else DELIM
self.type = DSymtabMod.name()
@staticmethod
def name():
return 'DSymtabMod'
| [
[
[
614,
618
],
[
692,
696
]
],
[
[
667,
672
],
[
923,
928
]
],
[
[
681,
691
],
[
949,
959
]
]
] |
from datetime import date
from flask_wtf import FlaskForm
from wtforms import StringField
class ProducaoFinalizadasForm(FlaskForm):
nome = StringField('Nome:')
data_comeco = StringField('Data de início:')
data_coleta = StringField('Data de coleta:') | [
[
[
21,
25
]
],
[
[
48,
57
],
[
122,
131
]
],
[
[
78,
89
],
[
145,
156
],
[
184,
195
],
[
233,
244
]
],
[
[
98,
121
]
]
] |
# coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-11-16
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OneOfDeviceFiltersInnerValueItems(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""OneOfDeviceFiltersInnerValueItems - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OneOfDeviceFiltersInnerValueItems, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfDeviceFiltersInnerValueItems):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
[
[
253,
259
],
[
2032,
2038
]
],
[
[
267,
269
]
],
[
[
292,
295
],
[
1084,
1087
]
],
[
[
303,
336
],
[
1788,
1821
],
[
2272,
2305
]
]
] |
import ugame
import stage
import utils
GAME = None
#######################################################
# Game
class Game(stage.Stage):
"""Base class for a game and its display"""
# TODO: add game state machine
# TODO: make each screen a state, and make a transition between them when player overlaps with trigger zones
# TODO: have a combat state
def __init__(self, display=None, fps=12):
# require singleton
global GAME
if GAME:
raise ValueError("Only one Game is allowed at a time")
GAME = self
# NOTE: PyGamer display is 160x128
if display:
super().__init__(display, fps)
else:
super().__init__(ugame.display, fps)
self.midX = int(self.width*0.5)
self.midY = int(self.height*0.5)
self.spriteSize = 16 # static size of sprites in pixels using the stage library
self.bounceX = self.width-self.spriteSize
self.bounceY = self.height-self.spriteSize
self.tilesX = int(self.width/self.spriteSize) # number of tiles that will fit in game
self.tilesY = int(self.height/self.spriteSize)
self.map = None
self.updaters = []
self.sprites = []
self.forceRefresh = False # force a refresh on the next frame
self._pauseObject = None # object that receives updates while game is paused
self.framesToWaitAfterPause = 2
self._curFramesWaiting = 0
def addToUpdates(self, obj):
if isinstance(obj, list):
self.updaters.extend(obj)
else:
self.updaters.append(obj)
def removeFromUpdates(self, obj):
if not isinstance(obj, list):
            obj = [obj]  # wrap a single object so the loop below can iterate it
for o in obj:
self.updaters.remove(o)
def addToSprites(self, obj, updater=True):
if isinstance(obj, list):
self.sprites.extend(obj)
else:
self.sprites.append(obj)
if updater:
self.addToUpdates(obj)
def removeFromSprites(self, obj, updater=True):
if not isinstance(obj, list):
            obj = [obj]  # wrap a single object so the loop below can iterate it
for o in obj:
self.sprites.remove(o)
if updater:
self.removeFromUpdates(obj)
def pause(self, pauseObject):
self._pauseObject = pauseObject
def resume(self):
self._pauseObject = None
self._curFramesWaiting = 0
def gameLoop(self):
while True:
if self._pauseObject:
self._pauseObject.update()
elif self._curFramesWaiting < self.framesToWaitAfterPause:
ugame.buttons.get_pressed() # clear out button press cache
self._curFramesWaiting += 1
else:
for obj in self.updaters:
obj.update()
if not self.forceRefresh:
self.render_sprites(self.sprites)
else:
self.render_block(0, 0)
self.forceRefresh = False
self.tick()
#######################################################
# Map
class TileMap(stage.Grid):
"""A tile map for the whole screen, utilizing a tile set from the given bank"""
def __init__(self, bank, width=8, height=8, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.shaking = 0
self.framesToShake = 4
self._curShakeFrame = 0
self.solidTypes = [] # tile types that should be treated as solid walls for collision
self.triggerTypes = [] # tile types that should trigger some action when overlapped
def fromHexList(self, tileList):
"""
Given a list of hex codes, update the tile map
Example:
tileList = [
"0123456789ABCDEF", # row 0
"0123456790ABCDEF", # row 1
...
]
"""
# validate input
if len(tileList) != self.height:
raise ValueError("Length of tileList is {} but expected {}".format(len(tileList), self.height))
# iterate through tile list
x = 0
y = 0
for row in tileList:
if len(row) != self.width:
raise ValueError("Length of row {} is {} but expected {}".format(y, len(row), self.width))
for tileValue in row:
self.tile(x, y, int(tileValue, 16))
x += 1
y += 1
x = 0
def shake(self, amount=4):
self.shaking = amount
self._curShakeFrame = 0
def handleTrigger(self, sprite, x, y, tileType):
"""Handle special actions based on the tile type"""
pass
def update(self):
if self.shaking != 0:
GAME.forceRefresh = True
if self._curShakeFrame % 2 == 0:
self.move(self.shaking, 0)
else:
self.move(-self.shaking, 0)
self._curShakeFrame += 1
if self._curShakeFrame >= self.framesToShake:
self._curShakeFrame = 0
self.shaking = 0
#######################################################
# Entities
class Moveable(stage.Sprite):
"""Base class for moveable sprites like a player or enemy"""
def __init__(self, bank, x, y):
super().__init__(bank, 0, x, y)
self.x = x
self.y = y
self.collider = utils.BoundingBox(self,2, 2, 12, 12)
self.animations = utils.StateMachine()
def getTilesInCollider(self, dx=0, dy=0):
"""Calculate the grid tiles that are underneath each corner of this sprite's bounding box"""
tiles = []
rect = utils.Rectangle(self.collider.x+dx, self.collider.y+dy, self.collider.width, self.collider.height)
# top left
point = rect.getTopLeft()
point[0] >>= 4 # divide by 16
point[1] >>= 4 # divide by 16
if point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY:
tiles.append(point)
# top right
point = rect.getTopRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom left
point = rect.getBtmLeft()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom right
point = rect.getBtmRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# return list of tiles
return tiles
def getMovement(self):
"""
Determine desired movement (whether AI or player controls) and return dx, dy for this frame
NOTE: tile collision currently only supports moving in one direction at a time (no diagonal)
"""
return 0, 0
def applyMovementAndAnims(self, dx, dy):
"""Apply the desired movement and animations to this sprite"""
# handle movement and constrain to the stage
self.x = max(min(self.x + dx, GAME.bounceX), 0)
self.y = max(min(self.y + dy, GAME.bounceY), 0)
# finish movement
self.move(self.x, self.y)
self.collider.update()
self.animations.update()
def checkTileCollision(self, dx, dy):
"""Check the game map for collisions with tiles. Works best by checking one axis at a time"""
if dx != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(dx, 0)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dx > 0:
self.x = ((t[0]-1) << 4) + self.collider.dx - 1
else:
self.x = ((t[0]+1) << 4) - self.collider.dx + 1
dx = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
if dy != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(0, dy)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dy > 0:
self.y = ((t[1]-1) << 4) + self.collider.dy - 1
else:
self.y = ((t[1]+1) << 4) - self.collider.dy + 1
dy = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
return dx, dy
def getAnimation(self, dx, dy):
"""Update the animation based on the movement and state"""
pass
def update(self):
super().update()
dx, dy = self.getMovement()
dx, dy = self.checkTileCollision(dx, dy)
self.getAnimation(dx, dy)
self.applyMovementAndAnims(dx, dy)
#######################################################
# Animation Helpers
class AnimState(utils.State):
"""
Base class for animation states in a state machine
Expects all the frames to be consecutive in the sprite sheet
Can delay a number of game frames between each animation frame (ex: delay of 1 with 12 fps means delay 1/12 sec between animation frames)
"""
LOOP_FOREVER = -1
ROTATE_MIRROR = 4
ROTATE_90CW = 1
ROTATE_90CCW = 2
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
"""
Create the new state. By default, the animation will advance each game frame, and it will loop forever.
"""
super().__init__(name)
self.sprite = sprite
self.frameStart = frameStart
self.frameEnd = frameEnd
self._curFrame = frameStart
self.delay = delay
self._curDelay = 0
self.numTimes = numTimes
self._curTimes = 0
self.nextState = nextState
self.rotate = rotate
def enter(self, machine):
utils.log("Entering {} and setting frame to {}. Will repeat {} times and then go to state {}".format(self.name, self.frameStart, self.numTimes, self.nextState))
self.sprite.set_frame(self.frameStart, self.rotate)
self._curFrame = self.frameStart
self._curDelay = 0
def update(self, machine):
# handle delay in the animation
if self.delay > 0:
if self._curDelay < self.delay:
self._curDelay += 1
return
# advance the frame in the animation
self._curFrame += 1
self._curDelay = 0
# handle looping/exiting animation
if self._curFrame > self.frameEnd:
self._curFrame = self.frameStart
self._curTimes += 1
if self.numTimes != self.LOOP_FOREVER and self._curTimes > self.numTimes:
self.goToNextState(machine)
return
self.sprite.set_frame(self._curFrame, self.rotate)
def goToNextState(self, machine):
machine.goToState(self.nextState)
class AnimLoop(AnimState):
"""
Loop an animation for a sprite. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, rotate=rotate)
class AnimRepeatN(AnimState):
"""
Repeat an animation N times. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, numTimes, nextState, rotate)
#######################################################
# GUI
class Dialog(TileMap):
"""A modal text dialog built using a tile map"""
def __init__(self, bank, width=8, height=2, text1=None, text2=None, sprite1=None, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.showing = False
# first line of text
self.marginX = 4
self.marginY = 4
        self.text1 = None
if text1:
self.text1 = stage.Text(width=len(text1), height=1)
self.text1.text(text1)
# second line of text
self.marginX2 = self.marginX
self.marginY2 = self.marginY + 15
self.text2 = None
if text2:
self.text2 = stage.Text(width=len(text2), height=1)
self.text2.text(text2)
# extra sprite
self.sprite1 = None
if sprite1:
self.sprite1 = sprite1
# frames to wait at start (avoids accidental button presses)
self.framesToWait = 2
self._curFramesWaiting = 0
def move(self, x, y, z=None):
if self.text1:
self.text1.move(x+self.marginX, y+self.marginY, z)
if self.text2:
self.text2.move(x+self.marginX2, y+self.marginY2, z)
super().move(x, y, z)
def show(self):
"""Display this dialog on top of all the other layers and pause the game"""
if self.showing:
return
GAME.layers.insert(0, self)
if self.text1:
GAME.layers.insert(0, self.text1)
if self.text2:
GAME.layers.insert(0, self.text2)
if self.sprite1:
GAME.layers.insert(0, self.sprite1)
GAME.forceRefresh = True
GAME.pause(self)
self.showing = True
self._curFramesWaiting = 0
def hide(self):
"""Hide this dialog and unpause the game"""
if not self.showing:
return
GAME.layers.remove(self)
if self.text1:
GAME.layers.remove(self.text1)
if self.text2:
GAME.layers.remove(self.text2)
if self.sprite1:
GAME.layers.remove(self.sprite1)
GAME.forceRefresh = True
GAME.resume()
self.showing = False
def update(self):
"""Update function called while the game is paused"""
if self._curFramesWaiting < self.framesToWait:
self._curFramesWaiting += 1
return
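#######################################################
# Usage sketch (hypothetical: assumes a 16x16 sprite bank image and PyGamer hardware,
# neither of which ships with this module):
#
#   bank = stage.Bank.from_bmp16("tiles.bmp")
#   game = Game()                    # singleton, renders to ugame.display
#   dialog = Dialog(bank, width=8, height=2, text1="Hello", text2="Adventurer")
#   dialog.move(16, 48)
#   dialog.show()                    # overlays the dialog and pauses updates
#   dialog.hide()                    # removes it and resumes the game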
| [
[
[
7,
12
],
[
663,
668
],
[
2354,
2359
]
],
[
[
20,
25
],
[
128,
133
],
[
2778,
2783
],
[
4590,
4595
],
[
11318,
11323
],
[
11540,
11545
]
],
[
[
33,
38
],
[
8368,
8373
],
[
4789,
4794
],
[
4848,
4853
],
[
5037,
5042
],
[
9293,
9298
]
],
[
[
40,
44
],
[
455,
459
],
[
4219,
4223
],
[
5303,
5307
],
[
5330,
5334
],
[
5509,
5513
],
[
5536,
5540
],
[
5740,
5744
],
[
5767,
5771
],
[
5973,
5977
],
[
6000,
6004
],
[
6552,
6556
],
[
6604,
6608
],
[
7029,
7033
],
[
7082,
7086
],
[
7318,
7322
],
[
7351,
7355
],
[
7575,
7579
],
[
7628,
7632
],
[
7864,
7868
],
[
7897,
7901
],
[
12172,
12176
],
[
12225,
12229
],
[
12284,
12288
],
[
12345,
12349
],
[
12385,
12389
],
[
12414,
12418
],
[
12595,
12599
],
[
12645,
12649
],
[
12701,
12705
],
[
12759,
12763
],
[
12796,
12800
],
[
12825,
12829
]
],
[
[
123,
127
]
],
[
[
2770,
2777
],
[
10939,
10946
]
],
[
[
4581,
4589
]
],
[
[
8358,
8367
],
[
10249,
10258
],
[
10544,
10553
]
],
[
[
10240,
10248
]
],
[
[
10532,
10543
]
],
[
[
10932,
10938
]
],
[
[
526,
530
]
]
] |
"""
TODO: Add doc what this file is doing
"""
from marshmallow import Schema, post_dump
class RootSchema(Schema):
SKIP_VALUES = [None]
@post_dump
def remove_skip_values(self, data, many, **kwargs):
return {
key: value for key, value in data.items()
if value not in self.SKIP_VALUES
}
@post_dump(pass_original=True)
def add_extra(self, serialized, original, many, **kwargs):
from kubi_ecs_logger.models.include import INCLUDE_FIELDS
for k, v in original.__dict__.items():
if k not in serialized and v is not None:
type_name = str(type(v).__name__).lower()
if type_name in INCLUDE_FIELDS:
schema = INCLUDE_FIELDS[type_name].schema
data = schema.dump(v)
if "kind" not in data:
data["kind"] = type_name
serialized[k] = data
elif isinstance(v, (int, float, str, bool, dict)):
if not str(k).startswith('_'):
serialized[k] = v
return serialized
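# Hypothetical usage sketch (field names are illustrative; not part of the original file):
#
#   from marshmallow import fields
#
#   class EventSchema(RootSchema):
#       action = fields.Str()
#       outcome = fields.Str(allow_none=True)
#
#   # None-valued fields are dropped by remove_skip_values; extra attributes found on the
#   # original object are folded back in by add_extra.
#   data = EventSchema().dump(event)   # `event` is any object with matching attributes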
| [
[
[
70,
76
],
[
107,
113
]
],
[
[
78,
87
],
[
147,
156
],
[
345,
354
]
],
[
[
96,
106
]
]
] |
import os
import io
import time
import base64
import functools
from PIL import Image
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from helpers import *
os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True"
class PythonPredictor:
def __init__(self, config):
# Import TF-Hub module
self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
def predict(self, payload):
# Preprocess image
hr_image = preprocess_image(payload["image_b64"])
# Run model
fake_image = self.hub_module(hr_image)
# convert to base64
img = get_image(tf.squeeze(fake_image))
im_file = io.BytesIO()
img.save(im_file, format="PNG")
im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")
return im_bytes
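# Hypothetical usage sketch (Cortex-style predictor; assumes `img_b64` holds a
# base64-encoded image and that helpers.preprocess_image / get_image are importable):
#
#   predictor = PythonPredictor(config={})
#   upscaled_b64 = predictor.predict({"image_b64": img_b64})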
| [
[
[
7,
9
],
[
181,
183
]
],
[
[
17,
19
],
[
680,
682
]
],
[
[
27,
31
]
],
[
[
39,
45
],
[
752,
758
]
],
[
[
53,
62
]
],
[
[
80,
85
]
],
[
[
93,
104
]
],
[
[
112,
128
],
[
638,
640
]
],
[
[
136,
157
],
[
343,
346
]
],
[
[
179,
180
],
[
478,
494
],
[
628,
637
]
],
[
[
236,
251
]
]
] |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------------------
# DISCLAIMER: This is just a slightly adjusted version of the EpsilonGreedyPolicy in TF-Agents.
# Most of the code here is directly copied from there.
# I changed it such that the policy in the epsilon case is not random, but sampled from
# the original policy distribution.
# ------------------------------------------------------------------------------------------
"""Policy implementation that generates epsilon-greedy actions from a policy.
TODO(kbanoop): Make policy state optional in the action method.
"""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Optional, Text
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.policies import policy_utilities
from tf_agents.policies import greedy_policy
from tf_agents.policies import tf_policy
from tf_agents.trajectories import policy_step
from tf_agents.typing import types
from tf_agents.utils import nest_utils
tfd = tfp.distributions
class EpsilonGreedyPolicy(tf_policy.TFPolicy):
"""Returns epsilon-greedy samples of a given policy."""
def __init__(self,
policy: tf_policy.TFPolicy,
epsilon: types.FloatOrReturningFloat,
name: Optional[Text] = None):
"""Builds an epsilon-greedy MixturePolicy wrapping the given policy.
Args:
policy: A policy implementing the tf_policy.TFPolicy interface.
epsilon: The probability of taking the random action represented as a
float scalar, a scalar Tensor of shape=(), or a callable that returns a
float scalar or Tensor.
name: The name of this policy.
Raises:
ValueError: If epsilon is invalid.
"""
try:
observation_and_action_constraint_splitter = (
policy.observation_and_action_constraint_splitter)
except AttributeError:
observation_and_action_constraint_splitter = None
try:
accepts_per_arm_features = policy.accepts_per_arm_features
except AttributeError:
accepts_per_arm_features = False
self._greedy_policy = greedy_policy.GreedyPolicy(policy)
self._epsilon = epsilon
self._epsilon_policy = self._greedy_policy.wrapped_policy # this is my main change from the original code
super(EpsilonGreedyPolicy, self).__init__(
policy.time_step_spec,
policy.action_spec,
policy.policy_state_spec,
policy.info_spec,
emit_log_probability=policy.emit_log_probability,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter),
name=name)
@property
def wrapped_policy(self) -> tf_policy.TFPolicy:
return self._greedy_policy.wrapped_policy
def _variables(self):
return self._greedy_policy.variables()
def _get_epsilon(self):
if callable(self._epsilon):
return self._epsilon()
else:
return self._epsilon
def _action(self, time_step, policy_state, seed):
seed_stream = tfp.util.SeedStream(seed=seed, salt='epsilon_greedy')
greedy_action = self._greedy_policy.action(time_step, policy_state)
epsilon_action = self._epsilon_policy.action(time_step, (), seed_stream())
outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec)
rng = tf.random.uniform(
outer_shape, maxval=1.0, seed=seed_stream(), name='epsilon_rng')
cond = tf.greater(rng, self._get_epsilon())
# Selects the action/info from the random policy with probability epsilon.
# TODO(b/133175894): tf.compat.v1.where only supports a condition which is
# either a scalar or a vector. Use tf.compat.v2 so that it can support any
# condition whose leading dimensions are the same as the other operands of
# tf.where.
outer_ndims = int(outer_shape.shape[0])
if outer_ndims >= 2:
raise ValueError(
'Only supports batched time steps with a single batch dimension')
action = tf.nest.map_structure(lambda g, r: tf.compat.v1.where(cond, g, r),
greedy_action.action, epsilon_action.action)
if greedy_action.info:
if not epsilon_action.info:
raise ValueError('Incompatible info field')
# Note that the objects in PolicyInfo may have different shapes, so we
# need to call nest_utils.where() on each type of object.
info = tf.nest.map_structure(lambda x, y: nest_utils.where(cond, x, y),
greedy_action.info, epsilon_action.info)
if self._emit_log_probability:
# At this point, info.log_probability contains the log prob of the
# action chosen, conditioned on the policy that was chosen. We want to
# emit the full log probability of the action, so we'll add in the log
# probability of choosing the policy.
random_log_prob = tf.nest.map_structure(
lambda t: tf.math.log(tf.zeros_like(t) + self._get_epsilon()),
info.log_probability)
greedy_log_prob = tf.nest.map_structure(
lambda t: tf.math.log(tf.ones_like(t) - self._get_epsilon()),
random_log_prob)
log_prob_of_chosen_policy = nest_utils.where(cond, greedy_log_prob,
random_log_prob)
log_prob = tf.nest.map_structure(lambda a, b: a + b,
log_prob_of_chosen_policy,
info.log_probability)
info = policy_step.set_log_probability(info, log_prob)
# Overwrite bandit policy info type.
if policy_utilities.has_bandit_policy_type(info, check_for_tensor=True):
# Generate mask of the same shape as bandit_policy_type (batch_size, 1).
# This is the opposite of `cond`, which is 1-D bool tensor (batch_size,)
# that is true when greedy policy was used, otherwise `cond` is false.
random_policy_mask = tf.reshape(tf.logical_not(cond),
tf.shape(info.bandit_policy_type))
bandit_policy_type = policy_utilities.bandit_policy_uniform_mask(
info.bandit_policy_type, mask=random_policy_mask)
info = policy_utilities.set_bandit_policy_type(
info, bandit_policy_type)
else:
if epsilon_action.info:
raise ValueError('Incompatible info field')
info = ()
# The state of the epsilon greedy policy is the state of the underlying
# greedy policy (the random policy carries no state).
    # It is commonly assumed that the new policy state depends only
    # on the previous state and "time_step"; the action (be it the greedy one
    # or the random one) does not influence the new policy state.
state = greedy_action.state
return policy_step.PolicyStep(action, state, info)
def _distribution(self, time_step, policy_state):
raise NotImplementedError(
'EpsilonGreedyPolicy does not support distributions yet.')
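# A minimal NumPy sketch of the epsilon-greedy mixing rule implemented by `_action`
# above: keep the greedy action wherever a uniform draw exceeds epsilon, otherwise
# fall back to the alternative action. The names below are illustrative only and
# not part of the original module.
import numpy as np
def epsilon_greedy_mix(greedy_actions, fallback_actions, epsilon, rng=None):
  rng = rng or np.random.default_rng()
  keep_greedy = rng.uniform(size=len(greedy_actions)) > epsilon
  return np.where(keep_greedy, greedy_actions, fallback_actions)
# Example: with epsilon=0.1 roughly 90% of the entries keep the greedy action.
# epsilon_greedy_mix(np.ones(1000, dtype=int), np.zeros(1000, dtype=int), epsilon=0.1)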
| [
[
[
1271,
1286
]
],
[
[
1310,
1318
]
],
[
[
1368,
1382
]
],
[
[
1403,
1411
],
[
2071,
2079
]
],
[
[
1413,
1417
],
[
2080,
2084
]
],
[
[
1426,
1442
],
[
4101,
4103
],
[
4204,
4206
],
[
4756,
4758
],
[
5171,
5173
],
[
5654,
5656
],
[
5812,
5814
],
[
6103,
6105
],
[
6731,
6733
],
[
6742,
6744
],
[
6804,
6806
],
[
4791,
4793
],
[
5699,
5701
],
[
5711,
5713
],
[
5857,
5859
],
[
5869,
5871
]
],
[
[
1506,
1535
],
[
1807,
1810
],
[
3807,
3810
]
],
[
[
1576,
1592
],
[
6391,
6407
],
[
6868,
6884
],
[
6990,
7006
]
],
[
[
1624,
1637
],
[
2908,
2921
]
],
[
[
1669,
1678
],
[
1853,
1862
],
[
1977,
1986
],
[
3477,
3486
]
],
[
[
1714,
1725
],
[
6291,
6302
],
[
7573,
7584
]
],
[
[
1755,
1760
],
[
2021,
2026
]
],
[
[
1789,
1799
],
[
4031,
4041
],
[
5974,
5984
],
[
5206,
5216
]
],
[
[
1801,
1804
]
],
[
[
1833,
1852
],
[
3091,
3110
]
]
] |
from os import system
def comprar(comida, juguetes):
comprado = ""
while not comprado:
system("cls")
comprar = (input("Que quiere comprar? Alimentos | Juguetes : ")).lower()
if comprar == "alimento":
print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Huesos: {comida['hueso']['cantidad']}")
producto = (input("Que queres comprar?: ")).lower()
if producto in comida.keys():
cantidad = input("Cuánto quieres comprar?: ")
if cantidad.isdecimal():
comida[producto]['cantidad'] += int(cantidad)
comprado = producto
if comprar == "juguete":
print("Pelota | Soga | Muñeco")
producto = (input("Que quieres comprar?: ")).lower()
if producto in juguetes.keys():
juguetes[producto] = "si"
comprado = producto | [
[
[
15,
21
],
[
95,
101
]
],
[
[
27,
34
]
]
] |
"""
Third generation models implementation (VPIN)
"""
import pandas as pd
def get_vpin(volume: pd.Series, buy_volume: pd.Series, window: int = 1) -> pd.Series:
"""
Get Volume-Synchronized Probability of Informed Trading (VPIN) from bars, p. 292-293.
:param volume: (pd.Series) bar volume
:param buy_volume: (pd.Series) bar volume classified as buy (either tick rule, BVC or aggressor side methods applied)
:param window: (int) estimation window
:return: (pd.Series) VPIN series
"""
sell_volume = volume - buy_volume
volume_imbalance = abs(buy_volume - sell_volume)
return volume_imbalance.rolling(window=window).mean() / volume
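# A hedged usage sketch on synthetic bar data (the values are illustrative only and
# not part of the original module).
if __name__ == '__main__':
    volume = pd.Series([100.0, 120.0, 90.0, 110.0, 105.0])
    buy_volume = pd.Series([60.0, 50.0, 45.0, 70.0, 40.0])
    # The first window-1 values are NaN until the rolling mean has enough bars.
    print(get_vpin(volume, buy_volume, window=3))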
| [
[
[
61,
73
],
[
151,
153
],
[
97,
99
],
[
120,
122
]
],
[
[
80,
88
]
]
] |
"""
github3.gists.comment
---------------------
Module containing the logic for a GistComment
"""
from github3.models import BaseComment
from github3.users import User
class GistComment(BaseComment):
"""This object represents a comment on a gist.
Two comment instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.id == c2.id
c1.id != c2.id
See also: http://developer.github.com/v3/gists/comments/
"""
def __init__(self, comment, session=None):
super(GistComment, self).__init__(comment, session)
#: :class:`User <github3.users.User>` who made the comment
#: Unless it is not associated with an account
self.user = None
if comment.get('user'):
self.user = User(comment.get('user'), self) # (No coverage)
def __repr__(self):
return '<Gist Comment [{0}]>'.format(self.user.login)
| [
[
[
128,
139
],
[
191,
202
]
],
[
[
166,
170
],
[
802,
806
]
],
[
[
179,
190
],
[
552,
563
]
]
] |
import numpy as np
class NeuralNetwork(object):
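    """Simple fully-connected feed-forward network trained with batch gradient descent.
    topology lists the layer sizes (input layer first), epsilon bounds the uniform random
    weight initialisation, and numLabels is the number of output classes.
    """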
def __init__(self, topology, epsilon, numLabels):
self.theta = []
self.topology = topology
self.numLabels = numLabels
self.gradientChecking = False
for layer in range(len(self.topology)):
if layer == 0:
continue
self.theta.append(np.random.rand(self.topology[layer], self.topology[layer - 1] + 1) * 2 * epsilon - epsilon)
def gradientDescent(self, iters, alpha, lamda, X, Y):
self.X = X
self.Y = Y
for i in range(iters):
(J, thetaGrad) = self.getCostAndGradient(lamda)
# gradient checking
if self.gradientChecking:
thetaCopy = self.theta.copy()
for i in range(len(self.topology) - 1):
for j in range(self.topology[i + 1]):
for k in range(self.topology[i]):
EPS = 0.00001
self.theta[i][j, k] += EPS
J2 = self.getCostAndGradient(lamda)[0]
self.theta[i][j, k] -= 2 * EPS
J1 = self.getCostAndGradient(lamda)[0]
print(str((J2 - J1) / (2 * EPS) - thetaGrad[i][j, k]))
self.theta = thetaCopy
# end
for layer in range(len(self.topology) - 1):
self.theta[layer] -= thetaGrad[layer] * alpha
print("Iter " + str(i) + ": " + str(J))
def predict(self, x):
x = x.reshape((x.shape[0], 1))
x = np.concatenate(([[1]], x))
for layer in range(1, len(self.topology)):
x = np.matmul(self.theta[layer - 1], x)
for i in range(x.shape[0]):
x[i, 0] = self.sigmoid(x[i, 0])
if layer != len(self.topology) - 1:
x = np.concatenate(([[1]], x))
prediction = -1
predictionSurety = -1
for i in range(self.numLabels):
if x[i, 0] > predictionSurety:
prediction = i
predictionSurety = x[i, 0]
return prediction
def getCostAndGradient(self, lamda):
J = 0
thetaGrad = []
for layer in range(len(self.topology)):
if layer == 0:
continue
thetaGrad.append(np.zeros((self.topology[layer], self.topology[layer - 1] + 1)))
m = self.X.shape[0]
for example in range(m):
x = self.X[example].copy()
x = x.reshape((x.shape[0], 1))
y = np.zeros(self.numLabels)
y[self.Y[example]] = 1
y = y.reshape((y.shape[0], 1))
a = []
z = []
delta = []
for layer in range(len(self.topology)):
if layer == 0:
a.append(np.concatenate(([[1]], x)))
z.append(np.concatenate(([[1]], x)))
delta.append(0)
continue
z.append(np.matmul(self.theta[layer - 1], a[layer - 1]))
a.append(z[layer].copy())
for i in range(self.topology[layer]):
a[layer][i, 0] = self.sigmoid(a[layer][i, 0])
if layer != len(self.topology) - 1:
a[layer] = np.concatenate(([[1]], a[layer]))
z[layer] = np.concatenate(([[1]], z[layer]))
delta.append(0)
for layer in range(len(self.topology) - 1, 0, -1):
if layer == len(self.topology) - 1:
delta[layer] = a[layer] - y
thetaGrad[layer - 1] += np.matmul(delta[layer], a[layer - 1].transpose())
continue
sigDerZ = z[layer].copy()
for i in range(self.topology[layer] + 1):
sigDerZ[i] = self.sigmoidDerivative(sigDerZ[i])
if layer >= len(self.topology) - 2:
delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1]) * sigDerZ
else:
delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1][1:, :]) * sigDerZ
thetaGrad[layer - 1] += np.matmul(delta[layer][1:, :], a[layer - 1].transpose())
J += np.sum(-(1 - y) * np.log(1 - a[len(self.topology) - 1])) - np.sum(y * np.log(a[len(self.topology) - 1]))
J /= m
for layer in range(len(self.topology) - 1):
thetaGrad[layer] *= (1 / m)
for i in range(len(self.topology) - 1):
for j in range(self.topology[i + 1]):
for k in range(1, self.topology[i]):
J += (lamda / (2 * m)) * self.theta[i][j, k] ** 2
thetaGrad[i][j, k] += (lamda / m) * self.theta[i][j, k]
return (J, thetaGrad)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoidDerivative(self, x):
sig = self.sigmoid(x)
return sig * (1 - sig) | [
[
[
7,
18
],
[
363,
365
],
[
1627,
1629
],
[
1721,
1723
],
[
1913,
1915
],
[
2412,
2414
],
[
2644,
2646
],
[
2933,
2935
],
[
2990,
2992
],
[
3108,
3110
],
[
3401,
3403
],
[
3466,
3468
],
[
3756,
3758
],
[
4124,
4126
],
[
4250,
4252
],
[
4383,
4385
],
[
4470,
4472
],
[
4488,
4490
],
[
4529,
4531
],
[
4540,
4542
],
[
5105,
5107
]
],
[
[
26,
39
]
]
] |
"""“Write a for loop to print all the values in the half_lives list from Operations on Lists, all on a single line. half_lives refers to [87.74, 24110.0, 6537.0, 14.4, 376000.0]."""
half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0]
for i in half_lives:
print(i , end=' ') | [
[
[
185,
195
],
[
248,
258
]
],
[
[
243,
244
],
[
270,
271
]
]
] |
#!/usr/bin/env python3
"""Read a correct swagger file and check whether it conforms to a style guide."""
import argparse
import pathlib
from typing import List
import sys
import swagger_to.intermediate
import swagger_to.style
import swagger_to.swagger
def main() -> int:
"""Execute the main routine."""
parser = argparse.ArgumentParser("Reads a correct swagger file and checks that it conforms to the style guide.")
parser.add_argument("--swagger_path", help="path to the swagger file", required=True)
parser.add_argument("--verbose", help="if set, prints as much information as possible.", action="store_true")
parser.add_argument(
"--with_line_number",
help="if set, prints the errors with the corresponding file name and line number.",
action="store_true")
args = parser.parse_args()
assert isinstance(args.swagger_path, str)
assert isinstance(args.verbose, bool)
assert isinstance(args.with_line_number, bool)
swagger_path = pathlib.Path(args.swagger_path)
if not swagger_path.exists():
print("File not found error: Swagger file does not exist: {}".format(swagger_path))
return 2
swagger, errs = swagger_to.swagger.parse_yaml_file(path=swagger_path)
if errs:
print("Value error: Failed to parse Swagger file {}:\n{}".format(swagger_path, "\n".join(errs)))
return 2
intermediate_typedefs = swagger_to.intermediate.to_typedefs(swagger=swagger)
intermediate_params = swagger_to.intermediate.to_parameters(swagger=swagger, typedefs=intermediate_typedefs)
endpoints = swagger_to.intermediate.to_endpoints(
swagger=swagger, typedefs=intermediate_typedefs, params=intermediate_params)
result = swagger_to.style.perform(swagger=swagger, typedefs=intermediate_typedefs, endpoints=endpoints)
if result:
complaints = '\n'.join(
format_complaints(
complaints=result,
swagger_path=str(swagger_path),
verbose=args.verbose,
with_line_number=args.with_line_number))
print("Style checks failed: \n{}".format(complaints))
return 1
print("Style checks succeeded.")
return 0
def format_complaints(complaints: List[swagger_to.style.Complaint], swagger_path: str, verbose: bool,
with_line_number: bool) -> List[str]:
"""
Convert a list of complaints into a well-formatted list of error messages.
    :param complaints: style complaints to be formatted
    :param swagger_path: path to the swagger file the complaints refer to
    :param verbose: if set, include the offending text in each message
    :param with_line_number: if set, prefix each message with the file name and line number
    :return: list of formatted complaint messages
"""
if with_line_number:
complaints.sort(key=lambda complaint: complaint.line)
complaints_str = [] # type: List[str]
for complaint in complaints:
complaint_str = ''
if with_line_number:
complaint_str += "{}:{} ".format(swagger_path, complaint.line)
else:
complaint_str += "{}: ".format(complaint.where)
complaint_str += "{} ".format(complaint.message)
if verbose:
complaint_str += "\"{}\"".format(complaint.what.replace('\n', ' '))
complaints_str.append(complaint_str)
return complaints_str
if __name__ == "__main__":
sys.exit(main())
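# Hypothetical invocation (the script file name is an assumption):
#   python swagger_style_check.py --swagger_path ./swagger.yaml --verbose --with_line_number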
| [
[
[
112,
120
],
[
324,
332
]
],
[
[
128,
135
],
[
999,
1006
]
],
[
[
155,
159
],
[
2370,
2374
],
[
2253,
2257
]
],
[
[
168,
171
],
[
3221,
3224
]
],
[
[
180,
203
]
],
[
[
211,
227
]
],
[
[
235,
253
],
[
1196,
1206
],
[
1414,
1424
],
[
1493,
1503
],
[
1597,
1607
],
[
1734,
1744
],
[
2258,
2268
]
],
[
[
260,
264
],
[
3230,
3234
]
],
[
[
2223,
2240
],
[
1889,
1906
]
]
] |
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
The version of the OpenAPI document: v1beta1-0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.configuration import Configuration
class V1beta1ExperimentSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'algorithm': 'V1beta1AlgorithmSpec',
'early_stopping': 'V1beta1EarlyStoppingSpec',
'max_failed_trial_count': 'int',
'max_trial_count': 'int',
'metrics_collector_spec': 'V1beta1MetricsCollectorSpec',
'nas_config': 'V1beta1NasConfig',
'objective': 'V1beta1ObjectiveSpec',
'parallel_trial_count': 'int',
'parameters': 'list[V1beta1ParameterSpec]',
'resume_policy': 'str',
'trial_template': 'V1beta1TrialTemplate'
}
attribute_map = {
'algorithm': 'algorithm',
'early_stopping': 'earlyStopping',
'max_failed_trial_count': 'maxFailedTrialCount',
'max_trial_count': 'maxTrialCount',
'metrics_collector_spec': 'metricsCollectorSpec',
'nas_config': 'nasConfig',
'objective': 'objective',
'parallel_trial_count': 'parallelTrialCount',
'parameters': 'parameters',
'resume_policy': 'resumePolicy',
'trial_template': 'trialTemplate'
}
def __init__(self, algorithm=None, early_stopping=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ExperimentSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._algorithm = None
self._early_stopping = None
self._max_failed_trial_count = None
self._max_trial_count = None
self._metrics_collector_spec = None
self._nas_config = None
self._objective = None
self._parallel_trial_count = None
self._parameters = None
self._resume_policy = None
self._trial_template = None
self.discriminator = None
if algorithm is not None:
self.algorithm = algorithm
if early_stopping is not None:
self.early_stopping = early_stopping
if max_failed_trial_count is not None:
self.max_failed_trial_count = max_failed_trial_count
if max_trial_count is not None:
self.max_trial_count = max_trial_count
if metrics_collector_spec is not None:
self.metrics_collector_spec = metrics_collector_spec
if nas_config is not None:
self.nas_config = nas_config
if objective is not None:
self.objective = objective
if parallel_trial_count is not None:
self.parallel_trial_count = parallel_trial_count
if parameters is not None:
self.parameters = parameters
if resume_policy is not None:
self.resume_policy = resume_policy
if trial_template is not None:
self.trial_template = trial_template
@property
def algorithm(self):
"""Gets the algorithm of this V1beta1ExperimentSpec. # noqa: E501
:return: The algorithm of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1AlgorithmSpec
"""
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
"""Sets the algorithm of this V1beta1ExperimentSpec.
:param algorithm: The algorithm of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1AlgorithmSpec
"""
self._algorithm = algorithm
@property
def early_stopping(self):
"""Gets the early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:return: The early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1EarlyStoppingSpec
"""
return self._early_stopping
@early_stopping.setter
def early_stopping(self, early_stopping):
"""Sets the early_stopping of this V1beta1ExperimentSpec.
:param early_stopping: The early_stopping of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1EarlyStoppingSpec
"""
self._early_stopping = early_stopping
@property
def max_failed_trial_count(self):
"""Gets the max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
Max failed trials to mark experiment as failed. # noqa: E501
:return: The max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_failed_trial_count
@max_failed_trial_count.setter
def max_failed_trial_count(self, max_failed_trial_count):
"""Sets the max_failed_trial_count of this V1beta1ExperimentSpec.
Max failed trials to mark experiment as failed. # noqa: E501
:param max_failed_trial_count: The max_failed_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._max_failed_trial_count = max_failed_trial_count
@property
def max_trial_count(self):
"""Gets the max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
Max completed trials to mark experiment as succeeded # noqa: E501
:return: The max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_trial_count
@max_trial_count.setter
def max_trial_count(self, max_trial_count):
"""Sets the max_trial_count of this V1beta1ExperimentSpec.
Max completed trials to mark experiment as succeeded # noqa: E501
:param max_trial_count: The max_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._max_trial_count = max_trial_count
@property
def metrics_collector_spec(self):
"""Gets the metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:return: The metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1MetricsCollectorSpec
"""
return self._metrics_collector_spec
@metrics_collector_spec.setter
def metrics_collector_spec(self, metrics_collector_spec):
"""Sets the metrics_collector_spec of this V1beta1ExperimentSpec.
:param metrics_collector_spec: The metrics_collector_spec of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1MetricsCollectorSpec
"""
self._metrics_collector_spec = metrics_collector_spec
@property
def nas_config(self):
"""Gets the nas_config of this V1beta1ExperimentSpec. # noqa: E501
:return: The nas_config of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1NasConfig
"""
return self._nas_config
@nas_config.setter
def nas_config(self, nas_config):
"""Sets the nas_config of this V1beta1ExperimentSpec.
:param nas_config: The nas_config of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1NasConfig
"""
self._nas_config = nas_config
@property
def objective(self):
"""Gets the objective of this V1beta1ExperimentSpec. # noqa: E501
:return: The objective of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1ObjectiveSpec
"""
return self._objective
@objective.setter
def objective(self, objective):
"""Sets the objective of this V1beta1ExperimentSpec.
:param objective: The objective of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1ObjectiveSpec
"""
self._objective = objective
@property
def parallel_trial_count(self):
"""Gets the parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:return: The parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._parallel_trial_count
@parallel_trial_count.setter
def parallel_trial_count(self, parallel_trial_count):
"""Sets the parallel_trial_count of this V1beta1ExperimentSpec.
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:param parallel_trial_count: The parallel_trial_count of this V1beta1ExperimentSpec. # noqa: E501
:type: int
"""
self._parallel_trial_count = parallel_trial_count
@property
def parameters(self):
"""Gets the parameters of this V1beta1ExperimentSpec. # noqa: E501
List of hyperparameter configurations. # noqa: E501
:return: The parameters of this V1beta1ExperimentSpec. # noqa: E501
:rtype: list[V1beta1ParameterSpec]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1beta1ExperimentSpec.
List of hyperparameter configurations. # noqa: E501
:param parameters: The parameters of this V1beta1ExperimentSpec. # noqa: E501
:type: list[V1beta1ParameterSpec]
"""
self._parameters = parameters
@property
def resume_policy(self):
"""Gets the resume_policy of this V1beta1ExperimentSpec. # noqa: E501
        Describes the resuming policy, which usually takes effect after the experiment is terminated.  # noqa: E501
:return: The resume_policy of this V1beta1ExperimentSpec. # noqa: E501
:rtype: str
"""
return self._resume_policy
@resume_policy.setter
def resume_policy(self, resume_policy):
"""Sets the resume_policy of this V1beta1ExperimentSpec.
        Describes the resuming policy, which usually takes effect after the experiment is terminated.  # noqa: E501
:param resume_policy: The resume_policy of this V1beta1ExperimentSpec. # noqa: E501
:type: str
"""
self._resume_policy = resume_policy
@property
def trial_template(self):
"""Gets the trial_template of this V1beta1ExperimentSpec. # noqa: E501
:return: The trial_template of this V1beta1ExperimentSpec. # noqa: E501
:rtype: V1beta1TrialTemplate
"""
return self._trial_template
@trial_template.setter
def trial_template(self, trial_template):
"""Sets the trial_template of this V1beta1ExperimentSpec.
:param trial_template: The trial_template of this V1beta1ExperimentSpec. # noqa: E501
:type: V1beta1TrialTemplate
"""
self._trial_template = trial_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ExperimentSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ExperimentSpec):
return True
return self.to_dict() != other.to_dict()
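# A hedged usage sketch; the field values below are illustrative assumptions only.
if __name__ == "__main__":
    spec = V1beta1ExperimentSpec(
        max_trial_count=12,
        parallel_trial_count=3,
        max_failed_trial_count=3,
        resume_policy="LongRunning",
    )
    print(spec.to_str())  # pretty-printed dict of all fields (unset ones remain None)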
| [
[
[
196,
202
],
[
12272,
12278
]
],
[
[
210,
212
]
],
[
[
235,
238
],
[
11468,
11471
]
],
[
[
281,
294
],
[
2255,
2268
]
],
[
[
303,
324
],
[
12512,
12533
],
[
12731,
12752
]
]
] |
from collections import namedtuple
import os
import json
import numpy as np
from tqdm import tqdm
from data_generators.utils import load_image_rgb
# Copied from: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
#
# Cityscapes labels
#
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
Label = namedtuple( 'Label' , [
'name' , # The identifier of this label, e.g. 'car', 'person', ... .
# We use them to uniquely name a class
'id' , # An integer ID that is associated with this label.
# The IDs are used to represent the label in ground truth images
# An ID of -1 means that this label does not have an ID and thus
# is ignored when creating ground truth images (e.g. license plate).
# Do not modify these IDs, since exactly these IDs are expected by the
# evaluation server.
'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
# ground truth images with train IDs, using the tools provided in the
# 'preparation' folder. However, make sure to validate or submit results
# to our evaluation server using the regular IDs above!
# For trainIds, multiple labels might have the same ID. Then, these labels
# are mapped to the same class in the ground truth images. For the inverse
# mapping, we use the label that is defined first in the list below.
# For example, mapping all void-type classes to the same ID in training,
# might make sense for some approaches.
# Max value is 255!
'category' , # The name of the category that this label belongs to
'categoryId' , # The ID of this category. Used to create ground truth images
# on category level.
'hasInstances', # Whether this label distinguishes between single instances or not
'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
# during evaluations or not
'color' , # The color of this label
] )
def label2dict(label):
return {
'name': label.name, 'id': label.id, 'trainId': label.trainId,
'category': label.category, 'catId': label.categoryId, 'hasInstances': label.hasInstances,
'ignoreInEval': label.ignoreInEval, 'color': label.color
}
def save_labels(labels, fpath):
l = []
for label in labels:
l.append(label2dict(label))
fp = open(fpath, 'w')
json.dump(l, fp)
fp.close()
def load_labels(fpath):
fp = open(fpath, 'r')
l = json.load(fp)
fp.close()
labels = []
for item in l:
labels.append(
Label(
item['name'], item['id'], item['trainId'],
item['category'], item['catId'], item['hasInstances'],
item['ignoreInEval'], tuple(item['color']))
)
return labels
class KittiDataset:
def __init__(self):
self.image_ids = []
def load_kitti(self, dataset_dir, subset, tag='simple'):
'Initialization'
assert subset in ['train', 'val'], 'subset must be either train or val but {} is given'.format(subset)
self.labels = load_labels(os.path.join(dataset_dir, 'annotations', 'semantic_{}.json'.format(tag)))
# trainId to colors
self.trainId2colors = {label.trainId: [] for label in self.labels}
for label in self.labels:
self.trainId2colors[label.trainId].append(label.color)
# trainId to name
self.trainId2name = {label.trainId: label.name for label in self.labels}
# number of valid trainIds + background class
self.num_classes = max([label.trainId for label in self.labels if label.trainId >= 0 and label.trainId < 255]) + 2
self.class_names = [self.trainId2name[i] for i in range(self.num_classes - 1)]
self.image_dir = os.path.join(dataset_dir, subset, 'images')
self.label_dir = os.path.join(dataset_dir, subset, 'semantic_rgb')
assert os.path.exists(self.image_dir), 'No such directory: {}'.format(self.image_dir)
assert os.path.exists(self.label_dir), 'No such directory: {}'.format(self.label_dir)
self.image_files = sorted([x for x in os.listdir(self.image_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')])
self.label_files = sorted([x for x in os.listdir(self.label_dir) if x.lower().endswith('.png')])
assert len(self.image_files) == len(self.label_files), \
'image - label size mismatch! There are {} image files and {} label files'.format(len(self.image_files), len(self.label_files))
self.num_images = len(self.image_files)
self.image_ids = np.arange(self.num_images)
def check_sanity(self):
for i in tqdm(self.image_ids):
assert self.image_files[i][:-4] == self.label_files[i][:-4],\
'image - label filename mismatch: {} - {}'.format(self.image_files[i], self.label_files[i])
img = load_image_rgb(os.path.join(self.image_dir, self.image_files[i]))
msk = load_image_rgb(os.path.join(self.label_dir, self.label_files[i]))
assert img.shape == msk.shape,\
'img.shape: {}, msk.shape: {}'.format(img.shape, msk.shape)
def load_image(self, image_id):
return load_image_rgb(os.path.join(self.image_dir, self.image_files[image_id]))
def load_mask(self, image_id):
rgb_mask = load_image_rgb(os.path.join(self.label_dir, self.label_files[image_id]))
mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1], self.num_classes - 1))
for cls in range(self.num_classes - 1):
colors = self.trainId2colors[cls]
cls_mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1]))
for color in colors:
cls_mask = np.logical_or(cls_mask, (rgb_mask == color).all(axis=2))
mask[:,:,cls] = cls_mask
return mask
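# A hedged usage sketch: build two Cityscapes-style labels, save them and read them
# back. The file name and label values are illustrative assumptions only.
if __name__ == '__main__':
    demo_labels = [
        Label('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
        Label('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
    ]
    save_labels(demo_labels, 'semantic_demo.json')
    assert load_labels('semantic_demo.json') == demo_labels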
| [
[
[
24,
34
],
[
504,
514
]
],
[
[
43,
45
],
[
3575,
3577
],
[
4252,
4254
],
[
4321,
4323
],
[
4387,
4389
],
[
4481,
4483
],
[
4607,
4609
],
[
4742,
4744
],
[
5391,
5393
],
[
5475,
5477
],
[
5713,
5715
],
[
5841,
5843
]
],
[
[
53,
57
],
[
2852,
2856
],
[
2944,
2948
]
],
[
[
65,
76
],
[
5081,
5083
],
[
5914,
5916
],
[
6102,
6104
],
[
6211,
6213
]
],
[
[
95,
99
],
[
5154,
5158
]
],
[
[
134,
148
],
[
5376,
5390
],
[
5460,
5474
],
[
5698,
5712
],
[
5826,
5840
]
],
[
[
496,
501
],
[
3043,
3048
]
],
[
[
2443,
2453
],
[
2802,
2812
]
],
[
[
2721,
2732
]
],
[
[
2890,
2901
],
[
3563,
3574
]
],
[
[
3276,
3288
]
]
] |
"""PyMC4 continuous random variables for tensorflow."""
import tensorflow_probability as tfp
from pymc4.distributions import abstract
from pymc4.distributions.tensorflow.distribution import BackendDistribution
tfd = tfp.distributions
__all__ = [
"Beta",
"Cauchy",
"ChiSquared",
"Exponential",
"Gamma",
"Gumbel",
"HalfCauchy",
"HalfNormal",
"InverseGamma",
"InverseGaussian",
"Kumaraswamy",
"Laplace",
"LogNormal",
"Logistic",
"LogitNormal",
"Normal",
"Pareto",
"StudentT",
"Triangular",
"Uniform",
"VonMises",
]
class Normal(BackendDistribution, abstract.Normal):
__doc__ = r"""{}
Developer Notes
---------------
Parameter mappings to TensorFlow Probability are as follows:
- mu: loc
- sigma: scale
""".format(
abstract.Normal.__doc__
)
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.Normal(loc=mu, scale=sigma)
class HalfNormal(BackendDistribution, abstract.HalfNormal):
__doc__ = r"""{}
Developer Notes
---------------
Parameter mappings to TensorFlow Probability are as follows:
- sigma: scale
""".format(
abstract.HalfNormal.__doc__
)
def _init_backend(self):
sigma = self.conditions["sigma"]
self._backend_distribution = tfd.HalfNormal(scale=sigma)
class Beta(BackendDistribution, abstract.Beta):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
        # alpha maps to TFP's concentration1 and beta to concentration0 under the usual Beta(alpha, beta) convention.
        self._backend_distribution = tfd.Beta(concentration1=alpha, concentration0=beta)
class Cauchy(BackendDistribution, abstract.Cauchy):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.Cauchy(loc=alpha, scale=beta)
class ChiSquared(BackendDistribution, abstract.ChiSquared):
def _init_backend(self):
nu = self.conditions["nu"]
self._backend_distribution = tfd.Chi2(df=nu)
class Exponential(BackendDistribution, abstract.Exponential):
def _init_backend(self):
lam = self.conditions["lam"]
self._backend_distribution = tfd.Exponential(rate=lam)
class Gamma(BackendDistribution, abstract.Gamma):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.Gamma(concentration=alpha, rate=beta)
class Gumbel(BackendDistribution, abstract.Gumbel):
def _init_backend(self):
mu, beta = self.conditions["mu"], self.conditions["beta"]
self._backend_distribution = tfd.Gumbel(loc=mu, scale=beta)
class HalfCauchy(BackendDistribution, abstract.HalfCauchy):
def _init_backend(self):
beta = self.conditions["beta"]
self._backend_distribution = tfd.HalfCauchy(loc=0, scale=beta)
class InverseGamma(BackendDistribution, abstract.InverseGamma):
def _init_backend(self):
alpha, beta = self.conditions["alpha"], self.conditions["beta"]
self._backend_distribution = tfd.InverseGamma(concentration=alpha, scale=beta)
class InverseGaussian(BackendDistribution, abstract.InverseGaussian):
def _init_backend(self):
mu, lam = self.conditions["mu"], self.conditions["lam"]
self._backend_distribution = tfd.InverseGaussian(loc=mu, concentration=lam)
class Kumaraswamy(BackendDistribution, abstract.Kumaraswamy):
def _init_backend(self):
a, b = self.conditions["a"], self.conditions["b"]
        # a maps to TFP's concentration1 and b to concentration0 under the usual Kumaraswamy(a, b) convention.
        self._backend_distribution = tfd.Kumaraswamy(concentration1=a, concentration0=b)
class Laplace(BackendDistribution, abstract.Laplace):
def _init_backend(self):
mu, b = self.conditions["mu"], self.conditions["b"]
self._backend_distribution = tfd.Laplace(loc=mu, scale=b)
class Logistic(BackendDistribution, abstract.Logistic):
def _init_backend(self):
mu, s = self.conditions["mu"], self.conditions["s"]
self._backend_distribution = tfd.Logistic(loc=mu, scale=s)
class LogitNormal(BackendDistribution, abstract.LogitNormal):
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=mu, scale=sigma),
bijector=tfp.bijectors.Sigmoid(),
name="LogitNormal",
)
class LogNormal(BackendDistribution, abstract.LogNormal):
def _init_backend(self):
mu, sigma = self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.LogNormal(loc=mu, scale=sigma)
class Pareto(BackendDistribution, abstract.Pareto):
def _init_backend(self):
alpha, m = self.conditions["alpha"], self.conditions["m"]
self._backend_distribution = tfd.Pareto(concentration=alpha, scale=m)
class StudentT(BackendDistribution, abstract.StudentT):
def _init_backend(self):
nu, mu, sigma = self.conditions["nu"], self.conditions["mu"], self.conditions["sigma"]
self._backend_distribution = tfd.StudentT(df=nu, loc=mu, scale=sigma)
class Triangular(BackendDistribution, abstract.Triangular):
def _init_backend(self):
lower, upper, c = self.conditions["lower"], self.conditions["upper"], self.conditions["c"]
self._backend_distribution = tfd.Triangular(low=lower, high=upper, peak=c)
class Uniform(BackendDistribution, abstract.Uniform):
def _init_backend(self):
lower, upper = self.conditions["lower"], self.conditions["upper"]
self._backend_distribution = tfd.Uniform(low=lower, high=upper)
class VonMises(BackendDistribution, abstract.VonMises):
def _init_backend(self):
mu, kappa = self.conditions["mu"], self.conditions["kappa"]
self._backend_distribution = tfd.VonMises(loc=mu, concentration=kappa)
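# A small standalone illustration of the parameter mapping described in the class
# docstrings above (PyMC-style mu/sigma map to TFP's loc/scale); this is not part
# of the pymc4 API surface.
if __name__ == "__main__":
    standard_normal = tfd.Normal(loc=0.0, scale=1.0)  # mu=0, sigma=1
    print(standard_normal.log_prob(0.5))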
| [
[
[
63,
92
],
[
218,
221
],
[
4400,
4403
]
],
[
[
125,
133
],
[
635,
643
],
[
838,
846
],
[
1075,
1083
],
[
1268,
1276
],
[
1472,
1480
],
[
1714,
1722
],
[
1944,
1952
],
[
2124,
2132
],
[
2311,
2319
],
[
2544,
2552
],
[
2765,
2773
],
[
2968,
2976
],
[
3225,
3233
],
[
3470,
3478
],
[
3706,
3714
],
[
3918,
3926
],
[
4135,
4143
],
[
4506,
4514
],
[
4732,
4740
],
[
4961,
4969
],
[
5223,
5231
],
[
5493,
5501
],
[
5725,
5733
]
],
[
[
190,
209
],
[
614,
633
],
[
1054,
1073
],
[
1451,
1470
],
[
1693,
1712
],
[
1923,
1942
],
[
2103,
2122
],
[
2290,
2309
],
[
2523,
2542
],
[
2744,
2763
],
[
2947,
2966
],
[
3204,
3223
],
[
3449,
3468
],
[
3685,
3704
],
[
3897,
3916
],
[
4114,
4133
],
[
4485,
4504
],
[
4711,
4730
],
[
4940,
4959
],
[
5202,
5221
],
[
5472,
5491
],
[
5704,
5723
]
],
[
[
212,
215
],
[
1003,
1006
],
[
1410,
1413
],
[
1626,
1629
],
[
1870,
1873
],
[
2067,
2070
],
[
2250,
2253
],
[
2466,
2469
],
[
2694,
2697
],
[
2892,
2895
],
[
3130,
3133
],
[
3382,
3385
],
[
3617,
3620
],
[
3851,
3854
],
[
4064,
4067
],
[
4292,
4295
],
[
4346,
4349
],
[
4661,
4664
],
[
4882,
4885
],
[
5142,
5145
],
[
5410,
5413
],
[
5652,
5655
],
[
5879,
5882
]
],
[
[
237,
244
]
],
[
[
607,
613
]
],
[
[
1043,
1053
]
],
[
[
1446,
1450
]
],
[
[
1686,
1692
]
],
[
[
1912,
1922
]
],
[
[
2091,
2102
]
],
[
[
2284,
2289
]
],
[
[
2516,
2522
]
],
[
[
2733,
2743
]
],
[
[
2934,
2946
]
],
[
[
3188,
3203
]
],
[
[
3437,
3448
]
],
[
[
3677,
3684
]
],
[
[
3888,
3896
]
],
[
[
4102,
4113
]
],
[
[
4475,
4484
]
],
[
[
4704,
4710
]
],
[
[
4931,
4939
]
],
[
[
5191,
5201
]
],
[
[
5464,
5471
]
],
[
[
5695,
5703
]
]
] |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import DatadogApiKey
from ._models_py3 import DatadogApiKeyListResponse
from ._models_py3 import DatadogHost
from ._models_py3 import DatadogHostListResponse
from ._models_py3 import DatadogHostMetadata
from ._models_py3 import DatadogInstallMethod
from ._models_py3 import DatadogLogsAgent
from ._models_py3 import DatadogMonitorResource
from ._models_py3 import DatadogMonitorResourceListResponse
from ._models_py3 import DatadogMonitorResourceUpdateParameters
from ._models_py3 import DatadogOrganizationProperties
from ._models_py3 import DatadogSetPasswordLink
from ._models_py3 import DatadogSingleSignOnProperties
from ._models_py3 import DatadogSingleSignOnResource
from ._models_py3 import DatadogSingleSignOnResourceListResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import FilteringTag
from ._models_py3 import LinkedResource
from ._models_py3 import LinkedResourceListResponse
from ._models_py3 import MonitoredResource
from ._models_py3 import MonitoredResourceListResponse
from ._models_py3 import MonitoringTagRules
from ._models_py3 import MonitoringTagRulesListResponse
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationResult
from ._models_py3 import ResourceProviderDefaultErrorResponse
from ._models_py3 import UserInfo
except (SyntaxError, ImportError):
from ._models import DatadogApiKey # type: ignore
from ._models import DatadogApiKeyListResponse # type: ignore
from ._models import DatadogHost # type: ignore
from ._models import DatadogHostListResponse # type: ignore
from ._models import DatadogHostMetadata # type: ignore
from ._models import DatadogInstallMethod # type: ignore
from ._models import DatadogLogsAgent # type: ignore
from ._models import DatadogMonitorResource # type: ignore
from ._models import DatadogMonitorResourceListResponse # type: ignore
from ._models import DatadogMonitorResourceUpdateParameters # type: ignore
from ._models import DatadogOrganizationProperties # type: ignore
from ._models import DatadogSetPasswordLink # type: ignore
from ._models import DatadogSingleSignOnProperties # type: ignore
from ._models import DatadogSingleSignOnResource # type: ignore
from ._models import DatadogSingleSignOnResourceListResponse # type: ignore
from ._models import ErrorResponseBody # type: ignore
from ._models import FilteringTag # type: ignore
from ._models import LinkedResource # type: ignore
from ._models import LinkedResourceListResponse # type: ignore
from ._models import MonitoredResource # type: ignore
from ._models import MonitoredResourceListResponse # type: ignore
from ._models import MonitoringTagRules # type: ignore
from ._models import MonitoringTagRulesListResponse # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OperationResult # type: ignore
from ._models import ResourceProviderDefaultErrorResponse # type: ignore
from ._models import UserInfo # type: ignore
from ._microsoft_datadog_client_enums import (
LiftrResourceCategories,
ManagedIdentityTypes,
MarketplaceSubscriptionStatus,
MonitoringStatus,
ProvisioningState,
SingleSignOnStates,
TagAction,
)
__all__ = [
'DatadogApiKey',
'DatadogApiKeyListResponse',
'DatadogHost',
'DatadogHostListResponse',
'DatadogHostMetadata',
'DatadogInstallMethod',
'DatadogLogsAgent',
'DatadogMonitorResource',
'DatadogMonitorResourceListResponse',
'DatadogMonitorResourceUpdateParameters',
'DatadogOrganizationProperties',
'DatadogSetPasswordLink',
'DatadogSingleSignOnProperties',
'DatadogSingleSignOnResource',
'DatadogSingleSignOnResourceListResponse',
'ErrorResponseBody',
'FilteringTag',
'LinkedResource',
'LinkedResourceListResponse',
'MonitoredResource',
'MonitoredResourceListResponse',
'MonitoringTagRules',
'MonitoringTagRulesListResponse',
'OperationDisplay',
'OperationListResult',
'OperationResult',
'ResourceProviderDefaultErrorResponse',
'UserInfo',
'LiftrResourceCategories',
'ManagedIdentityTypes',
'MarketplaceSubscriptionStatus',
'MonitoringStatus',
'ProvisioningState',
'SingleSignOnStates',
'TagAction',
]
| [
[
[
502,
515
]
],
[
[
545,
570
]
],
[
[
600,
611
]
],
[
[
641,
664
]
],
[
[
694,
713
]
],
[
[
743,
763
]
],
[
[
793,
809
]
],
[
[
839,
861
]
],
[
[
891,
925
]
],
[
[
955,
993
]
],
[
[
1023,
1052
]
],
[
[
1082,
1104
]
],
[
[
1134,
1163
]
],
[
[
1193,
1220
]
],
[
[
1250,
1289
]
],
[
[
1319,
1336
]
],
[
[
1366,
1378
]
],
[
[
1408,
1422
]
],
[
[
1452,
1478
]
],
[
[
1508,
1525
]
],
[
[
1555,
1584
]
],
[
[
1614,
1632
]
],
[
[
1662,
1692
]
],
[
[
1722,
1738
]
],
[
[
1768,
1787
]
],
[
[
1817,
1832
]
],
[
[
1862,
1898
]
],
[
[
1928,
1936
]
],
[
[
1997,
2010
]
],
[
[
2052,
2077
]
],
[
[
2119,
2130
]
],
[
[
2172,
2195
]
],
[
[
2237,
2256
]
],
[
[
2298,
2318
]
],
[
[
2360,
2376
]
],
[
[
2418,
2440
]
],
[
[
2482,
2516
]
],
[
[
2558,
2596
]
],
[
[
2638,
2667
]
],
[
[
2709,
2731
]
],
[
[
2773,
2802
]
],
[
[
2844,
2871
]
],
[
[
2913,
2952
]
],
[
[
2994,
3011
]
],
[
[
3053,
3065
]
],
[
[
3107,
3121
]
],
[
[
3163,
3189
]
],
[
[
3231,
3248
]
],
[
[
3290,
3319
]
],
[
[
3361,
3379
]
],
[
[
3421,
3451
]
],
[
[
3493,
3509
]
],
[
[
3551,
3570
]
],
[
[
3612,
3627
]
],
[
[
3669,
3705
]
],
[
[
3747,
3755
]
],
[
[
3824,
3847
]
],
[
[
3853,
3873
]
],
[
[
3879,
3908
]
],
[
[
3914,
3930
]
],
[
[
3936,
3953
]
],
[
[
3959,
3977
]
],
[
[
3983,
3992
]
],
[
[
3997,
4004
]
]
] |
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Startup message
To begin a session, the frontend opens a connection to the backend and sends a
Startup message.
"""
from __future__ import print_function, division, absolute_import
import platform
import os
from struct import pack
# noinspection PyUnresolvedReferences,PyCompatibility
import vertica_python
from ..message import BulkFrontendMessage
class Startup(BulkFrontendMessage):
message_id = None
def __init__(self, user, database, session_label, os_user_name):
BulkFrontendMessage.__init__(self)
try:
os_platform = platform.platform()
except Exception as e:
os_platform = ''
print("WARN: Cannot get the OS info: {}".format(str(e)))
try:
pid = str(os.getpid())
except Exception as e:
pid = '0'
print("WARN: Cannot get the process ID: {}".format(str(e)))
self.parameters = {
b'user': user,
b'database': database,
b'client_label': session_label,
b'client_type': 'vertica-python',
b'client_version': vertica_python.__version__,
b'client_os': os_platform,
b'client_os_user_name': os_user_name,
b'client_pid': pid,
}
def read_bytes(self):
# The fixed protocol version is followed by pairs of parameter name and value strings.
# A zero byte is required as a terminator after the last name/value pair.
# Parameters can appear in any order.
fixed_protocol_version = 3 << 16 | 5
bytes_ = pack('!I', fixed_protocol_version)
# The frontend sends a requested protocol version to the backend.
# Old servers (protocol < 3.7) ignore this value and use the fixed protocol version.
# New servers (protocol >= 3.7) would try to find the common protocol
# version in use for both client and server, and send back a ParameterStatus
# message (key='protocol_version', value=<effective protocol version>)
bytes_ += pack('!16sxIx', b'protocol_version', vertica_python.PROTOCOL_VERSION)
for k in self.parameters:
v = self.parameters[k].encode('utf-8')
bytes_ += pack('!{}sx{}sx'.format(len(k), len(v)), k, v)
bytes_ += pack('x')
return bytes_
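# Worked check of the fixed protocol version encoding used above:
# 3 << 16 | 5 == 0x30005, i.e. major version 3 in the high 16 bits and minor
# version 5 in the low 16 bits, packed big-endian by pack('!I', ...).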
| [
[
[
1908,
1922
]
],
[
[
1924,
1932
]
],
[
[
1934,
1949
]
],
[
[
1958,
1966
],
[
2334,
2342
]
],
[
[
1974,
1976
],
[
2519,
2521
]
],
[
[
1996,
2000
],
[
3340,
3344
],
[
3803,
3807
],
[
3981,
3985
],
[
4047,
4051
]
],
[
[
2064,
2078
],
[
2869,
2883
],
[
3840,
3854
]
],
[
[
2101,
2120
],
[
2137,
2156
],
[
2259,
2278
]
],
[
[
2129,
2136
]
]
] |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_setting
short_description: Configure router settings in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by allowing the
      user to set and modify the router feature and setting category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
router_setting:
description:
- Configure router settings.
default: null
suboptions:
hostname:
description:
- Hostname for this virtual domain router.
show-filter:
description:
- Prefix-list as filter for showing routes. Source router.prefix-list.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure router settings.
fortios_router_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_setting:
hostname: "myhostname"
show-filter: "<your_own_value> (source router.prefix-list.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_router_setting_data(json):
option_list = ['hostname', 'show-filter']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def router_setting(data, fos):
vdom = data['vdom']
router_setting_data = data['router_setting']
flattened_data = flatten_multilists_attributes(router_setting_data)
filtered_data = filter_router_setting_data(flattened_data)
return fos.set('router',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_router(data, fos):
login(data)
if data['router_setting']:
resp = router_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"router_setting": {
"required": False, "type": "dict",
"options": {
"hostname": {"required": False, "type": "str"},
"show-filter": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_router(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
[
[
42,
57
]
],
[
[
59,
67
]
],
[
[
69,
83
]
],
[
[
847,
860
]
],
[
[
869,
885
]
],
[
[
1009,
1022
]
],
[
[
2797,
2805
]
],
[
[
3262,
3268
]
],
[
[
4564,
4577
],
[
6751,
6764
]
],
[
[
4579,
4582
],
[
4702,
4705
],
[
4772,
4775
],
[
4807,
4810
],
[
4828,
4831
]
],
[
[
4596,
4601
],
[
5946,
5951
]
],
[
[
4870,
4896
],
[
5738,
5764
]
],
[
[
5148,
5177
],
[
5667,
5696
]
],
[
[
5546,
5560
],
[
6005,
6019
]
],
[
[
5915,
5929
],
[
7054,
7068
]
],
[
[
6133,
6137
],
[
7272,
7276
]
],
[
[
6998,
7001
],
[
7084,
7087
]
]
] |
import os
import posixpath
from enum import Enum
from fastapi import Path, HTTPException
from utils import security
class UploadPath(str, Enum):
default = "default"
UPLOAD_PATH_DICT = {
UploadPath.default: "default/"
}
def get_upload(upload_key: UploadPath = Path(..., description="上传文件块位置")):
"""
    Get the upload directory for the given upload key.
    :param upload_key: key selecting the configured upload root
    :return: a function that safely joins a folder name under that root and creates the directory
"""
root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key])
def func(folder):
path = security.safe_join(root_path, folder)
os.makedirs(path, exist_ok=True)
return path
return func
class DownloadPath(str, Enum):
default = "default"
DOWNLOAD_PATH_DICT = {
DownloadPath.default: "default/"
}
def get_download(download_key: DownloadPath = Path(..., description="下载文件块位置")):
"""
    Get the download path resolver for the given download key.
    :param download_key: key selecting the configured download root
    :return: a function that maps a folder name to (file path, file name) of the first file in it
"""
root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key])
def func(folder):
path = security.safe_join(root_path, folder)
if not posixpath.exists(path):
raise HTTPException(404, "The access file does not exist")
for filename in os.listdir(path):
return posixpath.join(path, filename), filename
return func
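# A hedged sketch of wiring the dependency above into a route; the app, route path
# and response shape are assumptions, not part of the original module. get_download
# can be wired the same way.
from fastapi import Depends, FastAPI
app = FastAPI()
@app.post("/upload/{upload_key}")
def make_upload_dir(folder: str, get_dir=Depends(get_upload)):
    # get_upload resolves the base directory for upload_key; calling the returned
    # function joins `folder` safely under it and creates it if missing.
    return {"path": get_dir(folder)}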
| [
[
[
7,
9
],
[
526,
528
],
[
1148,
1150
]
],
[
[
17,
26
],
[
394,
403
],
[
886,
895
],
[
1029,
1038
],
[
1185,
1194
]
],
[
[
44,
48
],
[
142,
146
],
[
622,
626
]
],
[
[
70,
74
],
[
275,
279
],
[
765,
769
]
],
[
[
76,
89
],
[
1071,
1084
]
],
[
[
109,
117
],
[
480,
488
],
[
976,
984
]
],
[
[
126,
136
],
[
200,
210
],
[
262,
272
]
],
[
[
175,
191
],
[
412,
428
]
],
[
[
239,
249
]
],
[
[
604,
616
],
[
682,
694
],
[
750,
762
]
],
[
[
655,
673
],
[
904,
922
]
],
[
[
723,
735
]
]
] |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Repeatmodeler(Package):
"""RepeatModeler is a de-novo repeat family identification and modeling
package."""
homepage = "http://www.repeatmasker.org/RepeatModeler/"
url = "http://www.repeatmasker.org/RepeatModeler/RepeatModeler-open-1.0.11.tar.gz"
version('1.0.11', sha256='7ff0d588b40f9ad5ce78876f3ab8d2332a20f5128f6357413f741bb7fa172193')
depends_on('perl', type=('build', 'run'))
depends_on('perl-json', type=('build', 'run'))
depends_on('perl-uri', type=('build', 'run'))
depends_on('perl-libwww-perl', type=('build', 'run'))
depends_on('repeatmasker', type='run')
depends_on('recon+repeatmasker', type='run')
depends_on('repeatscout', type='run')
depends_on('trf', type='run')
depends_on('nseg', type='run')
depends_on('ncbi-rmblastn', type='run')
def install(self, spec, prefix):
# like repeatmasker, another interactive installer
# questions:
# 1. <enter to continue>
# 2. <perl path, default is OK>
# 3. <source path, default is OK>
# 4. RepeatMasker bin path
# 5. RECON bin path
# 6. RepeatScout bin path
# 7. Nseg bin path
# 8. trf bin path
# 9. Add a search engine:
# 1. RMBlast -> Path, Default? (Y/N)
# 2. WUBlast/ABBlast -> Path, Default? (Y/N)
# 3. Done
config_answers = [
'', '', '',
spec['repeatmasker'].prefix.bin,
spec['recon'].prefix.bin,
spec['repeatscout'].prefix.bin,
spec['nseg'].prefix.bin,
spec['trf'].prefix.bin,
'1', spec['ncbi-rmblastn'].prefix.bin, 'Y',
'3',
]
config_filename = 'spack-config.in'
with open(config_filename, 'w') as f:
f.write('\n'.join(config_answers))
with open(config_filename, 'r') as f:
perl = which('perl')
perl('configure', input=f)
install_tree('.', prefix.bin)
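    # Hypothetical command-line usage once this recipe is available in a Spack
    # repository (the package name follows the class name):
    #   spack install repeatmodeler@1.0.11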
| [
[
[
216,
217
],
[
240,
247
],
[
503,
510
],
[
601,
611
],
[
647,
657
],
[
698,
708
],
[
748,
758
],
[
807,
817
],
[
850,
860
],
[
899,
909
],
[
941,
951
],
[
975,
985
],
[
1010,
1020
],
[
2162,
2167
],
[
2224,
2236
]
],
[
[
226,
239
]
]
] |
import math
pi = math.pi
raio = float(input('Qual é o raio da esfera?: '))
volume_esf = 4/3*pi*math.pow(raio, 3)
litro = 1
lata = litro*5
precolata = 50.00
totaltinta = volume_esf *lata
totalpreco = totaltinta * precolata
print(f'O volume da esfera é {volume_esf: .2f}')
print(f'A quantidade de tinta necessária é {totaltinta} litros de tinta')
print(f'O total a pagar é: R$ {totalpreco: .2f}')
# NOTE: still not sure about this part
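# Worked check of the arithmetic above (illustrative, mirroring the code as written):
# for raio = 3, volume_esf = 4/3 * pi * 3**3 ≈ 113.10, totaltinta = 113.10 * 5 ≈ 565.49
# and totalpreco = 565.49 * 50 ≈ 28274.33.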
| [
[
[
9,
13
],
[
20,
24
],
[
98,
102
]
],
[
[
15,
17
],
[
95,
97
]
],
[
[
28,
32
],
[
107,
111
]
],
[
[
78,
88
],
[
175,
185
],
[
260,
270
]
],
[
[
118,
123
],
[
135,
140
]
],
[
[
128,
132
],
[
187,
191
]
],
[
[
143,
152
],
[
218,
227
]
],
[
[
162,
172
],
[
205,
215
],
[
324,
334
]
],
[
[
192,
202
],
[
384,
394
]
]
] |
from datetime import datetime, timezone
from http import HTTPStatus
from json import dumps, load
from logging import getLogger
from os import environ
from unittest.mock import MagicMock, patch
from mypy_boto3_events import EventBridgeClient
from mypy_boto3_lambda import LambdaClient
from mypy_boto3_sns.type_defs import MessageAttributeValueTypeDef
from pytest import mark
from pytest_subtests import SubTests
from backend.api_keys import EVENT_KEY
from backend.api_responses import STATUS_CODE_KEY
from backend.aws_message_attributes import DATA_TYPE_STRING
from backend.notify_status_update.task import (
EVENT_DETAIL_KEY,
MESSAGE_ATTRIBUTE_DATASET_KEY,
MESSAGE_ATTRIBUTE_STATUS_KEY,
SLACK_URL_ENV_NAME,
STEP_FUNCTION_ARN_KEY,
STEP_FUNCTION_STARTDATE_KEY,
STEP_FUNCTION_STOPDATE_KEY,
WEBHOOK_MESSAGE_BLOCKS_KEY,
lambda_handler,
publish_sns_message,
)
from backend.resources import ResourceName
from backend.step_function import Outcome
from backend.step_function_keys import (
ASSET_UPLOAD_KEY,
DATASET_ID_KEY,
DATASET_PREFIX_KEY,
ERRORS_KEY,
INPUT_KEY,
JOB_STATUS_FAILED,
JOB_STATUS_RUNNING,
JOB_STATUS_SUCCEEDED,
METADATA_UPLOAD_KEY,
NEW_VERSION_S3_LOCATION,
OUTPUT_KEY,
STATUS_KEY,
STEP_FUNCTION_KEY,
UPDATE_DATASET_KEY,
UPLOAD_STATUS_KEY,
VALIDATION_KEY,
VERSION_ID_KEY,
)
from .aws_utils import any_arn_formatted_string, any_lambda_context, any_s3_url
from .general_generators import any_https_url
from .stac_generators import any_dataset_id, any_dataset_prefix, any_dataset_version_id
STEP_FUNCTION_START_MILLISECOND_TIMESTAMP = round(
datetime(
2001, 2, 3, hour=4, minute=5, second=6, microsecond=789876, tzinfo=timezone.utc
).timestamp()
* 1000
)
STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP = STEP_FUNCTION_START_MILLISECOND_TIMESTAMP + 10
@patch("backend.notify_status_update.task.WebhookClient.send")
@patch("backend.notify_status_update.task.get_import_status_given_arn")
def should_notify_slack_with_finished_details_when_url_set(
step_func_status_mock: MagicMock, webhook_client_mock: MagicMock
) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
step_func_status_mock.return_value = {
STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_SUCCEEDED},
VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
}
mock_slack_url = any_https_url()
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_SUCCEEDED,
STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(),
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
VERSION_ID_KEY: any_dataset_version_id(),
}
),
OUTPUT_KEY: dumps(
{
UPLOAD_STATUS_KEY: {
VALIDATION_KEY: "",
ASSET_UPLOAD_KEY: "",
METADATA_UPLOAD_KEY: "",
},
UPDATE_DATASET_KEY: {NEW_VERSION_S3_LOCATION: any_s3_url()},
}
),
STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP,
STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP,
}
}
lambda_handler(notify_status_update_input, any_lambda_context())
    # Then assert there are 15 slack_sdk message 'blocks' sent to the webhook URL
webhook_client_mock.assert_called_once()
assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 15
@patch("backend.notify_status_update.task.WebhookClient.send")
def should_not_notify_slack_when_step_function_running(webhook_client_mock: MagicMock) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
mock_slack_url = any_https_url()
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_RUNNING,
STEP_FUNCTION_STOPDATE_KEY: None,
}
}
lambda_handler(notify_status_update_input, any_lambda_context())
# Then
webhook_client_mock.assert_not_called()
@patch("backend.notify_status_update.task.WebhookClient.send")
@patch("backend.notify_status_update.task.get_import_status_given_arn")
def should_notify_slack_when_step_function_failed(
step_func_status_mock: MagicMock, webhook_client_mock: MagicMock
) -> None:
# Given
webhook_client_mock.return_value.status_code = HTTPStatus.OK
mock_slack_url = any_https_url()
step_func_status_mock.return_value = {
STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_FAILED},
VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []},
}
with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch(
"backend.notify_status_update.task.publish_sns_message"
):
# When
notify_status_update_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_FAILED,
STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(),
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
VERSION_ID_KEY: any_dataset_version_id(),
}
),
STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP,
STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP,
},
OUTPUT_KEY: None,
}
lambda_handler(notify_status_update_input, any_lambda_context())
    # Then assert there are 13 slack_sdk message 'blocks' sent to the webhook URL
webhook_client_mock.assert_called_once()
assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 13
@patch("backend.notify_status_update.task.WebhookClient.send")
def should_log_and_not_post_to_slack_when_url_not_set(
webhook_client_mock: MagicMock, subtests: SubTests
) -> None:
# Given
logger = getLogger("backend.notify_status_update.task")
with patch("backend.notify_status_update.task.publish_sns_message"), patch.object(
logger, "debug"
) as logger_mock:
# When
lambda_handler({}, any_lambda_context())
# Then
with subtests.test("no slack message"):
assert not webhook_client_mock.called
with subtests.test("log created"):
expected_log = dumps({EVENT_KEY: {}})
logger_mock.assert_any_call(expected_log)
@patch("backend.notify_status_update.task.get_param")
def should_publish_sns_message(get_param_mock: MagicMock) -> None:
# Given
get_param_mock.return_value = topic_arn = any_arn_formatted_string()
dataset_prefix = any_dataset_prefix()
publish_sns_message_input = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_SUCCEEDED,
INPUT_KEY: dumps(
{
DATASET_PREFIX_KEY: dataset_prefix,
}
),
}
}
expected_sns_call = {
"TopicArn": topic_arn,
"Message": dumps(publish_sns_message_input),
"MessageAttributes": {
MESSAGE_ATTRIBUTE_DATASET_KEY: MessageAttributeValueTypeDef(
DataType=DATA_TYPE_STRING, StringValue=dataset_prefix
),
MESSAGE_ATTRIBUTE_STATUS_KEY: MessageAttributeValueTypeDef(
DataType=DATA_TYPE_STRING, StringValue=JOB_STATUS_SUCCEEDED
),
},
}
# When
with patch("backend.notify_status_update.task.SNS_CLIENT.publish") as sns_client_mock:
publish_sns_message(publish_sns_message_input)
# Then
assert sns_client_mock.call_args[1] == expected_sns_call
@mark.infrastructure
def should_launch_notify_slack_endpoint_lambda_function(
lambda_client: LambdaClient, events_client: EventBridgeClient
) -> None:
notify_status_lambda_arn = events_client.list_targets_by_rule(
Rule=ResourceName.CLOUDWATCH_RULE_NAME.value
)["Targets"][0]["Arn"]
# When
body = {
EVENT_DETAIL_KEY: {
STATUS_KEY: JOB_STATUS_FAILED,
INPUT_KEY: dumps(
{
DATASET_ID_KEY: any_dataset_id(),
DATASET_PREFIX_KEY: any_dataset_prefix(),
}
),
},
OUTPUT_KEY: None,
}
resp = load(
lambda_client.invoke(
FunctionName=notify_status_lambda_arn,
Payload=dumps(body).encode(),
)["Payload"]
)
assert resp.get(STATUS_CODE_KEY) == HTTPStatus.OK, resp
| [
[
[
21,
29
],
[
1661,
1669
]
],
[
[
31,
39
],
[
1746,
1754
]
],
[
[
57,
67
],
[
2221,
2231
],
[
4382,
4392
],
[
5259,
5269
],
[
9579,
9589
]
],
[
[
85,
90
],
[
3001,
3006
],
[
3289,
3294
],
[
6032,
6037
],
[
7434,
7439
],
[
7888,
7893
],
[
8095,
8100
],
[
9153,
9158
],
[
9489,
9494
]
],
[
[
92,
96
],
[
9382,
9386
]
],
[
[
117,
126
],
[
7024,
7033
]
],
[
[
142,
149
],
[
2645,
2652
],
[
4455,
4462
],
[
5679,
5686
]
],
[
[
176,
185
],
[
2104,
2113
],
[
2136,
2145
],
[
4298,
4307
],
[
5142,
5151
],
[
5174,
5183
],
[
6958,
6967
],
[
7610,
7619
]
],
[
[
187,
192
],
[
1883,
1888
],
[
1946,
1951
],
[
4160,
4165
],
[
4930,
4935
],
[
4993,
4998
],
[
6816,
6821
],
[
7510,
7515
],
[
2634,
2639
],
[
2693,
2698
],
[
4444,
4449
],
[
4503,
4508
],
[
5668,
5673
],
[
5727,
5732
],
[
7081,
7086
],
[
7145,
7150
],
[
8519,
8524
]
],
[
[
224,
241
],
[
8857,
8874
]
],
[
[
272,
284
],
[
8828,
8840
]
],
[
[
322,
350
],
[
8203,
8231
],
[
8360,
8388
]
],
[
[
370,
374
],
[
8732,
8736
]
],
[
[
403,
411
],
[
6979,
6987
]
],
[
[
442,
451
],
[
7441,
7450
]
],
[
[
486,
501
],
[
9559,
9574
]
],
[
[
545,
561
],
[
8258,
8274
],
[
8415,
8431
]
],
[
[
614,
630
],
[
2837,
2853
],
[
4647,
4663
],
[
5871,
5887
],
[
7799,
7815
],
[
9067,
9083
]
],
[
[
636,
665
],
[
8172,
8201
]
],
[
[
671,
699
],
[
8330,
8358
]
],
[
[
705,
723
],
[
2655,
2673
],
[
4465,
4483
],
[
5689,
5707
]
],
[
[
729,
750
],
[
2923,
2944
],
[
5954,
5975
]
],
[
[
756,
783
],
[
3683,
3710
],
[
6308,
6335
]
],
[
[
789,
815
],
[
3771,
3797
],
[
4731,
4757
],
[
6396,
6422
]
],
[
[
821,
847
],
[
4122,
4148
],
[
6778,
6804
]
],
[
[
853,
867
],
[
3874,
3888
],
[
4798,
4812
],
[
6530,
6544
],
[
7228,
7242
]
],
[
[
873,
892
],
[
8609,
8628
]
],
[
[
926,
938
],
[
8967,
8979
]
],
[
[
973,
980
],
[
2379,
2386
],
[
2461,
2468
],
[
2540,
2547
],
[
5451,
5458
],
[
5533,
5540
],
[
5612,
5619
]
],
[
[
1026,
1042
],
[
2509,
2525
],
[
3439,
3455
],
[
5581,
5597
]
],
[
[
1048,
1062
],
[
3054,
3068
],
[
6085,
6099
],
[
9198,
9212
]
],
[
[
1068,
1086
],
[
3112,
3130
],
[
6143,
6161
],
[
7933,
7951
],
[
9252,
9270
]
],
[
[
1092,
1102
],
[
2402,
2412
],
[
2484,
2494
],
[
2563,
2573
],
[
5474,
5484
],
[
5556,
5566
],
[
5635,
5645
]
],
[
[
1108,
1117
],
[
2990,
2999
],
[
6021,
6030
],
[
7877,
7886
],
[
9142,
9151
]
],
[
[
1123,
1140
],
[
5394,
5411
],
[
5919,
5936
],
[
9111,
9128
]
],
[
[
1146,
1164
],
[
4695,
4713
]
],
[
[
1170,
1190
],
[
2319,
2339
],
[
2885,
2905
],
[
7843,
7863
],
[
8445,
8465
]
],
[
[
1196,
1215
],
[
2427,
2446
],
[
3489,
3508
],
[
5499,
5518
]
],
[
[
1221,
1244
],
[
3586,
3609
]
],
[
[
1250,
1260
],
[
3277,
3287
],
[
6493,
6503
],
[
9346,
9356
]
],
[
[
1266,
1276
],
[
2307,
2317
],
[
2367,
2377
],
[
2449,
2459
],
[
2528,
2538
],
[
2873,
2883
],
[
4683,
4693
],
[
5382,
5392
],
[
5439,
5449
],
[
5521,
5531
],
[
5600,
5610
],
[
5907,
5917
],
[
7831,
7841
],
[
9099,
9109
]
],
[
[
1282,
1299
],
[
2287,
2304
],
[
5362,
5379
]
],
[
[
1305,
1323
],
[
3565,
3583
]
],
[
[
1329,
1346
],
[
3342,
3359
]
],
[
[
1352,
1366
],
[
2350,
2364
],
[
3391,
3405
],
[
5422,
5436
]
],
[
[
1372,
1386
],
[
3178,
3192
],
[
6209,
6223
]
],
[
[
1414,
1438
],
[
2946,
2970
],
[
5977,
6001
],
[
7688,
7712
]
],
[
[
1440,
1458
],
[
3917,
3935
],
[
4841,
4859
],
[
6573,
6591
],
[
7247,
7265
]
],
[
[
1460,
1470
],
[
3611,
3621
]
],
[
[
1503,
1516
],
[
2608,
2621
],
[
4418,
4431
],
[
5294,
5307
]
],
[
[
1546,
1560
],
[
3070,
3084
],
[
6101,
6115
],
[
9214,
9228
]
],
[
[
1562,
1580
],
[
3132,
3150
],
[
6163,
6181
],
[
7736,
7754
],
[
9272,
9290
]
],
[
[
1582,
1604
],
[
3194,
3216
],
[
6225,
6247
]
],
[
[
1606,
1647
],
[
1833,
1874
],
[
3712,
3753
],
[
6337,
6378
]
],
[
[
1790,
1830
],
[
3799,
3839
],
[
6424,
6464
]
],
[
[
2021,
2075
]
],
[
[
4226,
4276
]
],
[
[
5068,
5113
]
],
[
[
6882,
6931
]
],
[
[
7567,
7593
]
],
[
[
8756,
8807
]
]
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 08:55:39 2020
@author: rolly
"""
import config
from twilio.rest import Client
#https://api.whatsapp.com/send?phone=14155238886&text=join%20actual-nor&source=&data=
def sendMsg(num, msg):
    """Send a WhatsApp message to `num` via the Twilio sandbox sender number."""
client = Client(config.account_sid, config.auth_token)
message = client.messages.create(
to="whatsapp:+"+num,
from_="whatsapp:+14155238886",
body=msg)
print(message.sid)
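# A minimal usage sketch (illustrative, not part of the original script): the
# recipient number below is hypothetical, and valid Twilio credentials must be
# defined in config.py for the call to succeed.
if __name__ == '__main__':
    sendMsg('6281234567890', 'Hello from the Twilio WhatsApp sandbox')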
| [
[
[
91,
97
],
[
259,
265
],
[
279,
285
]
],
[
[
122,
128
],
[
252,
258
]
],
[
[
221,
228
]
]
] |
import logging
from typing import Any, Dict, List, Optional, Union
from ..models import SnsModel
from ..types import Model
from .base import BaseEnvelope
logger = logging.getLogger(__name__)
class SnsEnvelope(BaseEnvelope):
"""SNS Envelope to extract array of Records
The record's body parameter is a string, though it can also be a JSON encoded string.
Regardless of its type it'll be parsed into a BaseModel object.
    Note: Records will be parsed the same way, so if model is str,
    all items in the list will be parsed as str and not as JSON (and vice versa)
"""
def parse(self, data: Optional[Union[Dict[str, Any], Any]], model: Model) -> List[Optional[Model]]:
"""Parses records found with model provided
Parameters
----------
data : Dict
Lambda event to be parsed
model : Model
Data model provided to parse after extracting data using envelope
Returns
-------
List
List of records parsed with model provided
"""
logger.debug(f"Parsing incoming data with SNS model {SnsModel}")
parsed_envelope = SnsModel.parse_obj(data)
output = []
logger.debug(f"Parsing SNS records in `body` with {model}")
for record in parsed_envelope.Records:
output.append(self._parse(data=record.Sns.Message, model=model))
return output
| [
[
[
7,
14
],
[
165,
172
]
],
[
[
34,
37
],
[
643,
646
],
[
649,
652
]
],
[
[
39,
43
],
[
633,
637
]
],
[
[
45,
49
],
[
673,
677
]
],
[
[
51,
59
],
[
678,
686
],
[
618,
626
]
],
[
[
61,
66
],
[
627,
632
]
],
[
[
89,
97
],
[
1119,
1127
],
[
1157,
1165
]
],
[
[
118,
123
],
[
687,
692
],
[
663,
668
]
],
[
[
142,
154
],
[
213,
225
]
],
[
[
156,
162
],
[
1066,
1072
],
[
1210,
1216
]
],
[
[
201,
212
]
]
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_datacenter
short_description: Manage VMware vSphere Datacenters
description:
- This module can be used to manage (create, delete) VMware vSphere Datacenters.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Kamil Szczygiel (@kamsz)
notes:
- Tested on vSphere 6.0, 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: True
state:
description:
- If the datacenter should be present or absent.
choices: [ present, absent ]
default: present
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create Datacenter
vmware_datacenter:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
state: present
delegate_to: localhost
- name: Delete Datacenter
vmware_datacenter:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
state: absent
delegate_to: localhost
register: datacenter_delete_result
'''
RETURN = """#
"""
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task
from ansible.module_utils._text import to_native
class VmwareDatacenterManager(PyVmomi):
def __init__(self, module):
super(VmwareDatacenterManager, self).__init__(module)
self.datacenter_name = self.params.get('datacenter_name')
self.datacenter_obj = self.get_datacenter()
def ensure(self):
state = self.module.params.get('state')
if state == 'present':
self.create_datacenter()
if state == 'absent':
self.destroy_datacenter()
def get_datacenter(self):
try:
datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name)
return datacenter_obj
except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault:
self.module.fail_json(msg="Failed to get datacenter '%s'"
" due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to get datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def create_datacenter(self):
folder = self.content.rootFolder
changed = False
try:
if not self.datacenter_obj and not self.module.check_mode:
changed = True
folder.CreateDatacenter(name=self.datacenter_name)
self.module.exit_json(changed=changed)
except vim.fault.DuplicateName as duplicate_name:
self.module.exit_json(changed=changed)
except vim.fault.InvalidName as invalid_name:
self.module.fail_json(msg="Specified datacenter name '%s' is an"
" invalid name : %s" % (self.datacenter_name,
to_native(invalid_name.msg)))
except vmodl.fault.NotSupported as not_supported:
# This should never happen
self.module.fail_json(msg="Trying to create a datacenter '%s' on"
" an incorrect folder object : %s" % (self.datacenter_name,
to_native(not_supported.msg)))
except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
self.module.fail_json(msg="Failed to create a datacenter"
" '%s' due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to create a datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def destroy_datacenter(self):
results = dict(changed=False)
try:
if self.datacenter_obj and not self.module.check_mode:
task = self.datacenter_obj.Destroy_Task()
changed, result = wait_for_task(task)
results['changed'] = changed
results['result'] = result
self.module.exit_json(**results)
except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
self.module.fail_json(msg="Failed to delete a datacenter"
" '%s' due to : %s" % (self.datacenter_name,
to_native(runtime_fault.msg)))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to delete a datacenter"
" '%s' due to generic error: %s" % (self.datacenter_name,
to_native(generic_exc)))
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
datacenter_name=dict(required=True, type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
vmware_datacenter_mgr = VmwareDatacenterManager(module)
vmware_datacenter_mgr.ensure()
if __name__ == '__main__':
main()
| [
[
[
256,
271
]
],
[
[
273,
281
]
],
[
[
283,
297
]
],
[
[
298,
311
]
],
[
[
320,
336
]
],
[
[
435,
448
]
],
[
[
1148,
1156
]
],
[
[
1711,
1717
]
],
[
[
1759,
1762
],
[
3598,
3601
],
[
3707,
3710
],
[
5417,
5420
]
],
[
[
1764,
1769
],
[
2658,
2663
],
[
2677,
2682
],
[
4014,
4019
],
[
4395,
4400
],
[
4415,
4420
],
[
5437,
5442
],
[
5457,
5462
]
],
[
[
1839,
1852
],
[
6317,
6330
]
],
[
[
1893,
1900
],
[
2044,
2051
]
],
[
[
1902,
1925
],
[
2548,
2571
]
],
[
[
1927,
1947
],
[
6079,
6099
]
],
[
[
1949,
1962
],
[
5248,
5261
]
],
[
[
2002,
2011
],
[
2919,
2928
],
[
3226,
3235
],
[
3969,
3978
],
[
4348,
4357
],
[
4666,
4675
],
[
4978,
4987
],
[
5708,
5717
],
[
6020,
6029
]
],
[
[
2020,
2043
],
[
2100,
2123
],
[
6415,
6438
]
],
[
[
6051,
6055
],
[
6515,
6519
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=190)),
('body', models.TextField()),
('slug', models.SlugField(max_length=190)),
('status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(related_name=b'blog_article_author', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at'],
},
bases=(models.Model,),
),
]
| [
[
[
47,
63
]
],
[
[
87,
93
],
[
428,
434
],
[
546,
552
],
[
606,
612
],
[
652,
658
],
[
714,
720
],
[
822,
828
],
[
907,
913
],
[
1109,
1115
]
],
[
[
95,
105
],
[
186,
196
],
[
239,
249
],
[
332,
342
]
],
[
[
113,
134
],
[
851,
857
]
],
[
[
159,
167
],
[
271,
279
],
[
965,
973
]
],
[
[
176,
185
]
]
] |
#!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auxiliary module for testing gflags.py.
The purpose of this module is to define a few flags. We want to make
sure the unit tests for gflags.py involve more than one module.
"""
__author__ = 'salcianu@google.com (Alex Salcianu)'
import gflags
from gflags import _helpers
FLAGS = gflags.FLAGS
def DefineFlags(flag_values=FLAGS):
"""Defines some flags.
Args:
flag_values: The FlagValues object we want to register the flags
with.
"""
# The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there
# is no name clash with the existing flags.
gflags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
flag_values=flag_values)
gflags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
flag_values=flag_values)
gflags.DEFINE_boolean('tmod_bar_z', False,
'Another boolean flag from module bar.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
flag_values=flag_values)
def RemoveOneFlag(flag_name, flag_values=FLAGS):
"""Removes the definition of one flag from gflags.FLAGS.
Note: if the flag is not defined in gflags.FLAGS, this function does
not do anything (in particular, it does not raise any exception).
Motivation: We use this function for cleanup *after* a test: if
there was a failure during a test and not all flags were declared,
we do not want the cleanup code to crash.
Args:
flag_name: A string, the name of the flag to delete.
flag_values: The FlagValues object we remove the flag from.
"""
if flag_name in flag_values.FlagDict():
flag_values.__delattr__(flag_name)
def NamesOfDefinedFlags():
"""Returns: List of names of the flags declared in this module."""
return ['tmod_bar_x',
'tmod_bar_y',
'tmod_bar_z',
'tmod_bar_t',
'tmod_bar_u',
'tmod_bar_v']
def RemoveFlags(flag_values=FLAGS):
"""Deletes the flag definitions done by the above DefineFlags().
Args:
flag_values: The FlagValues object we remove the flags from.
"""
for flag_name in NamesOfDefinedFlags():
RemoveOneFlag(flag_name, flag_values=flag_values)
def GetModuleName():
"""Uses GetCallingModule() to return the name of this module.
For checking that _GetCallingModule works as expected.
Returns:
A string, the name of this module.
"""
return _helpers.GetCallingModule()
def ExecuteCode(code, global_dict):
"""Executes some code in a given global environment.
For testing of _GetCallingModule.
Args:
code: A string, the code to be executed.
global_dict: A dictionary, the global environment that code should
be executed in.
"""
# Indeed, using exec generates a lint warning. But some user code
# actually uses exec, and we have to test for it ...
exec(code, global_dict) # pylint: disable=exec-used
def DisclaimKeyFlags():
"""Disclaims flags declared in this module."""
gflags.DISCLAIM_key_flags()
| [
[
[
1732,
1742
]
],
[
[
1791,
1797
],
[
1835,
1841
],
[
2126,
2132
],
[
2235,
2241
],
[
2346,
2352
],
[
2503,
2509
],
[
2612,
2618
],
[
2721,
2727
],
[
4776,
4782
]
],
[
[
1817,
1825
],
[
4209,
4217
]
],
[
[
1827,
1832
],
[
1878,
1883
],
[
2871,
2876
],
[
3747,
3752
]
],
[
[
1854,
1865
]
],
[
[
2834,
2847
],
[
3948,
3961
]
],
[
[
3481,
3500
],
[
3921,
3940
]
],
[
[
3723,
3734
]
],
[
[
4004,
4017
]
],
[
[
4243,
4254
]
],
[
[
4705,
4721
]
]
] |
from pluto.control.modes import mode
from pluto.control.modes.processes import process_manager
from protos import broker_pb2_grpc
class LiveControlMode(mode.ControlCommandHandler):
def __init__(self, server, framework_url, process_factory):
super(LiveControlMode, self).__init__(framework_url, process_factory)
broker_pb2_grpc.add_BrokerServicer_to_server(self._broker, server)
def _create_process_manager(self):
return process_manager.LiveProcessManager()
def _accept_loop(self, loop):
# todo: only accept LiveLoop type or subtypes
return False
| [
[
[
32,
36
],
[
154,
158
]
],
[
[
79,
94
],
[
455,
470
]
],
[
[
115,
130
],
[
333,
348
]
],
[
[
138,
153
],
[
261,
276
]
]
] |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'err-stackstorm'
copyright = '2019, err-stackstorm contributors'
author = 'err-stackstorm contributors'
# The full version, including alpha/beta/rc tags
release = '2.1.4'
# -- General configuration ---------------------------------------------------
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
[
[
684,
691
]
],
[
[
711,
720
]
],
[
[
759,
765
]
],
[
[
848,
855
]
],
[
[
948,
958
]
],
[
[
1117,
1127
]
],
[
[
1208,
1222
]
],
[
[
1433,
1449
]
],
[
[
1704,
1714
]
],
[
[
1967,
1983
]
]
] |
import pytest
jax = pytest.importorskip("jax", minversion="0.2")
jnp = jax.numpy
import numpy as np
import pennylane as qml
from pennylane.devices.default_qubit_jax import DefaultQubitJax
pytestmark = pytest.mark.usefixtures("tape_mode")
class TestQNodeIntegration:
"""Integration tests for default.qubit.jax. This test ensures it integrates
properly with the PennyLane UI, in particular the new QNode."""
def test_defines_correct_capabilities(self):
"""Test that the device defines the right capabilities"""
dev = qml.device("default.qubit.jax", wires=1)
cap = dev.capabilities()
capabilities = {
"model": "qubit",
"supports_finite_shots": True,
"supports_tensor_observables": True,
"returns_probs": True,
"returns_state": True,
"supports_reversible_diff": False,
"supports_inverse_operations": True,
"supports_analytic_computation": True,
"passthru_interface": "jax",
}
assert cap == capabilities
def test_defines_correct_capabilities_directly_from_class(self):
"""Test that the device defines the right capabilities"""
dev = DefaultQubitJax(wires=1)
cap = dev.capabilities()
assert cap["supports_reversible_diff"] == False
assert cap["passthru_interface"] == "jax"
def test_load_device(self):
"""Test that the plugin device loads correctly"""
dev = qml.device("default.qubit.jax", wires=2)
assert dev.num_wires == 2
assert dev.shots == 1000
assert dev.analytic
assert dev.short_name == "default.qubit.jax"
assert dev.capabilities()["passthru_interface"] == "jax"
def test_qubit_circuit(self, tol):
"""Test that the device provides the correct
result for a simple circuit."""
p = jnp.array(0.543)
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -jnp.sin(p)
if not qml.tape_mode_active():
assert isinstance(circuit, qml.qnodes.PassthruQNode)
assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_qubit_circuit_with_jit(self, tol):
"""Test that the device provides the correct
result for a simple circuit under a jax.jit."""
p = jnp.array(0.543)
dev = qml.device("default.qubit.jax", wires=1)
@jax.jit
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliY(0))
expected = -jnp.sin(p)
# Do not test isinstance here since the @jax.jit changes the function
# type.
        # Just test that it works and spits out the right value.
assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)
def test_correct_state(self, tol):
"""Test that the device state is correct after applying a
quantum function on the device"""
dev = qml.device("default.qubit.jax", wires=2)
state = dev.state
expected = jnp.array([1, 0, 0, 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(wires=0)
qml.RZ(jnp.pi / 4, wires=0)
return qml.expval(qml.PauliZ(0))
circuit()
state = dev.state
amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)
expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
def test_correct_state_returned(self, tol):
"""Test that the device state is correct after applying a
quantum function on the device"""
if not qml.tape_mode_active():
pytest.skip("Only supported in tape mode")
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(wires=0)
qml.RZ(jnp.pi / 4, wires=0)
return qml.state()
state = circuit()
amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)
expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
assert jnp.allclose(state, expected, atol=tol, rtol=0)
def test_sampling_with_jit(self):
"""Test that sampling works with a jax.jit"""
@jax.jit
def circuit(key):
dev = qml.device("default.qubit.jax", wires=1, prng_key=key)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def inner_circuit():
qml.Hadamard(0)
return qml.sample(qml.PauliZ(wires=0))
return inner_circuit()
a = circuit(jax.random.PRNGKey(0))
b = circuit(jax.random.PRNGKey(0))
c = circuit(jax.random.PRNGKey(1))
np.testing.assert_array_equal(a, b)
assert not np.all(a == c)
def test_sampling_op_by_op(self):
"""Test that op-by-op sampling works as a new user would expect"""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.Hadamard(0)
return qml.sample(qml.PauliZ(wires=0))
a = circuit()
b = circuit()
assert not np.all(a == b)
def test_gates_dont_crash(self):
"""Test for gates that weren't covered by other tests. """
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.CRZ(0.0, wires=[0, 1])
qml.CRot(1.0, 0.0, 0.0, wires=[0, 1])
qml.CRY(0.0, wires=[0, 1])
return qml.sample(qml.PauliZ(wires=0))
circuit() # Just don't crash.
def test_diagonal_doesnt_crash(self):
"""Test that diagonal gates can be used."""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax", diff_method="backprop")
def circuit():
qml.DiagonalQubitUnitary(np.array([1.0, 1.0]), wires=0)
return qml.sample(qml.PauliZ(wires=0))
circuit() # Just don't crash.
class TestPassthruIntegration:
"""Tests for integration with the PassthruQNode"""
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_jacobian_variable_multiply(self, tol, jacobian_transform):
"""Test that jacobian of a QNode with an attached default.qubit.jax device
gives the correct result in the case of parameters multiplied by scalars"""
x = 0.43316321
y = 0.2162158
z = 0.75110998
weights = jnp.array([x, y, z])
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(p):
qml.RX(3 * p[0], wires=0)
qml.RY(p[1], wires=0)
qml.RX(p[2] / 2, wires=0)
return qml.expval(qml.PauliZ(0))
if not qml.tape_mode_active():
assert isinstance(circuit, qml.qnodes.PassthruQNode)
res = circuit(weights)
expected = jnp.cos(3 * x) * jnp.cos(y) * jnp.cos(z / 2) - jnp.sin(3 * x) * jnp.sin(z / 2)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad_fn = jacobian_transform(circuit, 0)
res = grad_fn(jnp.array(weights))
expected = jnp.array(
[
-3
* (jnp.sin(3 * x) * jnp.cos(y) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.sin(z / 2)),
-jnp.cos(3 * x) * jnp.sin(y) * jnp.cos(z / 2),
-0.5
* (jnp.sin(3 * x) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.cos(y) * jnp.sin(z / 2)),
]
)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_jacobian_repeated(self, tol, jacobian_transform):
"""Test that jacobian of a QNode with an attached default.qubit.jax device
gives the correct result in the case of repeated parameters"""
x = 0.43316321
y = 0.2162158
z = 0.75110998
p = jnp.array([x, y, z])
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, interface="jax")
def circuit(x):
qml.RX(x[1], wires=0)
qml.Rot(x[0], x[1], x[2], wires=0)
return qml.expval(qml.PauliZ(0))
res = circuit(p)
expected = jnp.cos(y) ** 2 - jnp.sin(x) * jnp.sin(y) ** 2
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad_fn = jacobian_transform(circuit, 0)
res = grad_fn(p)
expected = jnp.array(
[-jnp.cos(x) * jnp.sin(y) ** 2, -2 * (jnp.sin(x) + 1) * jnp.sin(y) * jnp.cos(y), 0]
)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
def test_state_differentiability(self, tol):
"""Test that the device state can be differentiated"""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a):
qml.RY(a, wires=0)
return qml.expval(qml.PauliZ(0))
a = jnp.array(0.54)
def cost(a):
"""A function of the device quantum state, as a function
            of input QNode parameters."""
circuit(a)
res = jnp.abs(dev.state) ** 2
return res[1] - res[0]
grad = jax.grad(cost)(a)
expected = jnp.sin(a)
assert jnp.allclose(grad, expected, atol=tol, rtol=0)
def test_prob_differentiability(self, tol):
"""Test that the device probability can be differentiated"""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a, b):
qml.RX(a, wires=0)
qml.RY(b, wires=1)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[1])
a = jnp.array(0.54)
b = jnp.array(0.12)
def cost(a, b):
prob_wire_1 = circuit(a, b).squeeze()
return prob_wire_1[1] - prob_wire_1[0]
res = cost(a, b)
expected = -jnp.cos(a) * jnp.cos(b)
assert jnp.allclose(res, expected, atol=tol, rtol=0)
grad = jax.jit(jax.grad(cost, argnums=(0, 1)))(a, b)
expected = [jnp.sin(a) * jnp.cos(b), jnp.cos(a) * jnp.sin(b)]
assert jnp.allclose(grad, expected, atol=tol, rtol=0)
def test_backprop_gradient(self, tol):
"""Tests that the gradient of the qnode is correct"""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(a, b):
qml.RX(a, wires=0)
qml.CRX(b, wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
a = jnp.array(-0.234)
b = jnp.array(0.654)
res = circuit(a, b)
expected_cost = 0.5 * (jnp.cos(a) * jnp.cos(b) + jnp.cos(a) - jnp.cos(b) + 1)
assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
res = jax.grad(lambda x, y: circuit(x, y).reshape(()), argnums=(0, 1))(a, b)
expected_grad = jnp.array(
[-0.5 * jnp.sin(a) * (jnp.cos(b) + 1), 0.5 * jnp.sin(b) * (1 - jnp.cos(a))]
)
assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)
@pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
@pytest.mark.parametrize("diff_method", ["backprop"])
def test_jax_interface_gradient(self, operation, diff_method, tol):
"""Tests that the gradient of an arbitrary U3 gate is correct
using the Jax interface, using a variety of differentiation methods."""
dev = qml.device("default.qubit.jax", wires=1)
@qml.qnode(dev, diff_method=diff_method, interface="jax")
def circuit(x, weights, w=None):
"""In this example, a mixture of scalar
arguments, array arguments, and keyword arguments are used."""
qml.QubitStateVector(1j * jnp.array([1, -1]) / jnp.sqrt(2), wires=w)
operation(x, weights[0], weights[1], wires=w)
return qml.expval(qml.PauliX(w))
# Check that the correct QNode type is being used.
if not qml.tape_mode_active():
if diff_method == "backprop":
assert isinstance(circuit, qml.qnodes.PassthruQNode)
assert not hasattr(circuit, "jacobian")
else:
assert not isinstance(circuit, qml.qnodes.PassthruQNode)
assert hasattr(circuit, "jacobian")
def cost(params):
"""Perform some classical processing"""
return (circuit(params[0], params[1:], w=0) ** 2).reshape(())
theta = 0.543
phi = -0.234
lam = 0.654
params = jnp.array([theta, phi, lam])
res = cost(params)
expected_cost = (
jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi)
) ** 2
assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
res = jax.grad(cost)(params)
expected_grad = (
jnp.array(
[
jnp.sin(theta) * jnp.cos(lam) * jnp.cos(phi),
jnp.cos(theta) * jnp.cos(lam) * jnp.sin(phi) + jnp.sin(lam) * jnp.cos(phi),
jnp.cos(theta) * jnp.sin(lam) * jnp.cos(phi) + jnp.cos(lam) * jnp.sin(phi),
]
)
* 2
* (jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi))
)
assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)
@pytest.mark.parametrize("interface", ["autograd", "tf", "torch"])
def test_error_backprop_wrong_interface(self, interface, tol):
"""Tests that an error is raised if diff_method='backprop' but not using
the Jax interface"""
dev = qml.device("default.qubit.jax", wires=1)
def circuit(x, w=None):
qml.RZ(x, wires=w)
return qml.expval(qml.PauliX(w))
error_type = qml.QuantumFunctionError if qml.tape_mode_active() else ValueError
with pytest.raises(
error_type,
match="default.qubit.jax only supports diff_method='backprop' when using the jax interface",
):
qml.qnode(dev, diff_method="backprop", interface=interface)(circuit)
class TestHighLevelIntegration:
"""Tests for integration with higher level components of PennyLane."""
def test_template_integration(self):
"""Test that a PassthruQNode using default.qubit.jax works with templates."""
dev = qml.device("default.qubit.jax", wires=2)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))
grad = jax.grad(lambda a: circuit(a).reshape(()))(weights)
assert grad.shape == weights.shape
def test_qnode_collection_integration(self):
"""Test that a PassthruQNode using default.qubit.jax works with QNodeCollections."""
dev = qml.device("default.qubit.jax", wires=2)
def ansatz(weights, **kwargs):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]
qnodes = qml.map(ansatz, obs_list, dev, interface="jax")
if not qml.tape_mode_active():
assert qnodes.interface == "jax"
weights = jnp.array([0.1, 0.2])
def cost(weights):
return jnp.sum(jnp.array(qnodes(weights)))
grad = jax.grad(cost)(weights)
assert grad.shape == weights.shape
def test_non_backprop_error(self):
"""Test that an error is raised in tape mode if the diff method is not backprop"""
if not qml.tape_mode_active():
pytest.skip("Test only applies in tape mode")
dev = qml.device("default.qubit.jax", wires=2)
def circuit(weights):
qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
return qml.expval(qml.PauliZ(0))
qnode = qml.QNode(circuit, dev, interface="jax", diff_method="parameter-shift")
weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))
with pytest.raises(qml.QuantumFunctionError, match="The JAX interface can only be used with"):
qnode(weights)
class TestOps:
"""Unit tests for operations supported by the default.qubit.jax device"""
@pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
def test_multirz_jacobian(self, jacobian_transform):
"""Test that the patched numpy functions are used for the MultiRZ
operation and the jacobian can be computed."""
wires = 4
dev = qml.device("default.qubit.jax", wires=wires)
@qml.qnode(dev, diff_method="backprop", interface="jax")
def circuit(param):
qml.MultiRZ(param, wires=[0, 1])
return qml.probs(wires=list(range(wires)))
param = 0.3
res = jacobian_transform(circuit)(param)
assert jnp.allclose(res, jnp.zeros(wires ** 2))
def test_full_subsystem(self, mocker):
"""Test applying a state vector to the full subsystem"""
dev = DefaultQubitJax(wires=["a", "b", "c"])
state = jnp.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.0
state_wires = qml.wires.Wires(["a", "b", "c"])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
assert jnp.all(dev._state.flatten() == state)
spy.assert_not_called()
def test_partial_subsystem(self, mocker):
"""Test applying a state vector to a subset of wires of the full subsystem"""
dev = DefaultQubitJax(wires=["a", "b", "c"])
state = jnp.array([1, 0, 1, 0]) / jnp.sqrt(2.0)
state_wires = qml.wires.Wires(["a", "c"])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
res = jnp.sum(dev._state, axis=(1,)).flatten()
assert jnp.all(res == state)
spy.assert_called()
| [
[
[
7,
13
],
[
23,
29
],
[
211,
217
],
[
6675,
6681
],
[
8238,
8244
],
[
11949,
11955
],
[
12024,
12030
],
[
14305,
14311
],
[
17481,
17487
],
[
4076,
4082
],
[
14825,
14831
],
[
16804,
16810
],
[
17256,
17262
]
],
[
[
17,
20
],
[
75,
78
],
[
6722,
6725
],
[
6734,
6737
],
[
8285,
8288
],
[
8297,
8300
],
[
17528,
17531
],
[
17540,
17543
],
[
2637,
2640
],
[
4709,
4712
],
[
5070,
5073
],
[
5114,
5117
],
[
5158,
5161
],
[
9962,
9965
],
[
10831,
10834
],
[
10839,
10842
],
[
11667,
11670
],
[
13722,
13725
],
[
15692,
15695
],
[
16549,
16552
]
],
[
[
69,
72
],
[
1945,
1948
],
[
2190,
2193
],
[
2323,
2326
],
[
2550,
2553
],
[
2813,
2816
],
[
3002,
3005
],
[
3313,
3316
],
[
3353,
3356
],
[
3686,
3689
],
[
3700,
3703
],
[
3714,
3717
],
[
3748,
3751
],
[
3773,
3776
],
[
3814,
3817
],
[
4427,
4430
],
[
4441,
4444
],
[
4455,
4458
],
[
4489,
4492
],
[
4514,
4517
],
[
4555,
4558
],
[
7079,
7082
],
[
7548,
7551
],
[
7565,
7568
],
[
7578,
7581
],
[
7595,
7598
],
[
7612,
7615
],
[
7643,
7646
],
[
7764,
7767
],
[
7806,
7809
],
[
7872,
7875
],
[
7889,
7892
],
[
7902,
7905
],
[
7919,
7922
],
[
7936,
7939
],
[
7971,
7974
],
[
7988,
7991
],
[
8001,
8004
],
[
8059,
8062
],
[
8076,
8079
],
[
8093,
8096
],
[
8110,
8113
],
[
8123,
8126
],
[
8184,
8187
],
[
8614,
8617
],
[
8939,
8942
],
[
8957,
8960
],
[
8970,
8973
],
[
9002,
9005
],
[
9148,
9151
],
[
9174,
9177
],
[
9187,
9190
],
[
9210,
9213
],
[
9228,
9231
],
[
9241,
9244
],
[
9283,
9286
],
[
9687,
9690
],
[
10000,
10003
],
[
10027,
10030
],
[
10503,
10506
],
[
10532,
10535
],
[
10727,
10730
],
[
10740,
10743
],
[
10767,
10770
],
[
10898,
10901
],
[
10911,
10914
],
[
10923,
10926
],
[
10936,
10939
],
[
10964,
10967
],
[
11419,
11422
],
[
11450,
11453
],
[
11530,
11533
],
[
11543,
11546
],
[
11556,
11559
],
[
11569,
11572
],
[
11601,
11604
],
[
11763,
11766
],
[
11795,
11798
],
[
11809,
11812
],
[
11832,
11835
],
[
11850,
11853
],
[
11890,
11893
],
[
13448,
13451
],
[
13547,
13550
],
[
13562,
13565
],
[
13577,
13580
],
[
13594,
13597
],
[
13609,
13612
],
[
13654,
13657
],
[
13785,
13788
],
[
13836,
13839
],
[
13853,
13856
],
[
13868,
13871
],
[
13903,
13906
],
[
13920,
13923
],
[
13935,
13938
],
[
13950,
13953
],
[
13965,
13968
],
[
14000,
14003
],
[
14017,
14020
],
[
14032,
14035
],
[
14047,
14050
],
[
14062,
14065
],
[
14143,
14146
],
[
14158,
14161
],
[
14173,
14176
],
[
14190,
14193
],
[
14205,
14208
],
[
14246,
14249
],
[
15606,
15609
],
[
16423,
16426
],
[
17172,
17175
],
[
18109,
18112
],
[
18127,
18130
],
[
18333,
18336
],
[
18566,
18569
],
[
18847,
18850
],
[
18873,
18876
],
[
19070,
19073
],
[
19129,
19132
],
[
3548,
3551
],
[
4322,
4325
],
[
9884,
9887
],
[
12637,
12640
],
[
12658,
12661
],
[
16495,
16498
],
[
16503,
16506
]
],
[
[
93,
104
],
[
5190,
5192
],
[
5246,
5248
],
[
5673,
5675
],
[
6446,
6448
]
],
[
[
113,
129
],
[
11987,
11990
],
[
11995,
11998
],
[
567,
570
],
[
1536,
1539
],
[
1979,
1982
],
[
2032,
2035
],
[
2217,
2220
],
[
2281,
2284
],
[
2584,
2587
],
[
2655,
2658
],
[
3223,
3226
],
[
3413,
3416
],
[
4039,
4042
],
[
4134,
4137
],
[
4187,
4190
],
[
5393,
5396
],
[
5444,
5447
],
[
5811,
5814
],
[
5862,
5865
],
[
6277,
6280
],
[
6328,
6331
],
[
7117,
7120
],
[
7170,
7173
],
[
7404,
7407
],
[
7468,
7471
],
[
8650,
8653
],
[
8703,
8706
],
[
9460,
9463
],
[
9513,
9516
],
[
10210,
10213
],
[
10263,
10266
],
[
11135,
11138
],
[
11188,
11191
],
[
12317,
12320
],
[
12370,
12373
],
[
12863,
12866
],
[
12974,
12977
],
[
13124,
13127
],
[
14566,
14569
],
[
14772,
14775
],
[
14744,
14747
],
[
14996,
14999
],
[
15324,
15327
],
[
15377,
15380
],
[
15616,
15619
],
[
15949,
15952
],
[
16173,
16176
],
[
16189,
16192
],
[
16204,
16207
],
[
16219,
16222
],
[
16235,
16238
],
[
16268,
16271
],
[
16332,
16335
],
[
16767,
16770
],
[
16867,
16870
],
[
17081,
17084
],
[
17182,
17185
],
[
17270,
17273
],
[
17776,
17779
],
[
17833,
17836
],
[
18398,
18401
],
[
18910,
18913
],
[
2102,
2105
],
[
2141,
2144
],
[
2152,
2155
],
[
2725,
2728
],
[
2764,
2767
],
[
2775,
2778
],
[
3506,
3509
],
[
3541,
3544
],
[
3589,
3592
],
[
3600,
3603
],
[
4280,
4283
],
[
4315,
4318
],
[
4363,
4366
],
[
4763,
4766
],
[
4832,
4835
],
[
5537,
5540
],
[
5573,
5576
],
[
5584,
5587
],
[
5955,
5958
],
[
5995,
5998
],
[
6046,
6049
],
[
6093,
6096
],
[
6104,
6107
],
[
6421,
6424
],
[
6497,
6500
],
[
6508,
6511
],
[
7240,
7243
],
[
7279,
7282
],
[
7314,
7317
],
[
7360,
7363
],
[
7371,
7374
],
[
8773,
8776
],
[
8808,
8811
],
[
8863,
8866
],
[
8874,
8877
],
[
9607,
9610
],
[
9646,
9649
],
[
9657,
9660
],
[
10360,
10363
],
[
10392,
10395
],
[
10424,
10427
],
[
10467,
10470
],
[
11285,
11288
],
[
11317,
11320
],
[
11362,
11365
],
[
11373,
11376
],
[
11389,
11392
],
[
12611,
12614
],
[
12759,
12762
],
[
12770,
12773
],
[
14655,
14658
],
[
14694,
14697
],
[
14705,
14708
],
[
15477,
15480
],
[
15559,
15562
],
[
15570,
15573
],
[
16045,
16048
],
[
16086,
16089
],
[
16127,
16130
],
[
16954,
16957
],
[
17036,
17039
],
[
17047,
17050
],
[
17931,
17934
],
[
17984,
17987
],
[
4939,
4942
],
[
4979,
4982
],
[
4990,
4993
]
],
[
[
179,
194
],
[
1260,
1275
],
[
18277,
18292
],
[
18791,
18806
]
],
[
[
198,
208
]
],
[
[
259,
279
]
],
[
[
6586,
6609
]
],
[
[
15076,
15100
]
],
[
[
17385,
17392
]
]
] |
import pytest
from pytest_mock_resources.fixture.database.generic import assign_fixture_credentials
from pytest_mock_resources.fixture.database.relational.generic import EngineManager
from pytest_mock_resources.fixture.database.relational.postgresql import (
_create_clean_database,
get_sqlalchemy_engine,
)
from pytest_mock_resources.patch.redshift import psycopg2, sqlalchemy
def create_redshift_fixture(*ordered_actions, scope="function", tables=None, session=None):
"""Produce a Redshift fixture.
Any number of fixture functions can be created. Under the hood they will all share the same
database server.
Arguments:
ordered_actions: Any number of ordered actions to be run on test setup.
scope: Passthrough pytest's fixture scope.
tables: Subsets the tables created by `ordered_actions`. This is generally
most useful when a model-base was specified in `ordered_actions`.
session: Whether to return a session instead of an engine directly. This can
either be a bool or a callable capable of producing a session.
"""
from pytest_mock_resources.fixture.database.relational.redshift.udf import REDSHIFT_UDFS
ordered_actions = ordered_actions + (REDSHIFT_UDFS,)
@pytest.fixture(scope=scope)
def _(_redshift_container, pmr_postgres_config):
database_name = _create_clean_database(pmr_postgres_config)
engine = get_sqlalchemy_engine(pmr_postgres_config, database_name)
assign_fixture_credentials(
engine,
drivername="postgresql+psycopg2",
host=pmr_postgres_config.host,
port=pmr_postgres_config.port,
database=database_name,
username=pmr_postgres_config.username,
password=pmr_postgres_config.password,
)
engine = sqlalchemy.substitute_execute_with_custom_execute(engine)
engine_manager = EngineManager(
engine, ordered_actions, tables=tables, default_schema="public"
)
with psycopg2.patch_connect(pmr_postgres_config):
for engine in engine_manager.manage(session=session):
yield engine
return _
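# A minimal usage sketch (illustrative, not part of the original module):
# `Base` stands for a hypothetical SQLAlchemy declarative base supplied by the
# test suite, and the fixture is requested by the name it is assigned to.
#
#     redshift = create_redshift_fixture(Base, scope="session", session=True)
#
#     def test_insert(redshift):
#         redshift.execute("CREATE TEMP TABLE t (x INT)")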
| [
[
[
7,
13
],
[
1268,
1274
]
],
[
[
74,
100
],
[
1501,
1527
]
],
[
[
171,
184
],
[
1930,
1943
]
],
[
[
264,
286
],
[
1373,
1395
]
],
[
[
292,
313
],
[
1434,
1455
]
],
[
[
366,
374
],
[
2045,
2053
]
],
[
[
376,
386
],
[
1847,
1857
]
],
[
[
393,
416
]
]
] |
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
class PolynomialFitting(BaseEstimator):
"""
Polynomial Fitting using Least Squares estimation
"""
def __init__(self, k: int) -> PolynomialFitting:
"""
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
"""
super().__init__()
self.degree = k
self.linear_regression_model = LinearRegression(
include_intercept=False)
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
x = self.__transform(X)
self.linear_regression_model.fit(x, y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
x = self.__transform(X)
return self.linear_regression_model.predict(x)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
x = self.__transform(X)
return self.linear_regression_model.loss(x, y)
def __transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform given input according to the univariate polynomial
transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
"""
return np.vander(X, N=self.degree+1, increasing=True) | [
[
[
23,
34
]
],
[
[
54,
62
],
[
680,
688
]
],
[
[
77,
93
],
[
572,
588
]
],
[
[
114,
127
],
[
173,
186
]
],
[
[
135,
146
],
[
650,
652
],
[
665,
667
],
[
1130,
1132
],
[
1115,
1117
],
[
1617,
1619
],
[
1632,
1634
],
[
2166,
2168
],
[
2151,
2153
],
[
2540,
2542
]
],
[
[
155,
172
],
[
293,
310
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class PriceInformation(object):
def __init__(self):
self._amount = None
self._type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PriceInformation()
if 'amount' in d:
o.amount = d['amount']
if 'type' in d:
o.type = d['type']
return o
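# A minimal round-trip sketch (illustrative, not part of the original SDK file;
# the sample values are hypothetical):
if __name__ == '__main__':
    price = PriceInformation.from_alipay_dict({'amount': '12.50', 'type': 'SALE'})
    print(price.to_alipay_dict())  # -> {'amount': '12.50', 'type': 'SALE'}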
| [
[
[
53,
57
]
],
[
[
110,
111
]
],
[
[
120,
136
],
[
1084,
1100
]
]
] |
# Copyright 2020 The StackStorm Authors.
# Copyright (C) 2020 Extreme Networks, Inc - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2tests.fixturesloader import FixturesLoader
from st2api.controllers.v1.traces import TracesController
from tests.base import APIControllerWithRBACTestCase
from st2tests.api import APIControllerWithIncludeAndExcludeFilterTestCase
http_client = six.moves.http_client
__all__ = [
'TraceControllerRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'traces': ['trace_for_test_enforce.yaml', 'trace_for_test_enforce_2.yaml',
'trace_for_test_enforce_3.yaml'],
}
class TraceControllerRBACTestCase(APIControllerWithRBACTestCase,
APIControllerWithIncludeAndExcludeFilterTestCase):
# Attributes used by APIControllerWithIncludeAndExcludeFilterTestCase
get_all_path = '/v1/traces'
controller_cls = TracesController
include_attribute_field_name = 'trace_tag'
exclude_attribute_field_name = 'start_timestamp'
rbac_enabled = True
fixtures_loader = FixturesLoader()
def setUp(self):
super(TraceControllerRBACTestCase, self).setUp()
self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
file_name = 'trace_for_test_enforce.yaml'
TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
file_name = 'trace_for_test_enforce_2.yaml'
        TraceControllerRBACTestCase.TRACE_2 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
file_name = 'trace_for_test_enforce_3.yaml'
        TraceControllerRBACTestCase.TRACE_3 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'traces': [file_name]})['traces'][file_name]
# Insert mock users, roles and assignments
# Users
user_1_db = UserDB(name='trace_list')
user_1_db = User.add_or_update(user_1_db)
self.users['trace_list'] = user_1_db
user_2_db = UserDB(name='trace_view')
user_2_db = User.add_or_update(user_2_db)
self.users['trace_view'] = user_2_db
# Roles
# trace_list
grant_db = PermissionGrantDB(resource_uid=None,
resource_type=ResourceType.TRACE,
permission_types=[PermissionType.TRACE_LIST])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='trace_list', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['trace_list'] = role_1_db
# trace_view on trace 1
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
grant_db = PermissionGrantDB(resource_uid=trace_uid,
resource_type=ResourceType.TRACE,
permission_types=[PermissionType.TRACE_VIEW])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='trace_view', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['trace_view'] = role_1_db
# Role assignments
role_assignment_db = UserRoleAssignmentDB(
user=self.users['trace_list'].name,
role=self.roles['trace_list'].name,
source='assignments/%s.yaml' % self.users['trace_list'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
role_assignment_db = UserRoleAssignmentDB(
user=self.users['trace_view'].name,
role=self.roles['trace_view'].name,
source='assignments/%s.yaml' % self.users['trace_view'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_get_all_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
resp = self.app.get('/v1/traces', expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_list"')
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_no_permissions(self):
user_db = self.users['no_permissions']
self.use_user(user_db)
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_view"'
' on resource "%s"' % (trace_uid))
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_all_permission_success_get_one_no_permission_failure(self):
user_db = self.users['trace_list']
self.use_user(user_db)
# trace_list permission, but no trace_view permission
resp = self.app.get('/v1/traces')
self.assertEqual(resp.status_code, http_client.OK)
self.assertEqual(len(resp.json), 3)
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
expected_msg = ('User "trace_list" doesn\'t have required permission "trace_view"'
' on resource "%s"' % (trace_uid))
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_get_one_permission_success_get_all_no_permission_failure(self):
user_db = self.users['trace_view']
self.use_user(user_db)
# trace_view permission, but no trace_list permission
trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
resp = self.app.get('/v1/traces/%s' % (trace_id))
self.assertEqual(resp.status_code, http_client.OK)
self.assertEqual(resp.json['uid'], trace_uid)
resp = self.app.get('/v1/traces', expect_errors=True)
expected_msg = ('User "trace_view" doesn\'t have required permission "trace_list"')
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def _insert_mock_models(self):
trace_ids = [trace['id'] for trace in self.models['traces'].values()]
return trace_ids
| [
[
[
660,
663
],
[
1413,
1416
]
],
[
[
698,
712
],
[
3697,
3711
],
[
4312,
4326
]
],
[
[
746,
758
],
[
3622,
3634
],
[
4237,
4249
]
],
[
[
798,
802
],
[
3260,
3264
],
[
3402,
3406
]
],
[
[
842,
846
],
[
3934,
3938
],
[
4549,
4553
]
],
[
[
886,
904
],
[
4881,
4899
],
[
5164,
5182
]
],
[
[
944,
959
],
[
3744,
3759
],
[
4359,
4374
]
],
[
[
997,
1003
],
[
3214,
3220
],
[
3356,
3362
]
],
[
[
1041,
1047
],
[
3851,
3857
],
[
4466,
4472
]
],
[
[
1085,
1105
],
[
4681,
4701
],
[
4964,
4984
]
],
[
[
1143,
1160
],
[
3534,
3551
],
[
4144,
4161
]
],
[
[
1197,
1211
],
[
2103,
2117
]
],
[
[
1253,
1269
],
[
1939,
1955
]
],
[
[
1294,
1323
],
[
1695,
1724
]
],
[
[
1349,
1397
],
[
1760,
1808
]
],
[
[
1399,
1410
],
[
5541,
5552
],
[
6185,
6196
],
[
6573,
6584
],
[
7063,
7074
],
[
7626,
7637
],
[
7894,
7905
]
],
[
[
1436,
1443
]
],
[
[
1485,
1498
],
[
2276,
2289
],
[
2542,
2555
],
[
2790,
2803
],
[
3038,
3051
]
],
[
[
1511,
1524
],
[
2368,
2381
]
],
[
[
1667,
1694
],
[
2156,
2183
],
[
2442,
2469
],
[
2690,
2717
],
[
2938,
2965
]
]
] |
import requests
import json
url = "https://www.cbr-xml-daily.ru/daily_json.js"
response = requests.get(url)
data = json.loads(response.text)
print(data)
| [
[
[
7,
15
],
[
91,
99
]
],
[
[
23,
27
],
[
116,
120
]
],
[
[
29,
32
],
[
104,
107
]
],
[
[
80,
88
],
[
127,
135
]
],
[
[
109,
113
],
[
148,
152
]
]
] |
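The row above parses the Bank of Russia daily-rates feed by hand. A slightly more defensive sketch (not part of the dataset row; the "Valute"/"USD" keys are assumptions about the feed layout, not guaranteed):

import requests

url = "https://www.cbr-xml-daily.ru/daily_json.js"
response = requests.get(url, timeout=10)
response.raise_for_status()            # fail loudly on HTTP errors
data = response.json()                 # let requests decode the JSON body
# "Valute" and "USD" are assumed keys in the feed
usd_rate = data.get("Valute", {}).get("USD", {}).get("Value")
print(usd_rate)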
from django.db import models
from authors import settings
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile
# Create your models here.
class ReportArticle(models.Model):
"""model for reporting an article"""
reporter = models.ForeignKey(Profile, on_delete=models.CASCADE)
article = models.ForeignKey(Article, to_field="slug", on_delete=models.CASCADE)
violation_subject = models.CharField(max_length=100, blank=False, null=False)
violation_report = models.CharField(max_length=300, blank=True, null=True)
report_status = models.CharField(max_length=20, default='pending')
submission_date = models.DateTimeField(auto_now_add=True, editable=False)
| [
[
[
22,
28
],
[
204,
210
],
[
275,
281
],
[
312,
318
],
[
342,
348
],
[
396,
402
],
[
436,
442
],
[
517,
523
],
[
593,
599
],
[
666,
672
]
],
[
[
49,
57
]
],
[
[
99,
106
],
[
360,
367
]
],
[
[
148,
155
],
[
293,
300
]
],
[
[
190,
203
]
]
] |
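A minimal usage sketch for the ReportArticle model above; `profile` and `article` are hypothetical, already-loaded instances:

# `profile` (a Profile) and `article` (an Article) are hypothetical objects here.
report = ReportArticle.objects.create(
    reporter=profile,
    article=article,
    violation_subject="plagiarism",
    violation_report="Large portions copied verbatim from another post.",
)
# report_status defaults to 'pending'; submission_date is filled in automatically.
print(report.report_status, report.submission_date)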
# -*- coding: utf-8 -*-
import re
from six.moves import http_client
from six.moves import urllib
from wsgiref.headers import Headers
class Request(object):
def __init__(self, environ):
self.environ = environ
@property
def path(self):
return self.environ['PATH_INFO']
@property
def args(self):
""" 把查询参数转成字典形式 """
get_arguments = urllib.parse.parse_qs(self.environ['QUERY_STRING'])
return {k: v[0] for k, v in get_arguments.items()}
class Response(object):
def __init__(self, response=None, status=200, charset='utf-8', content_type='text/html'):
self.response = [] if response is None else response
self.charset = charset
self.headers = Headers([])
content_type = '{content_type}; charset={charset}'.format(content_type=content_type, charset=charset)
self.headers.add_header('content-type', content_type)
self._status = status
@property
def status(self):
status_string = http_client.responses.get(self._status, 'UNKNOWN')
return '{status} {status_string}'.format(status=self._status, status_string=status_string)
def __iter__(self):
for val in self.response:
if isinstance(val, bytes):
yield val
else:
yield val.encode(self.charset)
# Try a new application that combines the Request and Response classes:
def request_response_application(func):
def application(environ, start_response):
request = Request(environ)
response = func(request)
start_response(
response.status,
response.headers.items()
)
return iter(response)
return application
class NotFoundError(Exception):
""" url pattern not found """
pass
class DecoratorRouter:
def __init__(self):
        self.routing_table = []  # stores (url pattern, callable) pairs
def match(self, path):
for (pattern, callback) in self.routing_table:
m = re.match(pattern, path)
if m:
return (callback, m.groups())
raise NotFoundError()
def __call__(self, pattern):
def _(func):
            self.routing_table.append((pattern, func))
            return func  # hand the original function back so the decorated name is not rebound to None
        return _
routers = DecoratorRouter()
@routers(r'/hello/(.*)/$')
def hello(request, name):
return Response("<h1>Hello, {name}</h1>".format(name=name))
@routers(r'/goodbye/(.*)/$')
def goodbye(request, name):
return Response("<h1>Goodbye, {name}</h1>".format(name=name))
class Application(object):
def __init__(self, routers, **kwargs):
self.routers = routers
def __call__(self, environ, start_response):
try:
request = Request(environ)
            callback, args = self.routers.match(request.path)  # use the router held by this instance
response = callback(request, *args)
except NotFoundError:
response = Response("<h1>Not found</h1>", status=404)
start_response(response.status, response.headers.items())
return iter(response)
application = Application(routers)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('127.0.0.1', 8000, application)
httpd.serve_forever()
| [
[
[
32,
34
],
[
1984,
1986
]
],
[
[
57,
68
],
[
1007,
1018
]
],
[
[
91,
97
],
[
386,
392
]
],
[
[
126,
133
],
[
732,
739
]
],
[
[
142,
149
],
[
2692,
2699
],
[
1496,
1503
]
],
[
[
505,
513
],
[
2325,
2333
],
[
2448,
2456
],
[
2867,
2875
]
],
[
[
1396,
1424
]
],
[
[
1707,
1720
],
[
2086,
2099
],
[
2829,
2842
]
],
[
[
1784,
1799
],
[
2241,
2256
]
],
[
[
2231,
2238
],
[
2262,
2269
],
[
2381,
2388
],
[
3034,
3041
],
[
2738,
2745
]
],
[
[
2292,
2297
]
],
[
[
2413,
2420
]
],
[
[
2511,
2522
],
[
3022,
3033
]
],
[
[
3008,
3019
],
[
3164,
3175
]
],
[
[
3109,
3120
],
[
3133,
3144
]
],
[
[
3125,
3130
],
[
3181,
3186
]
]
] |
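The mini WSGI framework above can be exercised without starting a server; a short sketch against the objects it defines:

# Resolve a path directly through the DecoratorRouter instance defined above.
callback, args = routers.match('/hello/world/')
resp = callback(None, *args)        # hello() never touches the request argument
print(resp.status)                  # -> "200 OK"
print(b"".join(resp))               # -> b"<h1>Hello, world</h1>"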
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_ips_rule_settings
short_description: Configure IPS rule setting in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify ips feature and rule_settings category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
ips_rule_settings:
description:
- Configure IPS rule setting.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
id:
description:
- Rule ID.
required: true
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPS rule setting.
fortios_ips_rule_settings:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
ips_rule_settings:
id: "3"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_ips_rule_settings_data(json):
option_list = ['id']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)  # write the converted element back into the list
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def ips_rule_settings(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['ips_rule_settings'] and data['ips_rule_settings']['state']:
state = data['ips_rule_settings']['state']
else:
state = True
ips_rule_settings_data = data['ips_rule_settings']
filtered_data = underscore_to_hyphen(filter_ips_rule_settings_data(ips_rule_settings_data))
if state == "present":
return fos.set('ips',
'rule-settings',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('ips',
'rule-settings',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_ips(data, fos):
if data['ips_rule_settings']:
resp = ips_rule_settings(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"ips_rule_settings": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"id": {"required": True, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_ips(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_ips(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
[
[
42,
57
]
],
[
[
59,
67
]
],
[
[
69,
83
]
],
[
[
755,
768
]
],
[
[
777,
793
]
],
[
[
917,
930
]
],
[
[
3667,
3675
]
],
[
[
4098,
4104
]
],
[
[
5400,
5413
],
[
8611,
8624
]
],
[
[
5458,
5468
],
[
9116,
9126
]
],
[
[
5526,
5540
],
[
9166,
9180
]
],
[
[
5602,
5617
],
[
9315,
9330
]
],
[
[
5624,
5629
],
[
9532,
9537
]
],
[
[
5958,
5987
],
[
6934,
6963
]
],
[
[
6218,
6238
],
[
6322,
6342
],
[
6482,
6502
],
[
6913,
6933
]
],
[
[
6553,
6570
],
[
7595,
7612
]
],
[
[
7364,
7384
],
[
7640,
7660
]
],
[
[
7521,
7532
],
[
9238,
9249
],
[
9598,
9609
]
],
[
[
7728,
7732
],
[
9834,
9838
]
]
] |
import torch
import torchvision
# An instance of your model.
model = torchvision.models.resnet18()
# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)
# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
# save
traced_script_module.save("model.pt")
| [
[
[
7,
12
],
[
191,
196
],
[
314,
319
]
],
[
[
20,
31
],
[
70,
81
]
],
[
[
62,
67
],
[
330,
335
]
],
[
[
181,
188
],
[
337,
344
]
],
[
[
291,
311
],
[
354,
374
]
]
] |
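The traced ScriptModule saved above can be loaded back and called like any module; a brief sketch using the same file name:

import torch

loaded = torch.jit.load("model.pt")   # load the serialized ScriptModule
loaded.eval()
with torch.no_grad():
    output = loaded(torch.rand(1, 3, 224, 224))
print(output.shape)                   # torch.Size([1, 1000]) for resnet18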
import glob
import logging
import os
from typing import Any, Dict, List, Optional
from django.conf import settings
from zerver.lib.storage import static_path
# See https://jackstromberg.com/2013/01/useraccountcontrol-attributeflag-values/
# for docs on what these values mean.
LDAP_USER_ACCOUNT_CONTROL_NORMAL = '512'
LDAP_USER_ACCOUNT_CONTROL_DISABLED = '514'
def generate_dev_ldap_dir(mode: str, num_users: int=8) -> Dict[str, Dict[str, Any]]:
mode = mode.lower()
ldap_data = []
for i in range(1, num_users+1):
name = 'LDAP User %d' % (i,)
email = 'ldapuser%d@zulip.com' % (i,)
phone_number = '999999999%d' % (i,)
birthdate = '19%02d-%02d-%02d' % (i, i, i)
ldap_data.append((name, email, phone_number, birthdate))
profile_images = [open(path, "rb").read() for path in
glob.glob(os.path.join(static_path("images/team"), "*"))]
ldap_dir = {}
for i, user_data in enumerate(ldap_data):
email = user_data[1].lower()
email_username = email.split('@')[0]
common_data = {
'cn': [user_data[0]],
'userPassword': [email_username],
'phoneNumber': [user_data[2]],
'birthDate': [user_data[3]],
}
if mode == 'a':
ldap_dir['uid=' + email + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email],
thumbnailPhoto=[profile_images[i % len(profile_images)]],
userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
**common_data)
elif mode == 'b':
ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email_username],
jpegPhoto=[profile_images[i % len(profile_images)]],
**common_data)
elif mode == 'c':
ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
uid=[email_username],
email=[email],
**common_data)
return ldap_dir
def init_fakeldap(directory: Optional[Dict[str, Dict[str, List[str]]]]=None) -> None: # nocoverage
# We only use this in development. Importing mock inside
# this function is an import time optimization, which
# avoids the expensive import of the mock module (slow
# because its dependency pbr uses pkgresources, which is
# really slow to import.)
from unittest import mock
from fakeldap import MockLDAP
# Silent `django_auth_ldap` logger in dev mode to avoid
# spammy user not found log messages.
ldap_auth_logger = logging.getLogger('django_auth_ldap')
ldap_auth_logger.setLevel(logging.CRITICAL)
fakeldap_logger = logging.getLogger('fakeldap')
fakeldap_logger.setLevel(logging.CRITICAL)
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = directory or generate_dev_ldap_dir(settings.FAKE_LDAP_MODE,
settings.FAKE_LDAP_NUM_USERS)
| [
[
[
7,
11
],
[
853,
857
]
],
[
[
19,
26
],
[
2600,
2607
],
[
2668,
2675
],
[
2709,
2716
],
[
2768,
2775
]
],
[
[
34,
36
],
[
863,
865
]
],
[
[
56,
59
],
[
443,
446
]
],
[
[
61,
65
],
[
423,
427
],
[
433,
437
],
[
2077,
2081
],
[
2087,
2091
]
],
[
[
67,
71
],
[
2097,
2101
]
],
[
[
73,
81
],
[
2068,
2076
]
],
[
[
107,
115
],
[
3037,
3045
],
[
3123,
3131
]
],
[
[
148,
159
],
[
876,
887
]
],
[
[
280,
312
],
[
1493,
1525
]
],
[
[
321,
355
]
],
[
[
369,
390
],
[
3015,
3036
]
],
[
[
2043,
2056
]
]
] |
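A quick sketch of what generate_dev_ldap_dir() above produces in mode 'a'; it reads team images from the Zulip tree, so this only runs inside a Zulip checkout:

ldap_dir = generate_dev_ldap_dir('a', num_users=2)
for dn, attrs in ldap_dir.items():
    print(dn)                                  # uid=ldapuser1@zulip.com,ou=users,dc=zulip,dc=com
    print(attrs['cn'], attrs['phoneNumber'])   # ['LDAP User 1'] ['9999999991']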
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.core.paginator import Paginator
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.cache import cache_page
from django.db.models import Count
from .forms import CreatePost, CreateComment
from .models import Post, User, Comment, Follow
def _create_paginator(request, post):
paginator = Paginator(post, 10)
page_number = request.GET.get("page")
page = paginator.get_page(page_number)
return page, paginator
def _search_text(request):
keyword = request.GET.get("q", None)
posts_list = Post.objects.select_related(
"author", "group").filter(
text__contains=keyword
).prefetch_related("comments")
data_paginator = _create_paginator(request, posts_list)
return data_paginator
@cache_page(20, key_prefix="index_page")
def index(request):
if request.GET.get("q") is None:
posts_list = Post.objects.order_by("-pub_date")\
.all()\
.select_related("author", "group", )\
.prefetch_related("comments",)
data_paginator = _create_paginator(request, posts_list)
else:
data_paginator = _search_text(request)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Последние обновления",
"description": "Последние обновления на сайте",
"changing_it": "index"})
@login_required
def new_post(request):
content = {"title_name": "Новый пост", "btn_name": "Добавить пост"}
if request.method == "POST":
form = CreatePost(request.POST, files=request.FILES or None)
if form.is_valid():
author = request.user
form.cleaned_data['author'] = author
date_clean = form.cleaned_data
post = Post.objects.create(**date_clean)
messages.success(request, "Пост добавлен")
return redirect("index")
else:
form = CreatePost()
return render(request, "add_post.html", {"form": form, "content": content})
def profile(request, username):
user_name = get_object_or_404(User, username=username)
following = None
if request.user != AnonymousUser():
following = Follow.objects.filter(user=request.user, author=user_name)
print(following)
posts = Post.objects.filter(author_id__username=user_name)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, posts)
return render(request, "profile.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"author": user_name,
"following": following})
def post_view(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
print(type(profile_person))
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
# comments = select_post.comments.all()
comments = list(Comment.objects.filter(post_id=post_id).select_related("author", "post"))
return render(request, "post.html", {"user_post": select_post,
"author": profile_person,
"comments": comments})
def post_edit(request, username, post_id):
content = {"title_name": "Редактировать запись", "btn_name": "Сохранить"}
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
if request.user != profile_person:
return redirect("post", username=username, post_id=post_id)
form = CreatePost(request.POST or None,
instance=select_post,
files=request.FILES or None)
if form.is_valid():
form.save()
print("Post can editable")
return redirect("post", username=username, post_id=post_id)
return render(request, "add_post.html", {"form": form,
"selected_post": select_post,
"content": content})
def page_not_found(request, exception):
return render(request, "misc/404.html", {"path": request.path}, status=404)
def server_error(request):
return render(request, "misc/500.html", status=500)
@login_required
def add_comment(request, username, post_id):
profile_person = get_object_or_404(User, username=username)
select_post = get_object_or_404(Post, pk=post_id, author=profile_person)
if request.method == "POST":
form = CreateComment(request.POST)
print(form)
if form.is_valid():
author = request.user
form.cleaned_data["post"] = select_post
form.cleaned_data["author"] = author
data_clean = form.cleaned_data
comment = Comment.objects.create(**data_clean)
messages.success(request, "Коммент поставлен")
return redirect("post", username=username, post_id=post_id)
else:
form = CreateComment()
return render(request, "comments.html", {"form": form})
@login_required
def follow_index(request):
my_follow = Post.objects.filter(author__following__user=request.user)\
.select_related("author", "group")\
.prefetch_related("comments")
data_paginator = _create_paginator(request, my_follow)
return render(request, "index.html", {"page": data_paginator[0],
"paginator": data_paginator[1],
"title": "Подписки",
"description": "Последние обновления твоих людей",
"changing_it": "follow"})
@login_required
def profile_follow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.get_or_create(author=author, user=request.user)
return redirect("profile", username=username)
@login_required
def profile_unfollow(request, username):
author = get_object_or_404(User, username=username)
if request.user != author:
Follow.objects.filter(author=author, user=request.user).delete()
return redirect('profile', username=username) | [
[
[
43,
57
],
[
1698,
1712
],
[
4696,
4710
],
[
5493,
5507
],
[
6126,
6140
],
[
6391,
6405
]
],
[
[
97,
110
],
[
2464,
2477
]
],
[
[
145,
154
],
[
494,
503
]
],
[
[
184,
190
],
[
1331,
1337
],
[
2258,
2264
],
[
2797,
2803
],
[
3438,
3444
],
[
4298,
4304
],
[
4539,
4545
],
[
4648,
4654
],
[
5441,
5447
],
[
5764,
5770
]
],
[
[
192,
200
],
[
2191,
2199
],
[
3946,
3954
],
[
4233,
4241
],
[
5336,
5344
],
[
6349,
6357
],
[
6618,
6626
]
],
[
[
202,
219
],
[
2377,
2394
],
[
3133,
3150
],
[
3226,
3243
],
[
3769,
3786
],
[
3830,
3847
],
[
4777,
4794
],
[
4838,
4855
],
[
6193,
6210
],
[
6460,
6477
]
],
[
[
247,
255
],
[
2129,
2137
],
[
5270,
5278
]
],
[
[
298,
308
],
[
931,
941
]
],
[
[
338,
343
]
],
[
[
364,
374
],
[
1856,
1866
],
[
2234,
2244
],
[
4011,
4021
]
],
[
[
376,
389
],
[
4945,
4958
],
[
5414,
5427
]
],
[
[
410,
414
],
[
713,
717
],
[
1049,
1053
],
[
2083,
2087
],
[
2597,
2601
],
[
3244,
3248
],
[
3848,
3852
],
[
4856,
4860
],
[
5551,
5555
]
],
[
[
416,
420
],
[
2395,
2399
],
[
3151,
3155
],
[
3787,
3791
],
[
4795,
4799
],
[
6211,
6215
],
[
6478,
6482
]
],
[
[
422,
429
],
[
3352,
3359
],
[
5221,
5228
]
],
[
[
431,
437
],
[
2501,
2507
],
[
6275,
6281
],
[
6542,
6548
]
],
[
[
444,
461
],
[
864,
881
],
[
1223,
1240
],
[
2752,
2769
],
[
5714,
5731
]
],
[
[
632,
644
],
[
1297,
1309
]
],
[
[
975,
980
]
],
[
[
1717,
1725
]
],
[
[
2333,
2340
]
],
[
[
3073,
3082
]
],
[
[
3631,
3640
]
],
[
[
4493,
4507
]
],
[
[
4614,
4626
]
],
[
[
4715,
4726
]
],
[
[
5512,
5524
]
],
[
[
6145,
6159
]
],
[
[
6410,
6426
]
]
] |
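The views above redirect to named routes ("index", "post", "profile", ...); a hypothetical urls.py wiring that matches those names (route paths are illustrative, not taken from the source):

from django.urls import path
from . import views

urlpatterns = [
    path("", views.index, name="index"),
    path("new/", views.new_post, name="new_post"),
    path("follow/", views.follow_index, name="follow_index"),
    path("<str:username>/follow/", views.profile_follow, name="profile_follow"),
    path("<str:username>/unfollow/", views.profile_unfollow, name="profile_unfollow"),
    path("<str:username>/", views.profile, name="profile"),
    path("<str:username>/<int:post_id>/", views.post_view, name="post"),
    path("<str:username>/<int:post_id>/edit/", views.post_edit, name="post_edit"),
    path("<str:username>/<int:post_id>/comment/", views.add_comment, name="add_comment"),
]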
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import MongoClient
__all__ = ['PymongoConnection', 'MotorConnection']
class PymongoConnection:
def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
"""Create database connection."""
if user and password:
self.db_client = MongoClient(f"mongodb://{user}:{password}@{host}:{port}")
else:
self.db_client = MongoClient(f"mongodb://{host}:{port}")
self.db_name = db
def get_db_client(self) -> MongoClient:
"""Return database client instance."""
return self.db_client
def get_db(self):
"""Return database instance."""
return self.get_db_client()[self.db_name]
def close_db(self):
"""Close database connection."""
self.db_client.close()
class MotorConnection:
def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
"""Create database connection."""
if user and password:
self.db_client = AsyncIOMotorClient(f"mongodb://{user}:{password}@{host}:{port}")
else:
self.db_client = AsyncIOMotorClient(f"mongodb://{host}:{port}")
self.db_name = db
def get_db_client(self) -> AsyncIOMotorClient:
"""Return database client instance."""
return self.db_client
def get_db(self):
"""Return database instance."""
return self.get_db_client()[self.db_name]
def close_db(self):
"""Close database connection."""
self.db_client.close() | [
[
[
33,
51
],
[
1081,
1099
],
[
1189,
1207
],
[
1294,
1312
]
],
[
[
72,
83
],
[
360,
371
],
[
461,
472
],
[
559,
570
]
],
[
[
86,
93
]
],
[
[
144,
161
]
],
[
[
867,
882
]
]
] |
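Typical use of the connection wrappers above; host, port, database and collection names are placeholders:

conn = PymongoConnection(host="127.0.0.1", port="27017", db="app")
db = conn.get_db()
db["events"].insert_one({"kind": "ping"})     # synchronous pymongo collection API
print(db["events"].count_documents({}))
conn.close_db()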
import base64
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from distutils.version import StrictVersion
from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_pool has been removed. Please use a '
'docker.types.IPAMPool object instead.'
)
def create_ipam_config(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_config has been removed. Please use a '
'docker.types.IPAMConfig object instead.'
)
def decode_json_header(header):
data = base64.b64decode(header)
data = data.decode('utf-8')
return json.loads(data)
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
def convert_port_bindings(port_bindings):
result = {}
for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
result = []
for k, v in binds.items():
if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
str('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
str('{0}:{1}:rw').format(k, v)
)
return result
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {}"
.format(type(mount).__name__)
)
result[name] = options
return result
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
def parse_host(addr, is_win32=False, tls=False):
path = ''
port = None
host = None
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
# https://bugs.python.org/issue754016
parsed_url = urlparse('//' + addr, 'tcp')
proto = 'tcp'
if proto == 'fd':
raise errors.DockerException('fd protocol is not implemented')
# These protos are valid aliases for our library but not for the
# official spec
if proto == 'http' or proto == 'https':
tls = proto == 'https'
proto = 'tcp'
elif proto == 'http+unix':
proto = 'unix'
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
"Invalid bind address protocol: {}".format(addr)
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if any([
parsed_url.params, parsed_url.query, parsed_url.fragment,
parsed_url.password
]):
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if parsed_url.path and proto == 'ssh':
raise errors.DockerException(
'Invalid bind address format: no path allowed for this protocol:'
' {}'.format(addr)
)
else:
path = parsed_url.path
if proto == 'unix' and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = '/'.join((parsed_url.hostname, path))
if proto in ('tcp', 'ssh'):
# parsed_url.hostname strips brackets from IPv6 addresses,
# which can be problematic hence our use of splitnport() instead.
host, port = splitnport(parsed_url.netloc)
if port is None or port < 0:
if proto != 'ssh':
raise errors.DockerException(
'Invalid bind address format: port is required:'
' {}'.format(addr)
)
port = 22
if not host:
host = DEFAULT_HTTP_HOST
# Rewrite schemes to fit library internals (requests adapters)
if proto == 'tcp':
proto = 'http{}'.format('s' if tls else '')
elif proto == 'unix':
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
return "{}://{}".format(proto, path).rstrip('/')
return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, str):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
# Any value or 'unset' counts as true.
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
def convert_filters(filters):
result = {}
for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
def normalize_links(links):
if isinstance(links, dict):
links = iter(links.items())
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
line = line.strip()
if not line:
continue
parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
def split_command(command):
return shlex.split(command)
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, bytes):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
'{} {}'.format(v, k) for k, v in sorted(iter(extra_hosts.items()))
]
return [
'{}:{}'.format(k, v) for k, v in sorted(iter(extra_hosts.items()))
]
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
'docker.types.HostConfig object instead.'
)
| [
[
[
7,
13
],
[
841,
847
]
],
[
[
21,
25
],
[
909,
913
],
[
10589,
10593
]
],
[
[
33,
35
]
],
[
[
43,
50
],
[
9040,
9042
],
[
9661,
9663
],
[
9674,
9676
],
[
9969,
9971
],
[
10027,
10029
],
[
10080,
10082
]
],
[
[
58,
63
],
[
12885,
12890
]
],
[
[
71,
77
],
[
5389,
5395
]
],
[
[
99,
107
],
[
10711,
10719
]
],
[
[
138,
151
],
[
1160,
1173
],
[
1187,
1200
]
],
[
[
168,
174
],
[
451,
457
],
[
648,
654
],
[
5589,
5595
],
[
5955,
5961
],
[
6234,
6240
],
[
6457,
6463
],
[
6608,
6614
],
[
7343,
7349
],
[
8176,
8182
],
[
11453,
11459
],
[
11729,
11735
],
[
12682,
12688
],
[
13628,
13634
]
],
[
[
190,
193
],
[
9933,
9936
]
],
[
[
218,
235
],
[
7556,
7573
]
],
[
[
260,
279
],
[
5247,
5266
]
],
[
[
304,
317
],
[
5172,
5185
]
],
[
[
342,
352
],
[
11027,
11037
]
],
[
[
379,
389
],
[
7223,
7233
]
],
[
[
391,
399
],
[
5310,
5318
],
[
5501,
5509
]
],
[
[
406,
422
]
],
[
[
601,
619
]
],
[
[
802,
820
]
],
[
[
932,
947
],
[
1339,
1354
]
],
[
[
1308,
1318
],
[
1409,
1419
]
],
[
[
1373,
1384
]
],
[
[
1434,
1455
],
[
2502,
2523
],
[
2592,
2613
]
],
[
[
2270,
2291
]
],
[
[
2642,
2662
]
],
[
[
3672,
3692
]
],
[
[
4411,
4435
]
],
[
[
4743,
4763
]
],
[
[
5011,
5021
]
],
[
[
7949,
7962
]
],
[
[
8919,
8934
]
],
[
[
10247,
10262
]
],
[
[
10614,
10635
]
],
[
[
10796,
10807
]
],
[
[
11960,
11975
]
],
[
[
12133,
12147
]
],
[
[
12850,
12863
]
],
[
[
12912,
12930
]
],
[
[
13240,
13258
]
],
[
[
13575,
13593
]
]
] |
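A few representative calls into the helpers above; the expected values follow from the code and are illustrative only:

print(parse_repository_tag('ubuntu:20.04'))          # ('ubuntu', '20.04')
print(parse_repository_tag('registry:5000/img'))     # ('registry:5000/img', None) - the colon is part of the host
print(convert_port_bindings({'80': ('127.0.0.1', 8080)}))
# {'80/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '8080'}]}
print(parse_host('tcp://127.0.0.1:2376', tls=True))  # 'https://127.0.0.1:2376'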
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
# generated by datamodel-codegen:
# filename: airbyte_protocol.yaml
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from pydantic import AnyUrl, BaseModel, Extra, Field
class Type(Enum):
RECORD = "RECORD"
STATE = "STATE"
LOG = "LOG"
SPEC = "SPEC"
CONNECTION_STATUS = "CONNECTION_STATUS"
CATALOG = "CATALOG"
class AirbyteRecordMessage(BaseModel):
class Config:
extra = Extra.allow
stream: str = Field(..., description="the name of this record's stream")
data: Dict[str, Any] = Field(..., description="the record data")
emitted_at: int = Field(
...,
description="when the data was emitted from the source. epoch in millisecond.",
)
namespace: Optional[str] = Field(None, description="the namespace of this record's stream")
class AirbyteStateMessage(BaseModel):
class Config:
extra = Extra.allow
data: Dict[str, Any] = Field(..., description="the state data")
class Level(Enum):
FATAL = "FATAL"
ERROR = "ERROR"
WARN = "WARN"
INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
class AirbyteLogMessage(BaseModel):
class Config:
extra = Extra.allow
level: Level = Field(..., description="the type of logging")
message: str = Field(..., description="the log message")
class Status(Enum):
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
class AirbyteConnectionStatus(BaseModel):
class Config:
extra = Extra.allow
status: Status
message: Optional[str] = None
class SyncMode(Enum):
full_refresh = "full_refresh"
incremental = "incremental"
class DestinationSyncMode(Enum):
append = "append"
overwrite = "overwrite"
append_dedup = "append_dedup"
class OAuth2Specification(BaseModel):
class Config:
extra = Extra.allow
rootObject: Optional[List[Union[str, int]]] = Field(
None,
description="A list of strings representing a pointer to the root object which contains any oauth parameters in the ConnectorSpecification.\nExamples:\nif oauth parameters were contained inside the top level, rootObject=[] If they were nested inside another object {'credentials': {'app_id' etc...}, rootObject=['credentials'] If they were inside a oneOf {'switch': {oneOf: [{client_id...}, {non_oauth_param]}}, rootObject=['switch', 0] ",
)
oauthFlowInitParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject needed to obtain the initial refresh/access tokens for the OAuth flow. Each inner array represents the path in the rootObject of the referenced field. For example. Assume the rootObject contains params 'app_secret', 'app_id' which are needed to get the initial refresh token. If they are not nested in the rootObject, then the array would look like this [['app_secret'], ['app_id']] If they are nested inside an object called 'auth_params' then this array would be [['auth_params', 'app_secret'], ['auth_params', 'app_id']]",
)
oauthFlowOutputParameters: Optional[List[List[str]]] = Field(
None,
description="Pointers to the fields in the rootObject which can be populated from successfully completing the oauth flow using the init parameters. This is typically a refresh/access token. Each inner array represents the path in the rootObject of the referenced field.",
)
class AuthType(Enum):
oauth2_0 = "oauth2.0"
class AuthSpecification(BaseModel):
auth_type: Optional[AuthType] = None
oauth2Specification: Optional[OAuth2Specification] = Field(
None,
description="If the connector supports OAuth, this field should be non-null.",
)
class AuthFlowType(Enum):
oauth2_0 = "oauth2.0"
oauth1_0 = "oauth1.0"
class OAuthConfigSpecification(BaseModel):
oauth_user_input_from_connector_config_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations used as input to OAuth.\nMust be a valid non-nested JSON that refers to properties from ConnectorSpecification.connectionSpecification\nusing special annotation 'path_in_connector_config'.\nThese are input values the user is entering through the UI to authenticate to the connector, that might also shared\nas inputs for syncing data via the connector.\n\nExamples:\n\nif no connector values is shared during oauth flow, oauth_user_input_from_connector_config_specification=[]\nif connector values such as 'app_id' inside the top level are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['app_id']\n }\n }\nif connector values such as 'info.app_id' nested inside another object are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['info', 'app_id']\n }\n }",
)
complete_oauth_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations produced by the OAuth flows as they are\nreturned by the distant OAuth APIs.\nMust be a valid JSON describing the fields to merge back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_output_specification={\n refresh_token: {\n type: string,\n path_in_connector_config: ['credentials', 'refresh_token']\n }\n }",
)
complete_oauth_server_input_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations.\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nserver when completing an OAuth flow (typically exchanging an auth code for refresh token).\n\nExamples:\n\n complete_oauth_server_input_specification={\n client_id: {\n type: string\n },\n client_secret: {\n type: string\n }\n }",
)
complete_oauth_server_output_specification: Optional[Dict[str, Any]] = Field(
None,
description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations that\nalso need to be merged back into the connector configuration at runtime.\nThis is a subset configuration of `complete_oauth_server_input_specification` that filters fields out to retain only the ones that\nare necessary for the connector to function with OAuth. (some fields could be used during oauth flows but not needed afterwards, therefore\nthey would be listed in the `complete_oauth_server_input_specification` but not `complete_oauth_server_output_specification`)\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nconnector when using OAuth flow APIs.\nThese fields are to be merged back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_server_output_specification={\n client_id: {\n type: string,\n path_in_connector_config: ['credentials', 'client_id']\n },\n client_secret: {\n type: string,\n path_in_connector_config: ['credentials', 'client_secret']\n }\n }",
)
class AirbyteStream(BaseModel):
class Config:
extra = Extra.allow
name: str = Field(..., description="Stream's name.")
json_schema: Dict[str, Any] = Field(..., description="Stream schema using Json Schema specs.")
supported_sync_modes: Optional[List[SyncMode]] = None
source_defined_cursor: Optional[bool] = Field(
None,
description="If the source defines the cursor field, then any other cursor field inputs will be ignored. If it does not, either the user_provided one is used, or the default one is used as a backup.",
)
default_cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. If not provided by the source, the end user will have to specify the comparable themselves.",
)
source_defined_primary_key: Optional[List[List[str]]] = Field(
None,
description="If the source defines the primary key, paths to the fields that will be used as a primary key. If not provided by the source, the end user will have to specify the primary key themselves.",
)
namespace: Optional[str] = Field(
None,
description="Optional Source-defined namespace. Currently only used by JDBC destinations to determine what schema to write to. Airbyte streams from the same sources should have the same namespace.",
)
class ConfiguredAirbyteStream(BaseModel):
class Config:
extra = Extra.allow
stream: AirbyteStream
sync_mode: SyncMode
cursor_field: Optional[List[str]] = Field(
None,
description="Path to the field that will be used to determine if a record is new or modified since the last sync. This field is REQUIRED if `sync_mode` is `incremental`. Otherwise it is ignored.",
)
destination_sync_mode: DestinationSyncMode
primary_key: Optional[List[List[str]]] = Field(
None,
description="Paths to the fields that will be used as primary key. This field is REQUIRED if `destination_sync_mode` is `*_dedup`. Otherwise it is ignored.",
)
class AdvancedAuth(BaseModel):
auth_flow_type: Optional[AuthFlowType] = None
predicate_key: Optional[List[str]] = Field(
None,
description="Json Path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.",
)
predicate_value: Optional[str] = Field(
None,
description="Value of the predicate_key fields for the advanced auth to be applicable.",
)
oauth_config_specification: Optional[OAuthConfigSpecification] = None
class ConnectorSpecification(BaseModel):
class Config:
extra = Extra.allow
documentationUrl: Optional[AnyUrl] = None
changelogUrl: Optional[AnyUrl] = None
connectionSpecification: Dict[str, Any] = Field(
...,
description="ConnectorDefinition specific blob. Must be a valid JSON string.",
)
supportsIncremental: Optional[bool] = Field(None, description="If the connector supports incremental mode or not.")
supportsNormalization: Optional[bool] = Field(False, description="If the connector supports normalization or not.")
supportsDBT: Optional[bool] = Field(False, description="If the connector supports DBT or not.")
supported_destination_sync_modes: Optional[List[DestinationSyncMode]] = Field(
None, description="List of destination sync modes supported by the connector"
)
authSpecification: Optional[AuthSpecification] = Field(None, description="deprecated, switching to advanced_auth instead")
advanced_auth: Optional[AdvancedAuth] = Field(
None,
description="Additional and optional specification object to describe what an 'advanced' Auth flow would need to function.\n - A connector should be able to fully function with the configuration as described by the ConnectorSpecification in a 'basic' mode.\n - The 'advanced' mode provides easier UX for the user with UI improvements and automations. However, this requires further setup on the\n server side by instance or workspace admins beforehand. The trade-off is that the user does not have to provide as many technical\n inputs anymore and the auth process is faster and easier to complete.",
)
class AirbyteCatalog(BaseModel):
class Config:
extra = Extra.allow
streams: List[AirbyteStream]
class ConfiguredAirbyteCatalog(BaseModel):
class Config:
extra = Extra.allow
streams: List[ConfiguredAirbyteStream]
class AirbyteMessage(BaseModel):
class Config:
extra = Extra.allow
type: Type = Field(..., description="Message type")
log: Optional[AirbyteLogMessage] = Field(
None,
description="log message: any kind of logging you want the platform to know about.",
)
spec: Optional[ConnectorSpecification] = None
connectionStatus: Optional[AirbyteConnectionStatus] = None
catalog: Optional[AirbyteCatalog] = Field(None, description="catalog message: the catalog")
record: Optional[AirbyteRecordMessage] = Field(None, description="record message: the record")
state: Optional[AirbyteStateMessage] = Field(
None,
description="schema message: the state. Must be the last message produced. The platform uses this information",
)
class AirbyteProtocol(BaseModel):
airbyte_message: Optional[AirbyteMessage] = None
configured_airbyte_catalog: Optional[ConfiguredAirbyteCatalog] = None
| [
[
[
157,
168
]
],
[
[
187,
191
],
[
311,
315
],
[
1097,
1101
],
[
1446,
1450
],
[
1664,
1668
],
[
1765,
1769
],
[
3524,
3528
],
[
3828,
3832
]
],
[
[
211,
214
],
[
647,
650
],
[
1035,
1038
],
[
4009,
4012
],
[
5227,
5230
],
[
5922,
5925
],
[
6577,
6580
],
[
8111,
8114
],
[
10808,
10811
]
],
[
[
216,
220
],
[
637,
641
],
[
1025,
1029
],
[
3999,
4003
],
[
5217,
5221
],
[
5912,
5916
],
[
6567,
6571
],
[
8101,
8105
],
[
10798,
10802
]
],
[
[
222,
226
],
[
1968,
1972
],
[
2507,
2511
],
[
2512,
2516
],
[
3181,
3185
],
[
3186,
3190
],
[
8218,
8222
],
[
8556,
8560
],
[
8853,
8857
],
[
8858,
8862
],
[
9543,
9547
],
[
9861,
9865
],
[
9866,
9870
],
[
10184,
10188
],
[
11315,
11319
],
[
12349,
12353
],
[
12474,
12478
]
],
[
[
228,
236
],
[
847,
855
],
[
1626,
1634
],
[
1959,
1967
],
[
2498,
2506
],
[
3172,
3180
],
[
3610,
3618
],
[
3661,
3669
],
[
3990,
3998
],
[
5208,
5216
],
[
5903,
5911
],
[
6558,
6566
],
[
8209,
8217
],
[
8268,
8276
],
[
8547,
8555
],
[
8844,
8852
],
[
9125,
9133
],
[
9534,
9542
],
[
9852,
9860
],
[
10126,
10134
],
[
10175,
10183
],
[
10377,
10385
],
[
10549,
10557
],
[
10703,
10711
],
[
10745,
10753
],
[
10953,
10961
],
[
11075,
11083
],
[
11185,
11193
],
[
11306,
11314
],
[
11466,
11474
],
[
11589,
11597
],
[
12651,
12659
],
[
12811,
12819
],
[
12873,
12881
],
[
12927,
12935
],
[
13022,
13030
],
[
13120,
13128
],
[
13356,
13364
],
[
13420,
13428
]
],
[
[
238,
243
],
[
1973,
1978
]
],
[
[
266,
272
],
[
10712,
10718
],
[
10754,
10760
]
],
[
[
274,
283
],
[
491,
500
],
[
956,
965
],
[
1246,
1255
],
[
1535,
1544
],
[
1884,
1893
],
[
3583,
3592
],
[
3920,
3929
],
[
7968,
7977
],
[
9407,
9416
],
[
10094,
10103
],
[
10622,
10631
],
[
12277,
12286
],
[
12402,
12411
],
[
12527,
12536
],
[
13323,
13332
]
],
[
[
285,
290
],
[
537,
542
],
[
1002,
1007
],
[
1292,
1297
],
[
1581,
1586
],
[
1930,
1935
],
[
8014,
8019
],
[
9453,
9458
],
[
10668,
10673
],
[
12323,
12328
],
[
12448,
12453
],
[
12573,
12578
]
],
[
[
292,
297
],
[
568,
573
],
[
654,
659
],
[
718,
723
],
[
863,
868
],
[
1042,
1047
],
[
1324,
1329
],
[
1389,
1394
],
[
1993,
1998
],
[
2526,
2531
],
[
3200,
3205
],
[
3693,
3698
],
[
4017,
4022
],
[
5235,
5240
],
[
5930,
5935
],
[
6585,
6590
],
[
8043,
8048
],
[
8118,
8123
],
[
8285,
8290
],
[
8569,
8574
],
[
8872,
8877
],
[
9141,
9146
],
[
9556,
9561
],
[
9880,
9885
],
[
10197,
10202
],
[
10393,
10398
],
[
10815,
10820
],
[
10970,
10975
],
[
11092,
11097
],
[
11202,
11207
],
[
11344,
11349
],
[
11496,
11501
],
[
11614,
11619
],
[
12603,
12608
],
[
12681,
12686
],
[
12954,
12959
],
[
13055,
13060
],
[
13152,
13157
]
],
[
[
306,
310
],
[
12596,
12600
]
],
[
[
470,
490
],
[
13031,
13051
]
],
[
[
936,
955
],
[
13129,
13148
]
],
[
[
1091,
1096
],
[
1316,
1321
]
],
[
[
1228,
1245
],
[
12660,
12677
]
],
[
[
1439,
1445
],
[
1606,
1612
]
],
[
[
1511,
1534
],
[
12882,
12905
]
],
[
[
1655,
1663
],
[
8223,
8231
],
[
9507,
9515
]
],
[
[
1745,
1764
],
[
9815,
9834
],
[
11320,
11339
]
],
[
[
1864,
1883
],
[
3670,
3689
]
],
[
[
3515,
3523
],
[
3619,
3627
]
],
[
[
3565,
3582
],
[
11475,
11492
]
],
[
[
3815,
3827
],
[
10135,
10147
]
],
[
[
3895,
3919
],
[
10558,
10582
]
],
[
[
7954,
7967
],
[
9478,
9491
],
[
12354,
12367
]
],
[
[
9383,
9406
],
[
12479,
12502
]
],
[
[
10081,
10093
],
[
11598,
11610
]
],
[
[
10599,
10621
],
[
12820,
12842
]
],
[
[
12262,
12276
],
[
12936,
12950
]
],
[
[
12377,
12401
],
[
13429,
13453
]
],
[
[
12512,
12526
],
[
13365,
13379
]
],
[
[
13307,
13322
]
]
] |
import os, collections, sqlite3
from flask import Flask, render_template
from flask.ext.bootstrap import Bootstrap
from AsciiDammit import asciiDammit
app = Flask(__name__)
bootstrap = Bootstrap(app)
import util as wpu
configDict = {}
appDataDirDict = {}
appName = "waypointapp"
@app.route('/')
def index():
appNames = appDataDirDict.keys()
return render_template('index.html', appNames=appNames)
@app.route('/reportAppIndex/<appName>')
def reportAppIndex(appName):
'''
Lists the runs for the assay.
'''
answer = []
for app_name, app_dir in appDataDirDict.items():
if appName == app_name:
dirname, dirnames, filenames = next(os.walk(app_dir))
# ignore the folder named "scrap"
answer.extend([(app_name, run_id) for run_id in [x for x in dirnames if x != "scrap"]])
return render_template('reportAppIndex.html', app_name=appName, answer=answer)
@app.route('/report_app/<app_name>/<run_id>')
def report_app(app_name, run_id):
return reportHelper(appDataDirDict[app_name], run_id, app_name)
def reportHelper(localAppDatadir, run_id, app_name):
# list all files in the report folder
dirname, dirnames, filenames = next(os.walk(localAppDatadir+'/'+run_id))
filepaths = ["file://localhost/"+dirname+"/"+z for z in filenames ]
# identify all png files in the directory and encode it into database
images = [x for x in filenames if str(x).endswith('.png')]
imagepaths = [dirname+"/"+x for x in images]
imagetags = []
for ipath in imagepaths:
data_uri = open(ipath, 'rb').read().encode('base64').replace('\n', '')
img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
imagetags.append(img_tag)
# identify waypoint databases in the folder
databases = [dirname+'/'+x for x in filenames if str(x).endswith('waypoint.sqlite') ]
dbTables = collections.OrderedDict()
colnames = {}
if databases:
for db in databases:
conn = sqlite3.connect(db)
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
tblNms = sorted([tblNm[0] for tblNm in c.fetchall()])
# reorder tblNms according to tableOrder
x = [d for d in configDict['applications'] if d['appName'] == app_name][0]
if x and 'tableOrder' in x.keys():
tableOrder = x['tableOrder']
tn_in_db = []
for tn in tableOrder:
if tn in tblNms:
tn_in_db.append(tn)
tblNms.remove(tn)
tblNms = tn_in_db + tblNms
tblTags= ["#%s"%tblNm for tblNm in tblNms]
# Iterate over individual tables and retrieve the row data for display
for tblNm in tblNms:
rowcount = [row for row in c.execute("SELECT count(*) row_count FROM %s"%tblNm)][0][0]
if rowcount < 500:
rows = c.execute('select * from %s'%tblNm)
# force ascii conversion for display
colnames[tblNm] = [asciiDammit(description[0]) for description in c.description]
dbTables[tblNm] = [[wpu.renderHtmlTableCell(x) for x in row] for row in rows]
conn.close()
return render_template('report.html', dbpaths=databases, run_id=run_id, tableNames=tblTags, filenames=filenames, filepaths=filepaths, imagetags=imagetags, dbTables=dbTables, colnames=colnames, app_name=app_name)
if __name__ == '__main__':
# read in the configuration file, then run the server
configDict, appDataDirDict = wpu.loadConfig(configFile = 'appconfig.json')
app.run(debug=True, host='0.0.0.0', port=5757)
| [
[
[
7,
9
],
[
642,
644
],
[
1144,
1146
]
],
[
[
11,
22
],
[
1786,
1797
]
],
[
[
24,
31
],
[
1875,
1882
]
],
[
[
50,
55
],
[
158,
163
]
],
[
[
57,
72
],
[
354,
369
],
[
796,
811
],
[
2925,
2940
]
],
[
[
105,
114
],
[
186,
195
]
],
[
[
139,
150
],
[
2755,
2766
]
],
[
[
152,
155
],
[
196,
199
],
[
284,
287
],
[
405,
408
],
[
870,
873
],
[
3290,
3293
]
],
[
[
174,
183
]
],
[
[
209,
220
],
[
3243,
3246
],
[
2842,
2845
]
],
[
[
221,
231
],
[
2104,
2114
]
],
[
[
237,
251
],
[
324,
338
],
[
553,
567
],
[
970,
984
]
],
[
[
258,
265
]
],
[
[
303,
308
]
],
[
[
448,
462
]
],
[
[
919,
929
]
],
[
[
1019,
1031
],
[
957,
969
]
],
[
[
3214,
3224
],
[
2104,
2114
]
],
[
[
3226,
3240
],
[
324,
338
],
[
553,
567
],
[
970,
984
]
]
] |
"""
Created: 16 August 2018
Last Updated: 16 August 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Class for performing deep learning in pytorch
Designed for running on desktop at TAMU
with specific set of software installed
--> not guaranteed to work in CMSSW environment!
Does not use ROOT directly.
Instead, this is setup to use flat ntuples
that are accessed via uproot.
> UPROOT: https://github.com/scikit-hep/uproot
> KERAS: https://keras.io/
> TENSORFLOW: https://www.tensorflow.org/
> PYTORCH: http://pytorch.org/
> LWTNN: https://github.com/lwtnn/lwtnn
"""
import json
import util
import datetime
import collections
from deepLearning import DeepLearning
import uproot
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as tf
from torch.autograd import Variable
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_curve
class LeopardNet(nn.Module):
"""Neural Network for Leopard in PyTorch
Adapted from (16 August 2018)
https://github.com/thongonary/surf18-tutorial/blob/master/tuto-8-torch.ipynb
"""
def __init__(self,layers):
super(LeopardNet,self).__init__()
self.dense = nn.ModuleList()
for l,layer in enumerate(layers):
self.dense.append( nn.Linear(layer['in'],layer['out']) )
def forward(self, x):
"""All the computation steps of the input are defined in this function"""
nlayers = len(self.dense)
for i,d in enumerate(self.dense):
x = d(x)
x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x)
return x
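# Usage sketch (illustrative sizes only): LeopardNet is built from a list of
# {'in', 'out'} layer dicts, mirroring how build_model() assembles them below.
#
#   net = LeopardNet([{'in': 10, 'out': 32}, {'in': 32, 'out': 1}])
#   out = net(torch.rand(4, 10))   # forward pass on a batch of 4 examples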
class DeepLearningTorch(DeepLearning):
"""Deep Learning pytorch class"""
def __init__(self):
DeepLearning.__init__(self)
## PyTorch objects
self.loss_fn = None # pytorch loss function
self.torch_opt = None # pytorch optimizer
def initialize(self): #,config):
"""Initialize a few parameters after they've been set by user"""
DeepLearning.initialize(self)
return
## Specific functions to perform training/inference tasks
def build_model(self):
"""Construct the NN model -- only Keras support for now"""
self.msg_svc.INFO("DLPYTORCH : Build the neural network model")
## Declare the model
layers = []
layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} )
for i,n in enumerate(self.nNodes):
if i==len(self.nNodes)-1: continue
layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} )
layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} )
self.model = LeopardNet(layers)
self.model.cuda()
self.loss_fn = torch.nn.BCELoss()
self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) #1e-4)
return
    def train_epoch(self,X,Y):
        """Train on (X, Y) one mini-batch at a time and return the per-batch losses."""
losses = []
for beg_i in range(0, len(X), self.batch_size):
x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:])
y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size])
x_batch = Variable(x_batch).cuda()
y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() # modify dimensions (X,) -> (X,1)
self.torch_opt.zero_grad()
y_hat = self.model(x_batch) # forward
loss = self.loss_fn(y_hat, y_batch) # compute loss
loss.backward() # compute gradients
self.torch_opt.step() # update weights
losses.append(loss.data.cpu().numpy())
return losses
def train_model(self):
"""Setup for training the model using k-fold cross-validation"""
X = self.df[self.features].values
Y = self.df['target'].values
        kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed)  # NB: `seed` is not defined in this module; it is assumed to be provided elsewhere
nsplits = kfold.get_n_splits(X,Y)
cvpredictions = [] # compare outputs from each cross-validation
self.msg_svc.INFO("DLPYTORCH : Fitting K-Fold cross validations")
for ind,(train,test) in enumerate(kfold.split(X,Y)):
self.msg_svc.INFO("DLPYTORCH : - Fitting K-Fold {0}".format(ind))
Y_train = Y[train]
Y_test = Y[test]
# -- store test/train data from each k-fold as histograms (to compare later)
h_tests = {}
h_trains = {}
            for n,v in self.targets.items():
                h_tests[n] = ROOT.TH1D("test_"+n,"test_"+n,10,0,10)    # NB: ROOT is referenced here but never imported in this module
                h_trains[n] = ROOT.TH1D("train_"+n,"train_"+n,10,0,10)
# fill histogram for each target
            for (n,v) in self.targets.items():  # assuming name/value pairs are intended here, matching the histogram keys above
[h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]]
[h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]]
## Fit the model to training data & save the history
self.model.train()
e_losses = []
for t in range(self.epochs):
e_losses += self.train_epoch(X[train],Y_train)
self.msg_svc.INFO("DLPYTORCH : Epoch {0} -- Loss {1}".format(t,e_losses[-1]))
self.histories.append(e_losses)
# evaluate the model
self.msg_svc.DEBUG("DLPYTORCH : Evaluate the model: ")
self.model.eval()
# Evaluate training sample
self.msg_svc.INFO("DLPYTORCH : Predictions from training sample")
train_predictions = self.predict(X[train])
self.train_predictions.append(train_predictions)
# Evaluate test sample
self.msg_svc.INFO("DLPYTORCH : Predictions from testing sample")
test_predictions = self.predict(X[test])
self.test_predictions.append(test_predictions)
# Make ROC curve from test sample
self.msg_svc.INFO("DLPYTORCH : Make ROC curves")
fpr,tpr,_ = roc_curve(Y[test], test_predictions)
self.fpr.append(fpr)
self.tpr.append(tpr)
# Plot the predictions to compare test/train
self.msg_svc.INFO("DLPYTORCH : Plot the train/test predictions")
self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets
self.msg_svc.INFO("DLPYTORCH : Finished K-Fold cross-validation: ")
self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}
self.msg_svc.INFO("DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions)))
return
def predict(self,data=None):
"""Return the prediction from a test sample"""
self.msg_svc.DEBUG("DLPYTORCH : Get the DNN prediction")
if data is None:
self.msg_svc.ERROR("DLPYTORCH : predict() given NoneType data. Returning -999.")
return -999.
data = torch.from_numpy(data)
return self.model( Variable(data,volatile=True).cuda() )
def load_model(self,from_lwtnn=False):
"""Load existing model to make plots or predictions"""
output = self.output_dir+'/'+self.model_name
self.model.load_state_dict(torch.load(output))
self.model.eval()
return
def save_model(self,to_lwtnn=False):
"""Save the model for use later"""
output = self.output_dir+'/'+self.model_name
torch.save(self.model.state_dict(),output)
return
## THE END ##
| [
[
[
636,
640
]
],
[
[
648,
652
]
],
[
[
660,
668
]
],
[
[
676,
687
]
],
[
[
714,
726
],
[
1722,
1734
],
[
1807,
1819
],
[
2090,
2102
]
],
[
[
735,
741
]
],
[
[
749,
760
],
[
4923,
4925
],
[
5000,
5002
],
[
6567,
6569
],
[
6596,
6598
],
[
6704,
6706
],
[
6728,
6730
]
],
[
[
768,
780
]
],
[
[
789,
794
],
[
2822,
2827
],
[
2866,
2871
],
[
3100,
3105
],
[
3173,
3178
],
[
7081,
7086
],
[
7365,
7370
],
[
7572,
7577
]
],
[
[
802,
816
],
[
996,
998
],
[
1278,
1280
],
[
1367,
1369
]
],
[
[
824,
849
],
[
1632,
1634
],
[
1664,
1666
]
],
[
[
877,
885
],
[
3244,
3252
],
[
3291,
3299
],
[
7132,
7140
]
],
[
[
923,
938
],
[
3943,
3958
]
],
[
[
967,
976
],
[
6115,
6124
]
],
[
[
985,
995
],
[
1229,
1239
],
[
2751,
2761
]
],
[
[
1704,
1721
]
]
] |
import torch
import os.path as osp
import sys
from torch.autograd import Variable
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, cur_dir)
import torch_nndistance as NND
p1 = torch.rand(10, 1000, 3)
p2 = torch.rand(10, 1500, 3)
points1 = Variable(p1, requires_grad=True)
points2 = p2
points1 = points1.cuda()
print(points1.requires_grad)
points2 = points2.cuda()
dist1, dist2 = NND.nnd(points1, points2)
print(dist1, dist2)
loss = torch.sum(dist1)
print("loss", loss)
loss.backward()
print(points1.grad, points2.grad)
print("====================")
points1 = Variable(p1.cuda(), requires_grad=True)
points2 = p2.cuda()
dist1, dist2 = NND.nnd(points1, points2)
print(dist1, dist2)
loss = torch.sum(dist1)
print("loss", loss)
loss.backward()
print(points1.grad, points2.grad)
| [
[
[
7,
12
],
[
194,
199
],
[
223,
228
],
[
450,
455
],
[
706,
711
]
],
[
[
20,
34
],
[
93,
96
],
[
105,
108
]
],
[
[
42,
45
],
[
128,
131
]
],
[
[
73,
81
],
[
257,
265
],
[
578,
586
]
],
[
[
83,
90
],
[
147,
154
]
],
[
[
163,
186
],
[
397,
400
],
[
653,
656
]
],
[
[
189,
191
],
[
266,
268
],
[
587,
589
]
],
[
[
218,
220
],
[
300,
302
],
[
628,
630
]
],
[
[
247,
254
],
[
313,
320
]
],
[
[
290,
297
],
[
367,
374
]
],
[
[
303,
310
],
[
334,
341
],
[
405,
412
],
[
509,
516
]
],
[
[
357,
364
],
[
414,
421
],
[
523,
530
]
],
[
[
382,
387
],
[
429,
434
],
[
460,
465
]
],
[
[
389,
394
],
[
436,
441
]
],
[
[
443,
447
],
[
481,
485
],
[
487,
491
]
],
[
[
568,
575
],
[
661,
668
],
[
765,
772
]
],
[
[
618,
625
],
[
670,
677
],
[
779,
786
]
],
[
[
638,
643
],
[
685,
690
],
[
716,
721
]
],
[
[
645,
650
],
[
692,
697
]
],
[
[
699,
703
],
[
737,
741
],
[
743,
747
]
]
] |
from geographiclib.geodesic import Geodesic
from pyproj import CRS, Transformer
from .geometry import Vector, Line
def azimuth(p1: Vector, p2: Vector):
""":return: azimuth of geodesic through p1 and p2 in p1 with WGS84"""
res = Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)
return res['azi1']
def dist_m(a, b):
"""
:param a: lon lat point
:param b: lon lat point
:return: distance between a and b in meters
"""
res = Geodesic.WGS84.Inverse(a.y, a.x, b.y, b.x)
return res['s12']
def mercator_project(origin: Vector, azimuth, points: [Vector], ellps='WGS84'):
"""
    Perform an oblique Mercator projection of a given list of points with the
pseudoequator defined by the given line.
Formulas from DOI 10.3133/pp1395 p.69 (Map projections: A working manual)
:param origin: (lon, lat) that will become (0, 0) in projection
:param azimuth: azimuth in degrees of origin defining the direction of the
geodesic that becomes the new equator (y=0) in projection
:param points: iterable of (lon,lat) Vector instance
:param ellps: proj ellipsoid identifier for ellipsoid to use as model for
the globe. Defaults to WGS84.
:return: iterable of (x, y) Vector instances in the coordinate system with
unit 1 meter
"""
base = CRS.from_user_input(4326)
mercator = CRS(f'+proj=omerc +lonc={origin.x} +lat_0={origin.y} '
f'+alpha={azimuth} +gamma=0 +ellps={ellps}')
t = Transformer.from_crs(base, mercator)
for p in points:
res = t.transform(p.y, p.x)
yield Vector(res[1], res[0])
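# Usage sketch: Vector here is whatever .geometry provides, assumed to expose
# longitude as .x and latitude as .y as used above; the coordinates are illustrative.
#
#   origin = Vector(24.94, 60.17)
#   az = azimuth(origin, Vector(25.66, 60.98))
#   xy = list(mercator_project(origin, az, [Vector(25.0, 60.2)]))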
| [
[
[
35,
43
],
[
239,
247
],
[
460,
468
]
],
[
[
63,
66
],
[
1308,
1311
],
[
1349,
1352
]
],
[
[
68,
79
],
[
1476,
1487
]
],
[
[
103,
109
],
[
134,
140
],
[
146,
152
],
[
556,
562
],
[
582,
588
],
[
1585,
1591
]
],
[
[
111,
115
]
],
[
[
122,
129
]
],
[
[
316,
322
]
],
[
[
531,
547
]
]
] |
"""
Segment_tree creates a segment tree with a given array and function,
allowing queries to be done later in log(N) time.
The function takes 2 values and returns a value of the same type.
"""
class SegmentTree:
def __init__(self, arr, function):
self.segment = [0 for x in range(3 * len(arr) + 3)]
self.arr = arr
self.fn = function
        self.make_tree(0, 0, len(arr) - 1)
def make_tree(self, i, l, r):
if l == r:
self.segment[i] = self.arr[l]
elif l < r:
self.make_tree(2 * i + 1, l, int((l + r) / 2))
self.make_tree(2 * i + 2, int((l + r) / 2) + 1, r)
self.segment[i] = self.fn(self.segment[2 * i + 1], self.segment[2 * i + 2])
def __query(self, i, L, R, l, r):
if l > R or r < L or L > R or l > r:
return None
if L >= l and R <= r:
return self.segment[i]
val1 = self.__query(2 * i + 1, L, int((L + R) / 2), l, r)
val2 = self.__query(2 * i + 2, int((L + R + 2) / 2), R, l, r)
print(L, R, " returned ", val1, val2)
if val1 != None:
if val2 != None:
return self.fn(val1, val2)
return val1
return val2
def query(self, L, R):
return self.__query(0, 0, len(self.arr) - 1, L, R)
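# Note on the examples in the docstring below: the function must accept two
# scalar arguments (e.g. max, min, or lambda a, b: a + b); the builtin sum
# expects an iterable as its first argument, so SegmentTree([...], sum) would
# fail inside query().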
"""
Example -
mytree = SegmentTree([2,4,5,3,4],max)
mytree.query(2,4)
mytree.query(0,3) ...
mytree = SegmentTree([4,5,2,3,4,43,3],sum)
mytree.query(1,8)
...
"""
| [
[
[
188,
199
]
]
] |
import unittest
from rfapi.error import JsonParseError, MissingAuthError
class ApiClientTest(unittest.TestCase):
def test_json_parse_error(self):
resp = type('', (object,), {"content": ""})()
msg = "Could not parse"
e = JsonParseError(msg, resp)
self.assertEqual(str(e), msg)
def test_missing_auth_error(self):
e = MissingAuthError()
self.assertTrue("API" in str(e))
| [
[
[
7,
15
],
[
95,
103
]
],
[
[
40,
54
],
[
250,
264
]
],
[
[
56,
72
],
[
366,
382
]
],
[
[
81,
94
]
]
] |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def is_valid(array, exception=False):
pass
# """
# Args:
# array (#ak.Array, #ak.Record, #ak.layout.Content, #ak.layout.Record, #ak.ArrayBuilder, #ak.layout.ArrayBuilder):
# Array or record to check.
# exception (bool): If True, validity errors raise exceptions.
# Returns True if there are no errors and False if there is an error.
# Checks for errors in the structure of the array, such as indexes that run
# beyond the length of a node's `content`, etc. Either an error is raised or
# the function returns a boolean.
# See also #ak.validity_error.
# """
# out = validity_error(array, exception=exception)
# return out is None
| [
[
[
112,
127
]
],
[
[
136,
149
],
[
156,
158
]
],
[
[
151,
153
]
],
[
[
197,
205
]
]
] |
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import os,sys,inspect
import imageio
sys.path.insert(1, os.path.join(sys.path[0], '..')) #go up a dir to import
import CodePy2.funmath as funmath
#import imageio
n = 1.0
sizes = [i/n for i in range(33*int(n))]
xvals = sizes
filenames = []
for expectedlength in sizes:
yvals = []
fig = plt.figure()
for i in sizes:
variance = 1
strength = 1
yvals.append(funmath.getnormval(i,expectedlength,strength,variance))
maxval = mlab.normpdf(expectedlength, expectedlength, np.sqrt(variance))
#yvals[-1] = yvals[-1]*strength/maxval
plt.plot(xvals,yvals)
plt.grid(True)
plt.ylabel('Adjusted weight (A)')
    plt.xlabel('Manhattan distance (M)')
plt.axis([0, 30, 0, 30])
plt.title('Gaussian adjusted matching distances')
plt.suptitle('variance = '+str(variance)+', w = '+str(expectedlength))
filename = 'gaussian/'+'gaussian-'+str(int(expectedlength*n))+'.png'
plt.savefig(filename)
filenames.append(filename)
plt.close()
#plt.show()
#os.system("avconv -y -f image2 -i figs/gaussian-%d.png -r 10 -s 800x600 gaussianvideo.avi")
#turn into gif
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('xbar_demo.gif', images)
| [
[
[
8,
32
],
[
379,
382
],
[
665,
668
],
[
691,
694
],
[
710,
713
],
[
748,
751
],
[
789,
792
],
[
818,
821
],
[
872,
875
],
[
1020,
1023
],
[
1077,
1080
]
],
[
[
40,
63
],
[
548,
552
]
],
[
[
71,
82
],
[
593,
595
]
],
[
[
90,
92
],
[
140,
142
]
],
[
[
93,
96
],
[
121,
124
],
[
153,
156
]
],
[
[
97,
104
]
],
[
[
112,
119
],
[
1271,
1278
],
[
1297,
1304
]
],
[
[
203,
229
],
[
475,
482
]
],
[
[
246,
247
],
[
290,
291
],
[
265,
266
],
[
1005,
1006
]
],
[
[
254,
259
],
[
304,
309
],
[
347,
352
],
[
405,
410
]
],
[
[
296,
301
],
[
674,
679
]
],
[
[
310,
319
],
[
1046,
1055
],
[
1242,
1251
]
],
[
[
329,
343
],
[
496,
510
],
[
561,
575
],
[
577,
591
],
[
926,
940
],
[
990,
1004
]
],
[
[
358,
363
],
[
462,
467
],
[
680,
685
]
],
[
[
373,
376
]
],
[
[
400,
401
],
[
494,
495
]
],
[
[
420,
428
],
[
520,
528
],
[
601,
609
],
[
903,
911
]
],
[
[
441,
449
],
[
511,
519
]
],
[
[
539,
545
]
],
[
[
947,
955
],
[
1032,
1040
],
[
1063,
1071
]
],
[
[
1214,
1220
],
[
1257,
1263
],
[
1330,
1336
]
],
[
[
1230,
1238
],
[
1286,
1294
]
]
] |
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.utils import clones
class LayerNormGoogle(nn.Module):
def __init__(self, features, epsilon=1e-6):
super(LayerNormGoogle, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.epsilon = epsilon
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.epsilon) + self.b_2
class EncoderBlockGoogle(nn.Module):
def __init__(self, layer, num_layers):
super(EncoderBlockGoogle, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = LayerNormGoogle(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class ResidualConnectionGoogle(nn.Module):
def __init__(self, size, keep_prob):
super(ResidualConnectionGoogle, self).__init__()
self.norm = LayerNormGoogle(size)
# TODO: Use dropout interface
self.dropout = nn.Dropout(keep_prob)
def forward(self, input, sublayer):
return input + self.dropout(sublayer(self.norm(input)))
class EncoderLayerGoogle(nn.Module):
def __init__(self, size, attention, feed_forward, keep_prob):
super(EncoderLayerGoogle, self).__init__()
self.size = size
self.attention = attention
self.feed_forward = feed_forward
# Each encoder layer has two sublayers
self.sublayer = clones(ResidualConnectionGoogle(size, keep_prob), 2)
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.attention(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class EncoderClassifier(nn.Module):
def __init__(self, embedding, encoder, classifier, device, is_average=True):
super(EncoderClassifier, self).__init__()
self.embedding = embedding
self.encoder = encoder
self.classifier = classifier
self.device = device
self.is_average = is_average
def forward(self, x, mask=None):
kl_loss = torch.Tensor([0.0])
# Initial x.size() = [length, batch_size]
x = x.permute(1, 0)
# After permute x.size = [batch_size, length]
x = self.embedding(x)
if "cuda" in str(self.device):
x = x.cuda()
kl_loss = kl_loss.cuda()
x = self.encoder(x, mask)
if self.is_average:
# Averaged sentence representation
x = torch.mean(x, dim=1)
x = self.classifier(x)
return x, kl_loss
class Classifier(nn.Module):
def __init__(self, d_model, d_hidden, num_classes, keep_prob):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
self.linear2 = nn.Linear(d_hidden, num_classes)
def forward(self, x):
x = self.dropout(self.relu(self.linear1(x)))
x = self.linear2(x)
return x
class MultiHeadedAttentionGoogle(nn.Module):
def __init__(self, heads=8, d_model=512, keep_prob=0.1):
super(MultiHeadedAttentionGoogle, self).__init__()
assert d_model % heads == 0
self.d_k = d_model // heads
self.heads = heads
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(keep_prob)
def attention(self, query, key, value, mask=None):
# Dot product attention
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
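    # attention() above computes scaled dot-product attention,
    # softmax(Q @ K.transpose(-2, -1) / sqrt(d_k)) @ V, masking out positions where mask == 0.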
def forward(self, query, key, value, mask=None):
num_batches = query.size(0)
if mask is not None:
mask = mask.unsqueeze(1)
# Apply linear projection on the input sequence and split the heads.
query, key, value = [linear(x).view(num_batches, -1, self.heads, self.d_k).transpose(1, 2)
for linear, x in zip(self.linears, (query, key, value))]
        # Apply attention on the projected and split vectors
x, self.attn = self.attention(query, key, value, mask=mask)
# Concat vectors and apply linear
x = x.transpose(1, 2).contiguous().view(num_batches, -1, self.heads * self.d_k)
return self.linears[-1](x)
class PositionalFeedForwardGoogle(nn.Module):
def __init__(self, d_model, d_ff, keep_prob=0.1):
super(PositionalFeedForwardGoogle, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
def forward(self, input):
return self.w_2(self.dropout(self.relu(self.w_1(input))))
class Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, padding_id, use_pretrained_embed, pretrained_weights,
optional_sqrt_mul=False):
super(Embeddings, self).__init__()
# Initialize embeddings
self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_id).cpu()
if use_pretrained_embed:
self.embedding.from_pretrained(pretrained_weights)
self.embed_dim = embed_dim
self.optional_sqrt_mul = optional_sqrt_mul
def forward(self, input):
if self.optional_sqrt_mul:
return self.embedding(input) * math.sqrt(self.embed_dim)
else:
return self.embedding(input)
class PositionalEncodingGoogle(nn.Module):
def __init__(self, d_model, keep_prob=0.1, max_len=5000):
super(PositionalEncodingGoogle, self).__init__()
self.dropout = nn.Dropout(keep_prob)
positional_encoding = torch.zeros(max_len, d_model)
pos = torch.arange(0., max_len).unsqueeze(1)
# Log space
div_term = torch.exp(torch.arange(0., d_model, 2) * (-math.log(10000) / d_model))
positional_encoding[:, 0::2] = torch.sin(pos * div_term)
positional_encoding[:, 1::2] = torch.cos(pos * div_term)
positional_encoding = positional_encoding.unsqueeze(0)
self.register_buffer("pe", positional_encoding)
def forward(self, input):
return self.dropout(input + Variable(self.pe[:, :input.size(1)], requires_grad=False))
class TransformerGoogle:
def __init__(self, args):
super(TransformerGoogle, self).__init__()
self.args_common = args["common_model_properties"]
self.args_specific = args["transformer_google"]
# Device
self.device = self.args_common["device"]
# Input/Output dimensions
self.vocab_size = self.args_common["vocab_size"]
self.embed_dim = self.args_common["embed_dim"]
self.num_class = self.args_common["num_class"]
# Embedding parameters
self.padding_id = self.args_common["padding_id"]
# Condition parameters
self.use_pretrained_embed = self.args_common["use_pretrained_embed"]
self.use_embed_sqrt_mul = self.args_specific["use_embed_sqrt_mul"]
# Pretrained embedding weights
self.pretrained_weights = self.args_common["pretrained_weights"]
# Dropout probabilities for each individual part of the full model.
self.keep_prob_encoder = self.args_specific["keep_prob_encoder"]
self.keep_prob_pe = self.args_specific["keep_prob_pe"]
self.kee_prob_pff = self.args_specific["keep_prob_pff"]
self.keep_prob_attn = self.args_specific["keep_prob_attn"]
self.keep_prob_clf = self.args_specific["keep_prob_clf"]
# Condition parameter for the transformer type (It only supports classification for now)
self.transformer_type = self.args_specific["transformer_type"]
# Number of parallel attention layers for MultiHeadedAttention
self.heads = self.args_specific["heads"]
# Number of encoder layers
self.num_encoder_layers = self.args_specific["num_encoder_layers"]
# Number of hidden count units for Position-Wise Feed-Forward Network
self.num_hidden_pos_ff = self.args_specific["num_hidden_pos_ff"]
# Maximum length of an input
self.max_length = self.args_specific["max_length"]
if self.transformer_type == "classifier":
self.model = self.create_classifier_transformer()
else:
raise ValueError("Transformer can be created as classifier for now!")
def create_classifier_transformer(self):
c = copy.deepcopy
# Initialize individual parts of the full model
# attention = torch.nn.MultiheadAttention(num_heads=self.heads, embed_dim=self.embed_dim,
# dropout=self.keep_prob_attn)
attention = MultiHeadedAttentionGoogle(heads=self.heads, d_model=self.embed_dim, keep_prob=self.keep_prob_attn)
ff = PositionalFeedForwardGoogle(d_model=self.embed_dim, d_ff=self.num_hidden_pos_ff,
keep_prob=self.kee_prob_pff)
embeddings = Embeddings(self.embed_dim, self.vocab_size, self.padding_id, self.use_pretrained_embed,
self.pretrained_weights, optional_sqrt_mul=self.use_embed_sqrt_mul)
positional_embeddings = PositionalEncodingGoogle(d_model=self.embed_dim, keep_prob=self.keep_prob_pe,
max_len=self.max_length)
# Initialize the full model
model = EncoderClassifier(nn.Sequential(embeddings, c(positional_embeddings)),
EncoderBlockGoogle(
EncoderLayerGoogle(self.embed_dim, c(attention), c(ff), self.keep_prob_encoder),
self.num_encoder_layers),
Classifier(self.embed_dim, d_hidden=self.embed_dim // 2, num_classes=self.num_class,
keep_prob=self.keep_prob_clf),
device=self.device)
# Initialize model parameters
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
if __name__ == '__main__':
print("Transformer tests")
plt.figure(figsize=(15, 5))
pe = PositionalEncodingGoogle(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
plt.show()
| [
[
[
7,
11
],
[
8980,
8984
]
],
[
[
19,
23
],
[
3834,
3838
],
[
5885,
5889
],
[
6371,
6375
]
],
[
[
32,
56
],
[
10758,
10761
],
[
10885,
10888
],
[
10941,
10944
],
[
10994,
10997
]
],
[
[
64,
75
],
[
10894,
10896
]
],
[
[
83,
88
],
[
10855,
10860
],
[
375,
380
],
[
429,
434
],
[
2322,
2327
],
[
2730,
2735
],
[
3789,
3794
],
[
4076,
4081
],
[
6206,
6211
],
[
6250,
6255
],
[
6328,
6333
],
[
6338,
6343
],
[
6439,
6444
],
[
6504,
6509
]
],
[
[
96,
110
],
[
235,
237
],
[
686,
688
],
[
1047,
1049
],
[
1414,
1416
],
[
1954,
1956
],
[
2827,
2829
],
[
3292,
3294
],
[
4868,
4870
],
[
5273,
5275
],
[
5999,
6001
],
[
362,
364
],
[
416,
418
],
[
1260,
1262
],
[
2972,
2974
],
[
3024,
3026
],
[
3066,
3068
],
[
3099,
3101
],
[
3553,
3555
],
[
3633,
3635
],
[
5013,
5015
],
[
5057,
5059
],
[
5105,
5107
],
[
5147,
5149
],
[
5528,
5530
],
[
6153,
6155
],
[
9987,
9989
],
[
10646,
10648
]
],
[
[
118,
142
],
[
3954,
3955
]
],
[
[
170,
178
],
[
10846,
10854
],
[
6717,
6725
]
],
[
[
204,
210
],
[
814,
820
],
[
1715,
1721
],
[
3546,
3552
]
],
[
[
219,
234
],
[
309,
324
],
[
860,
875
],
[
1177,
1192
]
],
[
[
667,
685
],
[
755,
773
],
[
10074,
10092
]
],
[
[
1022,
1046
],
[
1114,
1138
],
[
1722,
1746
]
],
[
[
1395,
1413
],
[
1506,
1524
],
[
10132,
10150
]
],
[
[
1936,
1953
],
[
2061,
2078
],
[
9969,
9986
]
],
[
[
2816,
2826
],
[
2920,
2930
],
[
10311,
10321
]
],
[
[
3265,
3291
],
[
3379,
3405
],
[
9248,
9274
]
],
[
[
4840,
4867
],
[
4948,
4975
],
[
9362,
9389
]
],
[
[
5262,
5272
],
[
5442,
5452
],
[
9535,
9545
]
],
[
[
5974,
5998
],
[
10795,
10819
],
[
6087,
6111
],
[
9756,
9780
]
],
[
[
6784,
6801
],
[
6847,
6864
]
],
[
[
10790,
10792
],
[
10835,
10837
]
],
[
[
10831,
10832
],
[
10910,
10911
]
]
] |
from tensorflow import keras
from constants import TRADING_DAYS_PER_WEEK, INDEX_RETURN_INDICATOR_NUMBER
from ..constants import *
MODEL_NAME = 'ifcp_model_ver1_2'
ROLLING_WINDOW_SIZE = TRADING_DAYS_PER_WEEK
def build_model():
fund1_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_RETURN_NAME)
fund1_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND1_BENCHMARK_RETURN_NAME)
fund2_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_RETURN_NAME)
fund2_benchmark_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, 1), name=FUND2_BENCHMARK_RETURN_NAME)
fund1_performance = keras.layers.subtract([fund1_return, fund1_benchmark_return], name='fund1_performance')
fund2_performance = keras.layers.subtract([fund2_return, fund2_benchmark_return], name='fund2_performance')
fund1_attributes = keras.layers.concatenate(
[fund1_return, fund1_benchmark_return, fund1_performance], name='fund1_attributes')
fund2_attributes = keras.layers.concatenate(
[fund2_return, fund2_benchmark_return, fund2_performance], name='fund2_attributes')
fund_attributes_gru = keras.layers.GRU(
12,
kernel_regularizer=keras.regularizers.l2(0.01),
recurrent_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01),
name='fund_attributes_gru',
)
fund1_attributes_after_gru = fund_attributes_gru(fund1_attributes)
fund2_attributes_after_gru = fund_attributes_gru(fund2_attributes)
fund_attributes_after_gru = keras.layers.concatenate(
[fund1_attributes_after_gru, fund2_attributes_after_gru], name='fund_attributes_after_gru')
auxiliary_output = keras.layers.Dense(1, activation='sigmoid', name=AUXILIARY_OUTPUT_NAME)(
fund_attributes_after_gru)
index_return = keras.Input(shape=(ROLLING_WINDOW_SIZE, INDEX_RETURN_INDICATOR_NUMBER), name=INDEX_RETURN_NAME)
index_return_gru = keras.layers.GRU(
35,
kernel_regularizer=keras.regularizers.l2(0.01),
recurrent_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01),
name='index_return_gru',
)
index_return_after_gru = index_return_gru(index_return)
merge = keras.layers.concatenate([fund_attributes_after_gru, index_return_after_gru], name='merge')
x = keras.layers.Dense(64, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(merge)
x = keras.layers.Dense(64, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(x)
x = keras.layers.Dense(16, activation='relu',
kernel_regularizer=keras.regularizers.l2(0.01),
activity_regularizer=keras.regularizers.l1(0.01))(x)
main_output = keras.layers.Dense(1, activation='sigmoid', name=MAIN_OUTPUT_NAME)(x)
model = keras.Model(inputs=[
fund1_return, fund1_benchmark_return, fund2_return, fund2_benchmark_return, index_return],
outputs=[main_output, auxiliary_output])
return model
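# Usage sketch: the FUND*/INDEX/OUTPUT name constants come from ..constants (not
# shown here). Once built, the two-output model would typically be compiled
# before fitting; the optimizer, loss, and loss weights below are illustrative.
#
#   model = build_model()
#   model.compile(optimizer='adam', loss='binary_crossentropy', loss_weights=[1.0, 0.2])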
| [
[
[
23,
28
],
[
249,
254
],
[
346,
351
],
[
443,
448
],
[
540,
545
],
[
643,
648
],
[
755,
760
],
[
867,
872
],
[
1008,
1013
],
[
1153,
1158
],
[
1210,
1215
],
[
1269,
1274
],
[
1327,
1332
],
[
1574,
1579
],
[
1724,
1729
],
[
1852,
1857
],
[
1971,
1976
],
[
2028,
2033
],
[
2087,
2092
],
[
2145,
2150
],
[
2285,
2290
],
[
2385,
2390
],
[
2473,
2478
],
[
2550,
2555
],
[
2594,
2599
],
[
2682,
2687
],
[
2759,
2764
],
[
2799,
2804
],
[
2887,
2892
],
[
2964,
2969
],
[
3014,
3019
],
[
3097,
3102
]
],
[
[
52,
73
],
[
187,
208
]
],
[
[
75,
104
],
[
1892,
1921
]
],
[
[
129,
130
],
[
298,
315
],
[
395,
422
],
[
492,
509
],
[
589,
616
],
[
1773,
1794
],
[
1929,
1946
],
[
3063,
3079
]
],
[
[
132,
142
]
],
[
[
165,
184
],
[
268,
287
],
[
365,
384
],
[
462,
481
],
[
559,
578
],
[
1871,
1890
]
],
[
[
215,
226
]
]
] |
# flake8: noqa
from fugue.extensions.creator.creator import Creator
from fugue.extensions.creator.convert import creator, _to_creator
| [
[
[
60,
67
]
],
[
[
113,
120
]
],
[
[
122,
133
]
]
] |
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
WeChat Official Account: 开源优测 (Open Source Quality Testing)
Email: lymking@foxmail.com
"""
import os
import time
import tempfile
import subprocess
class Process:
def __init__(self, command):
self._command = command
self._process = None
self._error = None
self._out_file = None
self._out_path = None
self._out_fd = None
print(command)
def start(self):
self._out_fd, self._out_path = tempfile.mkstemp(prefix='rfproc_', suffix='.txt', text=True)
self._out_file = open(self._out_path)
try:
self._process = subprocess.Popen(self._command, stdout=self._out_fd,
stderr=subprocess.STDOUT)
except OSError as err:
self._error = str(err)
def is_finished(self):
return self._error is not None or self._process.poll() is not None
def stop(self):
self._process.kill()
def wait(self):
if self._process is not None:
self._process.wait()
def get_output(self, wait_until_finished=False):
"""Returns the output produced by the process.
If ``wait_until_finished`` is True, blocks until the process is
finished and returns all output. Otherwise the currently available
output is returned immediately.
Currently available output depends on buffering and might not include
everything that has been written by the process.
"""
if self._error:
self._close_outputs()
return self._error
if wait_until_finished:
self._process.wait()
output = self._out_file.read()
if self.is_finished():
self._close_outputs()
return output
def _close_outputs(self):
self._out_file.close()
os.close(self._out_fd)
self._remove_tempfile()
def _remove_tempfile(self, attempts=10):
try:
os.remove(self._out_path)
except OSError:
if not attempts:
raise
time.sleep(1)
self._remove_tempfile(attempts - 1)
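# Usage sketch (illustrative command): run a process, wait for it, read its output.
#
#   proc = Process(['python', '--version'])
#   proc.start()
#   proc.wait()
#   print(proc.get_output(wait_until_finished=True))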
| [
[
[
25,
35
]
],
[
[
101,
103
],
[
1839,
1841
],
[
1965,
1967
]
],
[
[
111,
115
],
[
2078,
2082
]
],
[
[
123,
131
],
[
460,
468
]
],
[
[
139,
149
],
[
608,
618
],
[
713,
723
]
],
[
[
158,
165
]
]
] |
"""Inventory requests."""
from collections import defaultdict
from typing import Any, MutableMapping
import requests
from linnapi.request import LinnworksAPIRequest
class GetStockItemIDsBySKU(LinnworksAPIRequest):
"""Return the stock item ID for a SKU."""
URL = "https://eu-ext.linnworks.net/api/Inventory/GetStockItemIdsBySKU"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
skus: list[str] = kwargs["skus"]
return {"request": {"SKUS": skus}}
class GetStockLevel(LinnworksAPIRequest):
"""Return the current stock level for a product by stock item ID."""
URL = "https://eu-ext.linnworks.net/api/Stock/GetStockLevel"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
stock_item_id: str = kwargs["stock_item_id"]
return {"stockItemId": stock_item_id}
class GetStockLevelBatch(LinnworksAPIRequest):
"""Return the stock level for multiple products by stock item ID."""
URL = "https://eu-ext.linnworks.net/api/Stock/GetStockLevel_Batch"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
stock_item_ids: list[str] = kwargs["stock_item_ids"]
return {"request": {"StockItemIDs": stock_item_ids}}
class SetStockLevelBySKU(LinnworksAPIRequest):
"""Update the stock level for a product."""
URL = "https://eu-ext.linnworks.net/api/Stock/UpdateStockLevelsBySKU"
METHOD = LinnworksAPIRequest.POST
@classmethod
def params(cls, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Return request URL parameters."""
return {"changeSource": str(kwargs["change_source"])}
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
location_id: str = kwargs["location_id"]
changes: tuple[tuple[str, int]] = kwargs["changes"]
stock_levels = [
{"SKU": str(sku), "LocationID": location_id, "Level": int(level)}
for sku, level in changes
]
return {"stockLevels": stock_levels}
class AddImageToInventoryItem(LinnworksAPIRequest):
"""
Adds an image to a stock item.
Either `item_number` or `stock_item_id` must be passed.
Kwargs:
image_url (str): The URL of the image to be added.
item_number (str): The SKU of the product to add the image to.
stock_item_id (str): The ID (GUID) of the product to add the image to.
is_main (bool): Is the image the main image for the product.
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/AddImageToInventoryItem"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
item_number: str = kwargs.get("item_number", "")
stock_item_id: str = kwargs.get("stock_item_id", "")
is_main: bool = kwargs["is_main"]
image_url: str = kwargs["image_url"]
request_data = {
"IsMain": is_main,
"ImageUrl": image_url,
}
if not item_number and not stock_item_id:
            raise ValueError("Either `stock_item_id` or `item_number` must be passed.")
if item_number:
request_data["ItemNumber"] = item_number
if stock_item_id:
request_data["StockItemId"] = stock_item_id
return {"request": request_data}
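# Payload sketch: json() only builds the POST body, so it can be inspected without
# calling the API; the SKU and URL below are illustrative.
#
#   AddImageToInventoryItem.json(
#       image_url="https://example.com/img.jpg", item_number="ABC-001", is_main=True
#   )
#   # -> {"request": {"IsMain": True, "ImageUrl": "https://example.com/img.jpg",
#   #                 "ItemNumber": "ABC-001"}}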
class UpdateImages(LinnworksAPIRequest):
"""
Update properties on images.
Kwargs:
row_id (str): ID (GUID) of image, passed as "pkRowId". Required.
stock_item_id (str): The ID (GUID) of the stock item to which the image
            belongs. Required.
        is_main (bool): Set whether the image is the main image or not, passed as "IsMain".
sort_order (int): The position of the image, passed as "SortOrder".
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/UpdateImages"
METHOD = LinnworksAPIRequest.POST
@classmethod
def item_json(cls, **kwargs: Any) -> dict[str, Any]:
"""Return request data for a single image."""
row_id = kwargs.get("row_id")
is_main = kwargs.get("is_main")
sort_order = kwargs.get("sort_order")
checksum_value = kwargs.get("checksum_value")
raw_checksum = kwargs.get("raw_checksum")
stock_item_id = kwargs.get("stock_item_id")
stock_item_int_id = kwargs.get("stock_item_id_int")
image_data = {
"pkRowId": row_id,
"IsMain": is_main,
"SortOrder": sort_order,
"ChecksumValue": checksum_value,
"RawChecksum": raw_checksum,
"StockItemId": stock_item_id,
"StockItemIntId": stock_item_int_id,
}
return {key: value for key, value in image_data.items() if value is not None}
@classmethod
def multi_json(
cls, requests: list[MutableMapping[Any, Any]]
) -> dict[str, Any] | list[Any]:
"""Return request JSON with multiple updates."""
return {"images": [cls.item_json(**request) for request in requests]}
@classmethod
def parse_response(
cls, response: requests.models.Response, *args: Any, **kwargs: Any
) -> str:
"""Parse the request response."""
return response.text
class GetInventoryItemImages(LinnworksAPIRequest):
"""
Use this call to Get inventory item images.
Args:
        inventory_item_id (str): The ID (GUID) of the stock item to retrieve images for,
passed as "InventoryItemId".
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/GetInventoryItemImages"
METHOD = LinnworksAPIRequest.POST
@classmethod
def json(cls, *args: Any, **kwargs: Any) -> dict[str, Any] | list[Any]:
"""Return request JSON post data."""
inventory_item_id = kwargs.get("inventory_item_id")
return {"inventoryItemId": inventory_item_id}
class DeleteImagesFromInventoryItem(LinnworksAPIRequest):
"""
Remove an image from an inventory item.
Kwargs:
image_id (str): ID (GUID) of image, passed as "pkRowId". Required.
stock_item_id (str): The ID (GUID) of the stock item to which the image
            belongs. Required.
"""
URL = "https://eu-ext.linnworks.net/api/Inventory/DeleteImagesFromInventoryItem"
METHOD = LinnworksAPIRequest.POST
@classmethod
def item_json(cls, **kwargs: Any) -> dict[str, Any]:
"""Return request data for a single image."""
stock_item_id = kwargs["stock_item_id"]
image_url = kwargs["image_url"]
return {stock_item_id: [image_url]}
@classmethod
def multi_json(
cls, requests: list[MutableMapping[Any, Any]]
) -> dict[str, Any] | list[Any]:
"""Return request JSON with multiple updates."""
stock_items = defaultdict(list)
for request in requests:
for key, images in cls.item_json(**request).items():
stock_items[key].extend(images)
return {"inventoryItemImages": dict(stock_items)}
@classmethod
def parse_response(
cls, response: requests.models.Response, *args: Any, **kwargs: Any
) -> str:
"""Parse the request response."""
return response.text
class GetItemChangesHistory(LinnworksAPIRequest):
"""Get the stock change history for an item.
Kwargs:
"""
URL = "https://eu-ext.linnworks.net/api/Stock/GetItemChangesHistory"
METHOD = LinnworksAPIRequest.POST
@classmethod
def params(cls, *args: Any, **kwargs: Any) -> dict[str, Any]:
"""Return request JSON post data."""
stock_item_id = kwargs.get("stock_item_id")
location_id = kwargs.get("location_id", "")
entries_per_page = kwargs.get("entries_per_page", 500)
page_number = kwargs.get("page_number", 1)
return {
"stockItemId": stock_item_id,
"locationId": location_id,
"entriesPerPage": entries_per_page,
"pageNumber": page_number,
}
| [
[
[
51,
62
],
[
7181,
7192
]
],
[
[
82,
85
],
[
456,
459
],
[
468,
471
],
[
423,
426
],
[
438,
441
],
[
900,
903
],
[
912,
915
],
[
867,
870
],
[
882,
885
],
[
1370,
1373
],
[
1382,
1385
],
[
1337,
1340
],
[
1352,
1355
],
[
1843,
1846
],
[
1810,
1813
],
[
1825,
1828
],
[
2032,
2035
],
[
2044,
2047
],
[
1999,
2002
],
[
2014,
2017
],
[
3051,
3054
],
[
3063,
3066
],
[
3018,
3021
],
[
3033,
3036
],
[
4382,
4385
],
[
4364,
4367
],
[
5288,
5291
],
[
5300,
5303
],
[
5258,
5261
],
[
5263,
5266
],
[
5539,
5542
],
[
5554,
5557
],
[
6093,
6096
],
[
6105,
6108
],
[
6060,
6063
],
[
6075,
6078
],
[
6781,
6784
],
[
6763,
6766
],
[
7084,
7087
],
[
7096,
7099
],
[
7054,
7057
],
[
7059,
7062
],
[
7501,
7504
],
[
7516,
7519
],
[
7918,
7921
],
[
7885,
7888
],
[
7900,
7903
]
],
[
[
87,
101
],
[
5243,
5257
],
[
7039,
7053
]
],
[
[
110,
118
],
[
5506,
5514
],
[
7468,
7476
]
],
[
[
148,
167
],
[
197,
216
],
[
355,
374
],
[
625,
644
],
[
799,
818
],
[
1089,
1108
],
[
1269,
1288
],
[
1582,
1601
],
[
1740,
1759
],
[
2432,
2451
],
[
2950,
2969
],
[
3771,
3790
],
[
4288,
4307
],
[
5674,
5693
],
[
5992,
6011
],
[
6308,
6327
],
[
6687,
6706
],
[
7635,
7654
],
[
7815,
7834
]
],
[
[
176,
196
]
],
[
[
611,
624
]
],
[
[
1070,
1088
]
],
[
[
1563,
1581
]
],
[
[
2408,
2431
]
],
[
[
3758,
3770
]
],
[
[
5651,
5673
]
],
[
[
6278,
6307
]
],
[
[
7613,
7634
]
]
] |
from flask import Flask, render_template, request
from dashboard_forms import Dashform
#import create_pickle as p_j
import json
import os
app = Flask(__name__)
app.secret_key = 'dash_flask_key'
creddir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'credentials/dash_id.json')
# creddir_2 = os.path.join(os.path.dirname(
# os.path.dirname(os.path.realpath(__file__))), 'credentials')
tempdir = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))), 'www/templates/dash_id_template.json')
def Convert(string):
li = list(string.split(","))
k = []
for i in li:
str(i).replace(' ', '')
k.append(i)
return k
def formatting(string):
string = string.replace("[", "")
string = string.replace("]", "")
string = string.replace("'", "")
string = string.replace(" ", "")
return string
def json_exists(file_name):
return os.path.exists(file_name)
def getinfo():
data = []
if json_exists(creddir):
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
else:
with open(tempdir, "r") as f1, open(creddir, "w+") as f2:
f2.write(f1.read())
            f2.close()
with open(creddir, "r") as rdash_id:
data = json.load(rdash_id)
return data
def save_json(res):
with open(creddir, 'r') as f:
data = json.load(f)
data["Transit"]["T_URL"] = res["T_URL"]
data["Transit"]["T_API_KEY"] = res["T_API_KEY"]
data["Transit"]["Stops"] = Convert(res["Stops"])
data["Transit"]["T_BUS"] = res["T_BUS"]
data["Transit"]["T_BUS_TIME"] = res["T_BUS_TIME"]
data["Weather"]["W_URL"] = res["W_URL"]
data["Weather"]["UNITS"] = res["UNITS"]
data["Weather"]["W_API_KEY"] = res["W_API_KEY"]
data["Geolocation"]["G_URL"] = res["G_URL"]
data["Geolocation"]["G_API_KEY"] = res["G_API_KEY"]
data["Currency"]["C_URL_1"] = res["C_URL_1"]
data["Currency"]["C_API_KEY_1"] = res["C_API_KEY_1"]
data["Currency"]["C_URL_3"] = res["C_URL_3"]
data["Currency"]["C_URL_4"] = res["C_URL_4"]
data["Currency"]["CURR_CHECK"] = Convert(res["CURR_CHECK"])
data["Stocks"]["STOCK_W_URL"] = res["STOCK_W_URL"]
data["Stocks"]["STOCK_WE_URL"] = res["STOCK_WE_URL"]
data["Stocks"]["STOCK_API"] = res["STOCK_API"]
data["Stocks"]["STOCK_CHECK"] = Convert(res["STOCK_CHECK"])
data["Tasklist"]["gsheet_json"] = res["gsheet_json"]
data["Tasklist"]["sheetname"] = res["sheetname"]
data["G_Meetings"]["CREDENTIALS_FILE"] = res["CREDENTIALS_FILE"]
data["News"]["NEWS_URL"] = res["NEWS_URL"]
data["News"]["NEWS_API"] = res["NEWS_API"]
data["News"]["NEWS_SOURCES"] = str(res["NEWS_SOURCES"]).replace(' ', '')
data["System"]["waking_time"] = res["waking_time"]
data["System"]["sleeping_time"] = res["sleeping_time"]
data["System"]["mod_1_choice"] = res["mod_1_choice"]
data["System"]["mod_2_choice"] = res["mod_2_choice"]
data["System"]["mod_3_choice"] = res["mod_3_choice"]
data["System"]["mod_4_choice"] = res["mod_4_choice"]
data["System"]["refresh_time"] = res["refresh_time"]
data["System"]["awake"] = res["awake"]
os.remove(creddir)
with open(creddir, 'w+') as f:
json.dump(data, f, indent=4)
@ app.route('/', methods=['POST', 'GET'])
def login():
form = Dashform()
d_data = getinfo()
form.res_msg.label = ""
if request.method == 'POST':
form.res_msg.label = ""
if request.form['btn'] == 'Submit':
results = request.form
save_json(results)
form.res_msg.label = "Information saved successfully"
'''elif request.form['btn'] == 'Generate Pickle File':
results = request.form
p_j.get_calendar_service(results["CREDENTIALS_FILE"], creddir_2)
'''
d_data = getinfo()
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
elif request.method == 'GET':
# populate the form on start
d_data = getinfo()
form.res_msg.label = ""
form.T_URL.data = str(d_data["Transit"]["T_URL"])
form.T_API_KEY.data = str(d_data["Transit"]["T_API_KEY"])
form.Stops.data = formatting(str(d_data["Transit"]["Stops"]))
form.T_BUS.data = str(d_data["Transit"]["T_BUS"])
form.T_BUS_TIME.data = str(d_data["Transit"]["T_BUS_TIME"])
form.W_URL.data = str(d_data["Weather"]["W_URL"])
form.W_API_KEY.data = str(d_data["Weather"]["W_API_KEY"])
form.UNITS.data = str(d_data["Weather"]["UNITS"])
form.C_URL_1.data = str(d_data["Currency"]["C_URL_1"])
form.C_API_KEY_1.data = str(d_data["Currency"]["C_API_KEY_1"])
form.C_URL_3.data = str(d_data["Currency"]["C_URL_3"])
form.C_URL_4.data = str(d_data["Currency"]["C_URL_4"])
form.CURR_CHECK.data = formatting(str(d_data["Currency"]["CURR_CHECK"]))
form.STOCK_W_URL.data = str(d_data["Stocks"]["STOCK_W_URL"])
form.STOCK_WE_URL.data = str(d_data["Stocks"]["STOCK_WE_URL"])
form.STOCK_API.data = str(d_data["Stocks"]["STOCK_API"])
form.STOCK_CHECK.data = formatting(str(d_data["Stocks"]["STOCK_CHECK"]))
form.G_URL.data = str(d_data["Geolocation"]["G_URL"])
form.G_API_KEY.data = str(d_data["Geolocation"]["G_API_KEY"])
form.gsheet_json.data = str(d_data["Tasklist"]["gsheet_json"])
form.sheetname.data = str(d_data["Tasklist"]["sheetname"])
form.CREDENTIALS_FILE.data = str(d_data["G_Meetings"]["CREDENTIALS_FILE"])
form.NEWS_URL.data = str(d_data["News"]["NEWS_URL"])
form.NEWS_API.data = str(d_data["News"]["NEWS_API"])
form.NEWS_SOURCES.data = formatting(str(d_data["News"]["NEWS_SOURCES"]))
form.waking_time.data = str(d_data["System"]["waking_time"])
form.sleeping_time.data = str(d_data["System"]["sleeping_time"])
form.mod_1_choice.data = str(d_data["System"]["mod_1_choice"])
form.mod_2_choice.data = str(d_data["System"]["mod_2_choice"])
form.mod_3_choice.data = str(d_data["System"]["mod_3_choice"])
form.mod_4_choice.data = str(d_data["System"]["mod_4_choice"])
form.refresh_time.data = str(d_data["System"]["refresh_time"])
form.awake.data = str(d_data["System"]["awake"])
return render_template('Settings.html', form=form)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@ app.route('/shutdown', methods=['GET'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
[
[
18,
23
],
[
151,
156
]
],
[
[
25,
40
],
[
6443,
6458
],
[
8911,
8926
]
],
[
[
42,
49
],
[
3701,
3708
],
[
3774,
3781
],
[
3830,
3837
],
[
6499,
6506
],
[
8995,
9002
]
],
[
[
79,
87
],
[
3627,
3635
]
],
[
[
126,
130
],
[
1125,
1129
],
[
1365,
1369
],
[
1482,
1486
],
[
3525,
3529
]
],
[
[
139,
141
],
[
213,
215
],
[
226,
228
],
[
248,
250
],
[
264,
266
],
[
445,
447
],
[
458,
460
],
[
480,
482
],
[
496,
498
],
[
968,
970
],
[
3461,
3463
]
],
[
[
145,
148
],
[
168,
171
],
[
3561,
3564
],
[
9152,
9155
],
[
9309,
9312
]
],
[
[
203,
210
],
[
1049,
1056
],
[
1078,
1085
],
[
1222,
1229
],
[
1318,
1325
],
[
1446,
1453
],
[
3471,
3478
],
[
3495,
3502
]
],
[
[
435,
442
],
[
1196,
1203
]
],
[
[
574,
581
],
[
1637,
1644
],
[
2308,
2315
],
[
2556,
2563
]
],
[
[
732,
742
],
[
4306,
4316
],
[
4959,
4969
],
[
5250,
5260
],
[
5815,
5825
],
[
6776,
6786
],
[
7429,
7439
],
[
7720,
7730
],
[
8285,
8295
]
],
[
[
932,
943
],
[
1037,
1048
]
],
[
[
1003,
1010
],
[
3652,
3659
],
[
4143,
4150
],
[
6580,
6587
]
],
[
[
1415,
1424
],
[
3856,
3865
]
],
[
[
3606,
3611
]
],
[
[
8964,
8979
],
[
9214,
9229
]
],
[
[
9197,
9205
]
]
] |
class BaseDatabaseClient:
"""Encapsulate backend-specific methods for opening a client shell."""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self, parameters):
raise NotImplementedError(
"subclasses of BaseDatabaseClient must provide a runshell() method"
)
| [
[
[
6,
24
]
]
] |
"""
Unit tests for symupy.api.stream
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
import pytest
# ============================================================================
# INTERNAL IMPORTS
# ============================================================================
from symupy.runtime.logic.publisher import Publisher
from symupy.runtime.logic.subscriber import Subscriber
# ============================================================================
# TESTS AND DEFINITIONS
# ============================================================================
@pytest.fixture
def default_channel():
return ("default",)
@pytest.fixture
def channels():
return ("channel 1", "channel 2")
def test_default_constructor(default_channel):
p = Publisher()
assert p.channels == default_channel
def test_default_attach_observer(default_channel):
p = Publisher()
s = Subscriber(p)
assert p.channels == default_channel
assert p._channels[default_channel[0]][s] == s.update
def test_constructor_channels(channels):
p = Publisher(channels)
assert p.channels == channels
def test_attach_observer(channels):
p = Publisher(channels)
s = Subscriber(p, channels[0])
assert p.channels == channels
assert p._channels[channels[0]][s] == s.update
def test_attach_detach_observer(channels):
p = Publisher(channels)
s = Subscriber(p, channels[0])
assert p._channels[channels[0]][s] == s.update
def test_context_publisher(channels):
with Publisher(channels) as p:
s1 = Subscriber(p, channels[0])
s2 = Subscriber(p, channels[0])
p.dispatch(channels[0])
assert s1._call == 1
assert s2._call == 1
def test_context_observer(channels):
with Publisher(channels) as p:
with Subscriber(p, channels[0]), Subscriber(p, channels[1]):
p.dispatch(channels[0])
def test_context_dispatch(channels):
pass
| [
[
[
232,
238
],
[
712,
718
],
[
777,
783
]
],
[
[
461,
470
],
[
903,
912
],
[
1017,
1026
],
[
1201,
1210
],
[
1301,
1310
],
[
1494,
1503
],
[
1649,
1658
],
[
1893,
1902
]
],
[
[
515,
525
],
[
1037,
1047
],
[
1329,
1339
],
[
1522,
1532
],
[
1688,
1698
],
[
1728,
1738
],
[
1932,
1942
],
[
1960,
1970
]
],
[
[
731,
746
]
],
[
[
796,
804
]
],
[
[
852,
876
]
],
[
[
962,
990
]
],
[
[
1156,
1181
]
],
[
[
1261,
1281
]
],
[
[
1447,
1474
]
],
[
[
1606,
1628
]
],
[
[
1851,
1872
]
],
[
[
2030,
2051
]
]
] |
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{
'includes': [
'../../common.gypi',
],
'targets': [
{
'target_name': 'ionimage_test',
'includes': [ '../../dev/test_target.gypi' ],
'sources' : [
'conversionutils_test.cc',
'ninepatch_test.cc',
'renderutils_test.cc',
],
'dependencies' : [
'image_tests_assets',
'<(ion_dir)/image/image.gyp:ionimage_for_tests',
'<(ion_dir)/base/base.gyp:ionbase_for_tests',
'<(ion_dir)/external/gtest.gyp:iongtest_safeallocs',
'<(ion_dir)/port/port.gyp:ionport',
'<(ion_dir)/gfx/gfx.gyp:iongfx_for_tests',
'<(ion_dir)/gfxutils/gfxutils.gyp:iongfxutils_for_tests',
'<(ion_dir)/portgfx/portgfx.gyp:ionportgfx_for_tests',
],
},
{
'target_name': 'image_tests_assets',
'type': 'static_library',
'includes': [
'../../dev/zipasset_generator.gypi',
],
'sources' : [
'data/images.iad',
],
'dependencies' : [
'<(ion_dir)/base/base.gyp:ionbase_for_tests',
],
},
],
}
| [] |
exec(open("Modified_data/next_level.py").read())
| [] |
from .base import lr
from . import het
from .merge import merge
from .permutation import permutation
| [
[
[
18,
20
]
],
[
[
35,
38
]
],
[
[
58,
63
]
],
[
[
89,
100
]
]
] |
# encoding: utf-8
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from dateutil.tz import tzlocal
from core.models import Event, Venue
from programme.models import ProgrammeEventMeta, TimeBlock, SpecialStartTime
class Command(BaseCommand):
args = ''
help = 'Setup tracon8 specific stuff'
    def handle(self, *args, **options):
tz = tzlocal()
venue, unused = Venue.objects.get_or_create(name='Tampere-talo')
event, unused = Event.objects.get_or_create(slug='tracon8', defaults=dict(
name='Tracon 8',
name_genitive='Tracon 8 -tapahtuman',
name_illative='Tracon 8 -tapahtumaan',
name_inessive='Tracon 8 -tapahtumassa',
homepage_url='http://2013.tracon.fi',
organization_name='Tracon ry',
organization_url='http://ry.tracon.fi',
start_time=datetime(2013, 9, 14, 10, 0, tzinfo=tz),
end_time=datetime(2013, 9, 15, 18, 0, tzinfo=tz),
venue=venue,
))
admin_group_name = "{installation_name}-{event_slug}-programme-admins".format(
installation_name=settings.KOMPASSI_INSTALLATION_SLUG,
event_slug=event.slug,
)
admin_group, unused = Group.objects.get_or_create(name=admin_group_name)
programme_event_meta, unused = ProgrammeEventMeta.objects.get_or_create(event=event, defaults=dict(
public=True,
admin_group=admin_group
))
# v5
if not programme_event_meta.contact_email:
programme_event_meta.contact_email = 'ohjelma@tracon.fi'
programme_event_meta.save()
# v6
for start_time, end_time in [
(
datetime(2013, 9, 14, 11, 0, 0, tzinfo=tz),
datetime(2013, 9, 15, 1 , 0, 0, tzinfo=tz)
),
(
datetime(2013, 9, 15, 9 , 0, 0, tzinfo=tz),
datetime(2013, 9, 15, 17, 0, 0, tzinfo=tz)
)
]:
TimeBlock.objects.get_or_create(
event=event,
start_time=start_time,
defaults=dict(
end_time=end_time
)
)
SpecialStartTime.objects.get_or_create(
event=event,
start_time=datetime(2013, 9, 14, 10, 30, 0, tzinfo=tz),
)
| [
[
[
40,
48
],
[
990,
998
],
[
1052,
1060
],
[
1846,
1854
],
[
1906,
1914
],
[
1994,
2002
],
[
2054,
2062
],
[
2433,
2441
]
],
[
[
50,
59
]
],
[
[
85,
93
],
[
1247,
1255
]
],
[
[
133,
138
],
[
1359,
1364
]
],
[
[
179,
190
],
[
355,
366
]
],
[
[
216,
223
],
[
473,
480
]
],
[
[
249,
254
],
[
581,
586
]
],
[
[
256,
261
],
[
508,
513
]
],
[
[
291,
309
],
[
1449,
1467
]
],
[
[
311,
320
],
[
2134,
2143
]
],
[
[
322,
338
],
[
2345,
2361
]
],
[
[
347,
354
]
]
] |
import svm as SVM
import numpy as np
data_dict = { -1:np.array( [[10,9,1],
[2,8,1],
[3,8,1],]),
1:np.array( [[5,1,1],
[6,-1,1],
[7,3,1],])}
svm = SVM.Support_Vector_Machine()
svm.fit(data=data_dict)
predict_us = [[0,10,1],
[1,3,1],
[3,4,1],
[3,5,1],
[5,5,1],
[5,6,1],
[6,-5,1],
[5,8,1]]
for p in predict_us:
svm.predict(p)
svm.visualize()
| [
[
[
7,
17
],
[
224,
227
]
],
[
[
25,
36
],
[
55,
57
],
[
123,
125
]
],
[
[
38,
47
],
[
266,
275
]
],
[
[
218,
221
],
[
253,
256
],
[
438,
441
],
[
453,
456
]
],
[
[
278,
288
],
[
425,
435
]
],
[
[
420,
421
],
[
450,
451
]
]
] |
#!/usr/bin/python3
import configparser
config = configparser.ConfigParser()
config.read('eve-conf.ini')
def int_imp(inp):
while True:
try:
int(inp)
break
except ValueError:
print('Input has to be a number.')
inp = input('Select again: ')
return int(inp)
def section_select(config):
csections = config.sections()
for section in csections:
print('{:>2}. {}'.format(csections.index(section),section))
num = len(csections)
print('% 2.0f. View All' % (num))
num2 = num + 1
print('%- 2.0f. Save File' % (num2))
num3 = num2 + 1
print('% 2.0f. Exit' % (num3))
while True:
inp = input('Select section to edit/option: ')
inp = int_imp(inp)
print()
if inp == num:
print_conf(config)
break
elif inp == num2:
save_file(config)
break
elif inp == num3:
print('Editor Closed')
break
elif inp < 0 or inp > num3:
print('Try again')
else:
item_editor(config, csections[inp])
break
def menu():
print()
print('Menu')
print('{:>2}. Edit a Section'.format(0))
print('{:>2}. View File'.format(1))
print('{:>2}. Save File'.format(2))
print('{:>2}. Exit'.format(3))
while True:
inp = input('Select option: ')
inp = int_imp(inp)
print()
if inp == 0:
section_select(config)
break
elif inp == 1:
print_conf(config)
break
elif inp == 2:
save_file(config)
break
elif inp == 3:
print('Editor Closed')
break
elif inp < 0 or inp > 3:
print('Try again')
def print_conf(config):
csections = config.sections()
for section in csections:
print()
print('Section: %s' % (csections[csections.index(section)]))
items = config.items(csections[csections.index(section)])
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
menu()
def save_file(config):
with open('eve-conf.ini', 'w') as cfgfile:
config.write(cfgfile)
cfgfile.close()
print('Config Saved')
menu()
def item_editor(config, section):
csections = config.sections()
items = config.items(section)
print('Section: {}'.format(section))
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
print()
menu_b = items.index(item) + 1
print('{:>2}. Back'.format(menu_b))
inp2 = input('Select key to edit: ')
inp2 = int_imp(inp2)
if inp2 == menu_b:
menu()
elif inp2 < 0 or inp2 > menu_b:
        print('Try Again')
item_editor(config, section)
else:
inp2 = int_imp(inp2)
change = input('New value: ')
old_value = config[section][items[inp2][0]]
config.set(section,items[inp2][0],change)
print()
print('Section: %s' % (section))
items = config.items(section)
for item in items:
print('{:>2}. {:<24}: {}'.format(items.index(item),item[0], item[1]))
        conf = input('Confirm Change [y,N]: ')
if conf == 'y' or conf == 'Y':
print('Config File Edited.')
else:
config.set(section,items[inp2][0],old_value)
print('Config File Not Changed.')
print()
another = input('Edit another key in this section [y,N]: ')
if another == 'y' or another == 'Y':
print()
item_editor(config,section)
else:
menu()
section_select(config)
| [
[
[
26,
38
],
[
49,
61
]
],
[
[
40,
46
],
[
77,
83
],
[
3737,
3743
],
[
1505,
1511
],
[
1577,
1583
],
[
1648,
1654
]
],
[
[
110,
117
],
[
753,
760
],
[
1427,
1434
],
[
2717,
2724
],
[
2893,
2900
]
],
[
[
333,
347
],
[
3722,
3736
],
[
1490,
1504
]
],
[
[
1160,
1164
],
[
2168,
2172
],
[
2326,
2330
],
[
2762,
2766
],
[
3714,
3718
]
],
[
[
1819,
1829
],
[
818,
828
],
[
1566,
1576
]
],
[
[
2180,
2189
],
[
893,
902
],
[
1638,
1647
]
],
[
[
2338,
2349
],
[
1101,
1112
],
[
2839,
2850
],
[
3659,
3670
]
]
] |